Update write time marked on actual flush not on flush request.
JIRA: CONTROLLER-2108
Change-Id: I92a66ae775cbae6aeea69bddf654df741f473dbd
Signed-off-by: Ruslan Kashapov <ruslan.kashapov@pantheon.tech>
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
--- /dev/null
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
+ jobs:
+ post_checkout:
+ - git fetch --unshallow || true
+
+sphinx:
+ configuration: docs/conf.py
+
+python:
+ install:
+ - requirements: docs/requirements.txt
<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
This program and the accompanying materials are made available under the
terms of the Eclipse Public License v1.0 which accompanies this distribution,
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-aggregator</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>akka-aggregator</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
</properties>
<modules>
- <module>model-inventory</module>
- <module>model-topology</module>
+ <module>repackaged-akka-jar</module>
+ <module>repackaged-akka</module>
</modules>
</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>odlparent</artifactId>
+ <version>13.0.11</version>
+ <relativePath/>
+ </parent>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka-jar</artifactId>
+ <packaging>jar</packaging>
+ <version>9.0.3-SNAPSHOT</version>
+ <name>${project.artifactId}</name>
+
+ <properties>
+ <!-- We do not want to leak this artifact -->
+ <maven.deploy.skip>true</maven.deploy.skip>
+ </properties>
+
+ <dependencies>
+ <!-- Note: when bumping versions, make sure to update configurations in src/main/resources -->
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor-typed_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-cluster_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-cluster-typed_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-osgi_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-protobuf_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-remote_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-stream_2.13</artifactId>
+ <version>2.6.21</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>unpack-license</id>
+ <configuration>
+ <!-- Akka is Apache-2.0 licensed -->
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-shade-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <createDependencyReducedPom>false</createDependencyReducedPom>
+ <shadeSourcesContent>true</shadeSourcesContent>
+ <createSourcesJar>true</createSourcesJar>
+ <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+ <artifactSet>
+ <includes>
+ <include>com.typesafe.akka</include>
+ </includes>
+ </artifactSet>
+ <filters>
+ <filter>
+ <artifact>com.typesafe.akka:*</artifact>
+ <excludes>
+ <exclude>META-INF/MANIFEST.MF</exclude>
+ <exclude>reference.conf</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-source-plugin</artifactId>
+ <configuration>
+ <!-- We handle this through shade plugin -->
+ <skipSource>true</skipSource>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+---------------
+
+Licenses for dependency projects can be found here:
+[http://akka.io/docs/akka/snapshot/project/licenses.html]
+
+---------------
+
+akka-protobuf contains the sources of Google protobuf 2.5.0 runtime support,
+moved into the source package `akka.protobuf` so as to avoid version conflicts.
+For license information see COPYING.protobuf
--- /dev/null
+####################################
+# Akka Actor Reference Config File #
+####################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# Akka version, checked against the runtime version of Akka. Loaded from generated conf file.
+include "version"
+
+akka {
+ # Home directory of Akka, modules in the deploy directory will be loaded
+ home = ""
+
+ # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs
+ # to STDOUT)
+ loggers = ["akka.event.Logging$DefaultLogger"]
+
+ # Filter of log events that is used by the LoggingAdapter before
+ # publishing log events to the eventStream. It can perform
+ # fine grained filtering based on the log source. The default
+ # implementation filters on the `loglevel`.
+ # FQCN of the LoggingFilter. The Class of the FQCN must implement
+ # akka.event.LoggingFilter and have a public constructor with
+ # (akka.actor.ActorSystem.Settings, akka.event.EventStream) parameters.
+ logging-filter = "akka.event.DefaultLoggingFilter"
+
+ # Specifies the default loggers dispatcher
+ loggers-dispatcher = "akka.actor.default-dispatcher"
+
+ # Loggers are created and registered synchronously during ActorSystem
+ # start-up, and since they are actors, this timeout is used to bound the
+ # waiting time
+ logger-startup-timeout = 5s
+
+ # Log level used by the configured loggers (see "loggers") as soon
+ # as they have been started; before that, see "stdout-loglevel"
+ # Options: OFF, ERROR, WARNING, INFO, DEBUG
+ loglevel = "INFO"
+
+ # Log level for the very basic logger activated during ActorSystem startup.
+ # This logger prints the log messages to stdout (System.out).
+ # Options: OFF, ERROR, WARNING, INFO, DEBUG
+ stdout-loglevel = "WARNING"
+
+ # Log the complete configuration at INFO level when the actor system is started.
+ # This is useful when you are uncertain of what configuration is used.
+ log-config-on-start = off
+
+ # Log at info level when messages are sent to dead letters, or published to
+ # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`.
+ # Possible values:
+ # on: all dead letters are logged
+ # off: no logging of dead letters
+ # n: positive integer, number of dead letters that will be logged
+ log-dead-letters = 10
+
+ # Possibility to turn off logging of dead letters while the actor system
+ # is shutting down. Logging is only done when enabled by 'log-dead-letters'
+ # setting.
+ log-dead-letters-during-shutdown = off
+
+ # When log-dead-letters is enabled, this will re-enable the logging after configured duration.
+ # infinite: suspend the logging forever;
+ # or a duration (eg: 5 minutes), after which the logging will be re-enabled.
+ log-dead-letters-suspend-duration = 5 minutes
+
+ # List FQCN of extensions which shall be loaded at actor system startup.
+ # Library extensions are regular extensions that are loaded at startup and are
+ # available for third party library authors to enable auto-loading of extensions when
+ # present on the classpath. This is done by appending entries:
+ # 'library-extensions += "Extension"' in the library `reference.conf`.
+ #
+ # Should not be set by end user applications in 'application.conf', use the extensions property for that
+ #
+ library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]
+
+ # List FQCN of extensions which shall be loaded at actor system startup.
+ # Should be on the format: 'extensions = ["foo", "bar"]' etc.
+ # See the Akka Documentation for more info about Extensions
+ extensions = []
+
+ # Toggles whether threads created by this ActorSystem should be daemons or not
+ daemonic = off
+
+ # JVM shutdown, System.exit(-1), in case of a fatal error,
+ # such as OutOfMemoryError
+ jvm-exit-on-fatal-error = on
+
+ # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will
+ # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`.
+ # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below.
+ # This property makes it possible to disable all such hooks if the application itself
+ # or a higher level framework such as Play prefers to install the JVM shutdown hook and
+ # terminate the ActorSystem itself, with or without using CoordinatedShutdown.
+ jvm-shutdown-hooks = on
+
+ # Version must be the same across all modules and if they are different the startup
+ # will fail. It's possible but not recommended, to disable this check, and only log a warning,
+ # by setting this property to `off`.
+ fail-mixed-versions = on
+
+ # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running
+ # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops
+ # with this toggle.
+ java-flight-recorder {
+ enabled = true
+ }
+
+ actor {
+
+ # Either one of "local", "remote" or "cluster" or the
+ # FQCN of the ActorRefProvider to be used; the below is the built-in default,
+ # note that "remote" and "cluster" requires the akka-remote and akka-cluster
+ # artifacts to be on the classpath.
+ provider = "local"
+
+ # The guardian "/user" will use this class to obtain its supervisorStrategy.
+ # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator.
+ # In addition to the default there is akka.actor.StoppingSupervisorStrategy.
+ guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy"
+
+ # Timeout for Extension creation and a few other potentially blocking
+ # initialization tasks.
+ creation-timeout = 20s
+
+ # Serializes and deserializes (non-primitive) messages to ensure immutability,
+ # this is only intended for testing.
+ serialize-messages = off
+
+ # Serializes and deserializes creators (in Props) to ensure that they can be
+ # sent over the network, this is only intended for testing. Purely local deployments
+ # as marked with deploy.scope == LocalScope are exempt from verification.
+ serialize-creators = off
+
+ # If serialize-messages or serialize-creators are enabled classes that starts with
+ # a prefix listed here are not verified.
+ no-serialization-verification-needed-class-prefix = ["akka."]
+
+ # Timeout for send operations to top-level actors which are in the process
+ # of being started. This is only relevant if using a bounded mailbox or the
+ # CallingThreadDispatcher for a top-level actor.
+ unstarted-push-timeout = 10s
+
+ # TypedActor deprecated since 2.6.0.
+ typed {
+ # Default timeout for the deprecated TypedActor (not the new actor APIs in 2.6)
+ # methods with non-void return type.
+ timeout = 5s
+ }
+
+ # Mapping between ´deployment.router' short names to fully qualified class names
+ router.type-mapping {
+ from-code = "akka.routing.NoRouter"
+ round-robin-pool = "akka.routing.RoundRobinPool"
+ round-robin-group = "akka.routing.RoundRobinGroup"
+ random-pool = "akka.routing.RandomPool"
+ random-group = "akka.routing.RandomGroup"
+ balancing-pool = "akka.routing.BalancingPool"
+ smallest-mailbox-pool = "akka.routing.SmallestMailboxPool"
+ broadcast-pool = "akka.routing.BroadcastPool"
+ broadcast-group = "akka.routing.BroadcastGroup"
+ scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool"
+ scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup"
+ tail-chopping-pool = "akka.routing.TailChoppingPool"
+ tail-chopping-group = "akka.routing.TailChoppingGroup"
+ consistent-hashing-pool = "akka.routing.ConsistentHashingPool"
+ consistent-hashing-group = "akka.routing.ConsistentHashingGroup"
+ }
+
+ deployment {
+
+ # deployment id pattern - on the format: /parent/child etc.
+ default {
+
+ # The id of the dispatcher to use for this actor.
+ # If undefined or empty the dispatcher specified in code
+ # (Props.withDispatcher) is used, or default-dispatcher if not
+ # specified at all.
+ dispatcher = ""
+
+ # The id of the mailbox to use for this actor.
+ # If undefined or empty the default mailbox of the configured dispatcher
+ # is used or if there is no mailbox configuration the mailbox specified
+ # in code (Props.withMailbox) is used.
+ # If there is a mailbox defined in the configured dispatcher then that
+ # overrides this setting.
+ mailbox = ""
+
+ # routing (load-balance) scheme to use
+ # - available: "from-code", "round-robin", "random", "smallest-mailbox",
+ # "scatter-gather", "broadcast"
+ # - or: Fully qualified class name of the router class.
+ # The class must extend akka.routing.CustomRouterConfig and
+ # have a public constructor with com.typesafe.config.Config
+ # and optional akka.actor.DynamicAccess parameter.
+ # - default is "from-code";
+ # Whether or not an actor is transformed to a Router is decided in code
+ # only (Props.withRouter). The type of router can be overridden in the
+ # configuration; specifying "from-code" means that the values specified
+ # in the code shall be used.
+ # In case of routing, the actors to be routed to can be specified
+ # in several ways:
+ # - nr-of-instances: will create that many children
+ # - routees.paths: will route messages to these paths using ActorSelection,
+ # i.e. will not create children
+ # - resizer: dynamically resizable number of routees as specified in
+ # resizer below
+ router = "from-code"
+
+ # number of children to create in case of a router;
+ # this setting is ignored if routees.paths is given
+ nr-of-instances = 1
+
+ # within is the timeout used for routers containing future calls
+ within = 5 seconds
+
+ # number of virtual nodes per node for consistent-hashing router
+ virtual-nodes-factor = 10
+
+ tail-chopping-router {
+ # interval is duration between sending message to next routee
+ interval = 10 milliseconds
+ }
+
+ routees {
+ # Alternatively to giving nr-of-instances you can specify the full
+ # paths of those actors which should be routed to. This setting takes
+ # precedence over nr-of-instances
+ paths = []
+ }
+
+ # To use a dedicated dispatcher for the routees of the pool you can
+ # define the dispatcher configuration inline with the property name
+ # 'pool-dispatcher' in the deployment section of the router.
+ # For example:
+ # pool-dispatcher {
+ # fork-join-executor.parallelism-min = 5
+ # fork-join-executor.parallelism-max = 5
+ # }
+
+ # Routers with dynamically resizable number of routees; this feature is
+ # enabled by including (parts of) this section in the deployment
+ resizer {
+
+ enabled = off
+
+ # The fewest number of routees the router should ever have.
+ lower-bound = 1
+
+ # The most number of routees the router should ever have.
+ # Must be greater than or equal to lower-bound.
+ upper-bound = 10
+
+ # Threshold used to evaluate if a routee is considered to be busy
+ # (under pressure). Implementation depends on this value (default is 1).
+ # 0: number of routees currently processing a message.
+ # 1: number of routees currently processing a message has
+ # some messages in mailbox.
+ # > 1: number of routees with at least the configured pressure-threshold
+ # messages in their mailbox. Note that estimating mailbox size of
+ # default UnboundedMailbox is O(N) operation.
+ pressure-threshold = 1
+
+ # Percentage to increase capacity whenever all routees are busy.
+ # For example, 0.2 would increase 20% (rounded up), i.e. if current
+ # capacity is 6 it will request an increase of 2 more routees.
+ rampup-rate = 0.2
+
+ # Minimum fraction of busy routees before backing off.
+ # For example, if this is 0.3, then we'll remove some routees only when
+ # less than 30% of routees are busy, i.e. if current capacity is 10 and
+ # 3 are busy then the capacity is unchanged, but if 2 or less are busy
+ # the capacity is decreased.
+ # Use 0.0 or negative to avoid removal of routees.
+ backoff-threshold = 0.3
+
+ # Fraction of routees to be removed when the resizer reaches the
+ # backoffThreshold.
+ # For example, 0.1 would decrease 10% (rounded up), i.e. if current
+ # capacity is 9 it will request an decrease of 1 routee.
+ backoff-rate = 0.1
+
+ # Number of messages between resize operation.
+ # Use 1 to resize before each message.
+ messages-per-resize = 10
+ }
+
+ # Routers with dynamically resizable number of routees based on
+ # performance metrics.
+ # This feature is enabled by including (parts of) this section in
+ # the deployment, cannot be enabled together with default resizer.
+ optimal-size-exploring-resizer {
+
+ enabled = off
+
+ # The fewest number of routees the router should ever have.
+ lower-bound = 1
+
+ # The most number of routees the router should ever have.
+ # Must be greater than or equal to lower-bound.
+ upper-bound = 10
+
+ # probability of doing a ramping down when all routees are busy
+ # during exploration.
+ chance-of-ramping-down-when-full = 0.2
+
+ # Interval between each resize attempt
+ action-interval = 5s
+
+ # If the routees have not been fully utilized (i.e. all routees busy)
+ # for such length, the resizer will downsize the pool.
+ downsize-after-underutilized-for = 72h
+
+ # Duration exploration, the ratio between the largest step size and
+ # current pool size. E.g. if the current pool size is 50, and the
+ # explore-step-size is 0.1, the maximum pool size change during
+ # exploration will be +- 5
+ explore-step-size = 0.1
+
+ # Probability of doing an exploration v.s. optimization.
+ chance-of-exploration = 0.4
+
+ # When downsizing after a long streak of underutilization, the resizer
+ # will downsize the pool to the highest utiliziation multiplied by a
+ # a downsize ratio. This downsize ratio determines the new pools size
+ # in comparison to the highest utilization.
+ # E.g. if the highest utilization is 10, and the down size ratio
+ # is 0.8, the pool will be downsized to 8
+ downsize-ratio = 0.8
+
+ # When optimizing, the resizer only considers the sizes adjacent to the
+ # current size. This number indicates how many adjacent sizes to consider.
+ optimization-range = 16
+
+ # The weight of the latest metric over old metrics when collecting
+ # performance metrics.
+ # E.g. if the last processing speed is 10 millis per message at pool
+ # size 5, and if the new processing speed collected is 6 millis per
+ # message at pool size 5. Given a weight of 0.3, the metrics
+ # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis
+ # Obviously, this number should be between 0 and 1.
+ weight-of-latest-metric = 0.5
+ }
+ }
+
+ "/IO-DNS/inet-address" {
+ mailbox = "unbounded"
+ router = "consistent-hashing-pool"
+ nr-of-instances = 4
+ }
+
+ "/IO-DNS/inet-address/*" {
+ dispatcher = "akka.actor.default-blocking-io-dispatcher"
+ }
+
+ "/IO-DNS/async-dns" {
+ mailbox = "unbounded"
+ router = "round-robin-pool"
+ nr-of-instances = 1
+ }
+ }
+
+ default-dispatcher {
+ # Must be one of the following
+ # Dispatcher, PinnedDispatcher, or a FQCN to a class inheriting
+ # MessageDispatcherConfigurator with a public constructor with
+ # both com.typesafe.config.Config parameter and
+ # akka.dispatch.DispatcherPrerequisites parameters.
+ # PinnedDispatcher must be used together with executor=thread-pool-executor.
+ type = "Dispatcher"
+
+ # Which kind of ExecutorService to use for this dispatcher
+ # Valid options:
+ # - "default-executor" requires a "default-executor" section
+ # - "fork-join-executor" requires a "fork-join-executor" section
+ # - "thread-pool-executor" requires a "thread-pool-executor" section
+ # - "affinity-pool-executor" requires an "affinity-pool-executor" section
+ # - A FQCN of a class extending ExecutorServiceConfigurator
+ executor = "default-executor"
+
+ # This will be used if you have set "executor = "default-executor"".
+ # If an ActorSystem is created with a given ExecutionContext, this
+ # ExecutionContext will be used as the default executor for all
+ # dispatchers in the ActorSystem configured with
+ # executor = "default-executor". Note that "default-executor"
+ # is the default value for executor, and therefore used if not
+ # specified otherwise. If no ExecutionContext is given,
+ # the executor configured in "fallback" will be used.
+ default-executor {
+ fallback = "fork-join-executor"
+ }
+
+ # This will be used if you have set "executor = "affinity-pool-executor""
+ # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool.
+ # This executor is classified as "ApiMayChange".
+ affinity-pool-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 4
+
+ # The parallelism factor is used to determine thread pool size using the
+ # following formula: ceil(available processors * factor). Resulting size
+ # is then bounded by the parallelism-min and parallelism-max values.
+ parallelism-factor = 0.8
+
+ # Max number of threads to cap factor-based parallelism number to.
+ parallelism-max = 64
+
+ # Each worker in the pool uses a separate bounded MPSC queue. This value
+ # indicates the upper bound of the queue. Whenever an attempt to enqueue
+ # a task is made and the queue does not have capacity to accommodate
+ # the task, the rejection handler created by the rejection handler specified
+ # in "rejection-handler" is invoked.
+ task-queue-size = 512
+
+ # FQCN of the Rejection handler used in the pool.
+ # Must have an empty public constructor and must
+ # implement akka.actor.affinity.RejectionHandlerFactory.
+ rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler"
+
+ # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
+ # The tradeoff is that to have low latency more CPU time must be used to be
+ # able to react quickly on incoming messages or send as fast as possible after
+ # backoff backpressure.
+ # Level 1 strongly prefer low CPU consumption over low latency.
+ # Level 10 strongly prefer low latency over low CPU consumption.
+ idle-cpu-level = 5
+
+ # FQCN of the akka.dispatch.affinity.QueueSelectorFactory.
+ # The Class of the FQCN must have a public constructor with a
+ # (com.typesafe.config.Config) parameter.
+ # A QueueSelectorFactory create instances of akka.dispatch.affinity.QueueSelector,
+ # that is responsible for determining which task queue a Runnable should be enqueued in.
+ queue-selector = "akka.dispatch.affinity.FairDistributionHashCache"
+
+ # When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector
+ # internally the AffinityPool uses two methods to determine which task
+ # queue to allocate a Runnable to:
+ # - map based - maintains a round robin counter and a map of Runnable
+ # hashcodes to queues that they have been associated with. This ensures
+ # maximum fairness in terms of work distribution, meaning that each worker
+ # will get approximately equal amount of mailboxes to execute. This is suitable
+ # in cases where we have a small number of actors that will be scheduled on
+ # the pool and we want to ensure the maximum possible utilization of the
+ # available threads.
+ # - hash based - the task - queue in which the runnable should go is determined
+ # by using an uniformly distributed int to int hash function which uses the
+ # hash code of the Runnable as an input. This is preferred in situations where we
+ # have enough number of distinct actors to ensure statistically uniform
+ # distribution of work across threads or we are ready to sacrifice the
+ # former for the added benefit of avoiding map look-ups.
+ fair-work-distribution {
+ # The value serves as a threshold which determines the point at which the
+ # pool switches from the first to the second work distribution schemes.
+ # For example, if the value is set to 128, the pool can observe up to
+ # 128 unique actors and schedule their mailboxes using the map based
+ # approach. Once this number is reached the pool switches to hash based
+ # task distribution mode. If the value is set to 0, the map based
+ # work distribution approach is disabled and only the hash based is
+ # used irrespective of the number of unique actors. Valid range is
+ # 0 to 2048 (inclusive)
+ threshold = 128
+ }
+ }
+
+ # This will be used if you have set "executor = "fork-join-executor""
+ # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 8
+
+ # The parallelism factor is used to determine thread pool size using the
+ # following formula: ceil(available processors * factor). Resulting size
+ # is then bounded by the parallelism-min and parallelism-max values.
+ parallelism-factor = 1.0
+
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 64
+
+ # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack
+ # like peeking mode which "pop".
+ task-peeking-mode = "FIFO"
+ }
+
+ # This will be used if you have set "executor = "thread-pool-executor""
+ # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor
+ thread-pool-executor {
+ # Keep alive time for threads
+ keep-alive-time = 60s
+
+ # Define a fixed thread pool size with this property. The corePoolSize
+ # and the maximumPoolSize of the ThreadPoolExecutor will be set to this
+ # value, if it is defined. Then the other pool-size properties will not
+ # be used.
+ #
+ # Valid values are: `off` or a positive integer.
+ fixed-pool-size = off
+
+ # Min number of threads to cap factor-based corePoolSize number to
+ core-pool-size-min = 8
+
+ # The core-pool-size-factor is used to determine corePoolSize of the
+ # ThreadPoolExecutor using the following formula:
+ # ceil(available processors * factor).
+ # Resulting size is then bounded by the core-pool-size-min and
+ # core-pool-size-max values.
+ core-pool-size-factor = 3.0
+
+ # Max number of threads to cap factor-based corePoolSize number to
+ core-pool-size-max = 64
+
+ # Minimum number of threads to cap factor-based maximumPoolSize number to
+ max-pool-size-min = 8
+
+ # The max-pool-size-factor is used to determine maximumPoolSize of the
+ # ThreadPoolExecutor using the following formula:
+ # ceil(available processors * factor)
+ # The maximumPoolSize will not be less than corePoolSize.
+ # It is only used if using a bounded task queue.
+ max-pool-size-factor = 3.0
+
+ # Max number of threads to cap factor-based maximumPoolSize number to
+ max-pool-size-max = 64
+
+ # Specifies the bounded capacity of the task queue (< 1 == unbounded)
+ task-queue-size = -1
+
+ # Specifies which type of task queue will be used, can be "array" or
+ # "linked" (default)
+ task-queue-type = "linked"
+
+ # Allow core threads to time out
+ allow-core-timeout = on
+ }
+
+ # How long time the dispatcher will wait for new actors until it shuts down
+ shutdown-timeout = 1s
+
+ # Throughput defines the number of messages that are processed in a batch
+ # before the thread is returned to the pool. Set to 1 for as fair as possible.
+ throughput = 5
+
+ # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
+ throughput-deadline-time = 0ms
+
+ # For BalancingDispatcher: If the balancing dispatcher should attempt to
+ # schedule idle actors using the same dispatcher when a message comes in,
+ # and the dispatchers ExecutorService is not fully busy already.
+ attempt-teamwork = on
+
+ # If this dispatcher requires a specific type of mailbox, specify the
+ # fully-qualified class name here; the actually created mailbox will
+ # be a subtype of this type. The empty string signifies no requirement.
+ mailbox-requirement = ""
+ }
+
+ # Default separate internal dispatcher to run Akka internal tasks and actors on
+ # protecting them against starvation because of accidental blocking in user actors (which run on the
+ # default dispatcher)
+ internal-dispatcher {
+ type = "Dispatcher"
+ executor = "fork-join-executor"
+ throughput = 5
+ fork-join-executor {
+ parallelism-min = 4
+ parallelism-factor = 1.0
+ parallelism-max = 64
+ }
+ }
+
+ default-blocking-io-dispatcher {
+ type = "Dispatcher"
+ executor = "thread-pool-executor"
+ throughput = 1
+
+ thread-pool-executor {
+ fixed-pool-size = 16
+ }
+ }
+
+ default-mailbox {
+ # FQCN of the MailboxType. The Class of the FQCN must have a public
+ # constructor with
+ # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.UnboundedMailbox"
+
+ # If the mailbox is bounded then it uses this setting to determine its
+ # capacity. The provided value must be positive.
+ # NOTICE:
+ # Up to version 2.1 the mailbox type was determined based on this setting;
+ # this is no longer the case, the type must explicitly be a bounded mailbox.
+ mailbox-capacity = 1000
+
+ # If the mailbox is bounded then this is the timeout for enqueueing
+ # in case the mailbox is full. Negative values signify infinite
+ # timeout, which should be avoided as it bears the risk of dead-lock.
+ mailbox-push-timeout-time = 10s
+
+ # For Actor with Stash: The default capacity of the stash.
+ # If negative (or zero) then an unbounded stash is used (default)
+ # If positive then a bounded stash is used and the capacity is set using
+ # the property
+ stash-capacity = -1
+ }
+
+ mailbox {
+ # Mapping between message queue semantics and mailbox configurations.
+ # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different
+ # mailbox types on actors.
+ # If your Actor implements RequiresMessageQueue[T], then when you create
+ # an instance of that actor its mailbox type will be decided by looking
+ # up a mailbox configuration via T in this mapping
+ requirements {
+ "akka.dispatch.UnboundedMessageQueueSemantics" =
+ akka.actor.mailbox.unbounded-queue-based
+ "akka.dispatch.BoundedMessageQueueSemantics" =
+ akka.actor.mailbox.bounded-queue-based
+ "akka.dispatch.DequeBasedMessageQueueSemantics" =
+ akka.actor.mailbox.unbounded-deque-based
+ "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" =
+ akka.actor.mailbox.unbounded-deque-based
+ "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" =
+ akka.actor.mailbox.bounded-deque-based
+ "akka.dispatch.MultipleConsumerSemantics" =
+ akka.actor.mailbox.unbounded-queue-based
+ "akka.dispatch.ControlAwareMessageQueueSemantics" =
+ akka.actor.mailbox.unbounded-control-aware-queue-based
+ "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" =
+ akka.actor.mailbox.unbounded-control-aware-queue-based
+ "akka.dispatch.BoundedControlAwareMessageQueueSemantics" =
+ akka.actor.mailbox.bounded-control-aware-queue-based
+ "akka.event.LoggerMessageQueueSemantics" =
+ akka.actor.mailbox.logger-queue
+ }
+
+ unbounded-queue-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.UnboundedMailbox"
+ }
+
+ bounded-queue-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.BoundedMailbox"
+ }
+
+ unbounded-deque-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
+ }
+
+ bounded-deque-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox"
+ }
+
+ unbounded-control-aware-queue-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox"
+ }
+
+ bounded-control-aware-queue-based {
+ # FQCN of the MailboxType, The Class of the FQCN must have a public
+ # constructor with (akka.actor.ActorSystem.Settings,
+ # com.typesafe.config.Config) parameters.
+ mailbox-type = "akka.dispatch.BoundedControlAwareMailbox"
+ }
+
+ # The LoggerMailbox will drain all messages in the mailbox
+ # when the system is shutdown and deliver them to the StandardOutLogger.
+ # Do not change this unless you know what you are doing.
+ logger-queue {
+ mailbox-type = "akka.event.LoggerMailboxType"
+ }
+ }
+
+ debug {
+ # enable function of Actor.loggable(), which is to log any received message
+ # at DEBUG level, see the “Testing Actor Systems” section of the Akka
+ # Documentation at https://akka.io/docs
+ receive = off
+
+ # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.)
+ autoreceive = off
+
+ # enable DEBUG logging of actor lifecycle changes
+ lifecycle = off
+
+ # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
+ fsm = off
+
+ # enable DEBUG logging of subscription changes on the eventStream
+ event-stream = off
+
+ # enable DEBUG logging of unhandled messages
+ unhandled = off
+
+ # enable WARN logging of misconfigured routers
+ router-misconfiguration = off
+ }
+
+ # SECURITY BEST-PRACTICE is to disable java serialization for its multiple
+ # known attack surfaces.
+ #
+ # This setting is a short-cut to
+ # - using DisabledJavaSerializer instead of JavaSerializer
+ #
+ # Completely disable the use of `akka.serialization.JavaSerialization` by the
+ # Akka Serialization extension, instead DisabledJavaSerializer will
+ # be inserted which will fail explicitly if attempts to use java serialization are made.
+ #
+ # The log messages emitted by such serializer SHOULD be treated as potential
+ # attacks which the serializer prevented, as they MAY indicate an external operator
+ # attempting to send malicious messages intending to use java serialization as attack vector.
+ # The attempts are logged with the SECURITY marker.
+ #
+ # Please note that this option does not stop you from manually invoking java serialization
+ #
+ allow-java-serialization = on
+
+ # Log warnings when the Java serialization is used to serialize messages.
+ # Java serialization is not very performant and should not be used in production
+ # environments unless you don't care about performance and security. In that case
+ # you can turn this off.
+ warn-about-java-serializer-usage = on
+
+ # To be used with the above warn-about-java-serializer-usage
+ # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
+ # warnings are suppressed for classes extending NoSerializationVerificationNeeded
+ # to reduce noise.
+ warn-on-no-serialization-verification = on
+
+ # Entries for pluggable serializers and their bindings.
+ serializers {
+ java = "akka.serialization.JavaSerializer"
+ bytes = "akka.serialization.ByteArraySerializer"
+ primitive-long = "akka.serialization.LongSerializer"
+ primitive-int = "akka.serialization.IntSerializer"
+ primitive-string = "akka.serialization.StringSerializer"
+ primitive-bytestring = "akka.serialization.ByteStringSerializer"
+ primitive-boolean = "akka.serialization.BooleanSerializer"
+ }
+
+ # Class to Serializer binding. You only need to specify the name of an
+ # interface or abstract base class of the messages. In case of ambiguity it
+ # is using the most specific configured class, or giving a warning and
+ # choosing the “first” one.
+ #
+ # To disable one of the default serializers, assign its class to "none", like
+ # "java.io.Serializable" = none
+ serialization-bindings {
+ "[B" = bytes
+ "java.io.Serializable" = java
+
+ "java.lang.String" = primitive-string
+ "akka.util.ByteString$ByteString1C" = primitive-bytestring
+ "akka.util.ByteString$ByteString1" = primitive-bytestring
+ "akka.util.ByteString$ByteStrings" = primitive-bytestring
+ "java.lang.Long" = primitive-long
+ "scala.Long" = primitive-long
+ "java.lang.Integer" = primitive-int
+ "scala.Int" = primitive-int
+ "java.lang.Boolean" = primitive-boolean
+ "scala.Boolean" = primitive-boolean
+ }
+
+ # Configuration namespace of serialization identifiers.
+ # Each serializer implementation must have an entry in the following format:
+ # `akka.actor.serialization-identifiers."FQCN" = ID`
+ # where `FQCN` is fully qualified class name of the serializer implementation
+ # and `ID` is globally unique serializer identifier number.
+ # Identifier values from 0 to 40 are reserved for Akka internal usage.
+ serialization-identifiers {
+ "akka.serialization.JavaSerializer" = 1
+ "akka.serialization.ByteArraySerializer" = 4
+
+ primitive-long = 18
+ primitive-int = 19
+ primitive-string = 20
+ primitive-bytestring = 21
+ primitive-boolean = 35
+ }
+
+ }
+
+ serialization.protobuf {
+ # deprecated, use `allowed-classes` instead
+ whitelist-class = [
+ "com.google.protobuf.GeneratedMessage",
+ "com.google.protobuf.GeneratedMessageV3",
+ "scalapb.GeneratedMessageCompanion",
+ "akka.protobuf.GeneratedMessage",
+ "akka.protobufv3.internal.GeneratedMessageV3"
+ ]
+
+ # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
+ # It can be exact class name or name of super class or interfaces (one level).
+ # This is useful when a class is not used for serialization any more and therefore removed
+ # from `serialization-bindings`, but should still be possible to deserialize.
+ allowed-classes = ${akka.serialization.protobuf.whitelist-class}
+
+ }
+
+ # Used to set the behavior of the scheduler.
+ # Changing the default values may change the system behavior drastically so make
+ # sure you know what you're doing! See the Scheduler section of the Akka
+ # Documentation for more details.
+ scheduler {
+ # The LightArrayRevolverScheduler is used as the default scheduler in the
+ # system. It does not execute the scheduled tasks on exact time, but on every
+ # tick, it will run everything that is (over)due. You can increase or decrease
+ # the accuracy of the execution timing by specifying smaller or larger tick
+ # duration. If you are scheduling a lot of tasks you should consider increasing
+ # the ticks per wheel.
+ # Note that it might take up to 1 tick to stop the Timer, so setting the
+ # tick-duration to a high value will make shutting down the actor system
+ # take longer.
+ tick-duration = 10ms
+
+ # The timer uses a circular wheel of buckets to store the timer tasks.
+ # This should be set such that the majority of scheduled timeouts (for high
+ # scheduling frequency) will be shorter than one rotation of the wheel
+ # (ticks-per-wheel * ticks-duration)
+ # THIS MUST BE A POWER OF TWO!
+ ticks-per-wheel = 512
+
+ # This setting selects the timer implementation which shall be loaded at
+ # system start-up.
+ # The class given here must implement the akka.actor.Scheduler interface
+ # and offer a public constructor which takes three arguments:
+ # 1) com.typesafe.config.Config
+ # 2) akka.event.LoggingAdapter
+ # 3) java.util.concurrent.ThreadFactory
+ implementation = akka.actor.LightArrayRevolverScheduler
+
+ # When shutting down the scheduler, there will typically be a thread which
+ # needs to be stopped, and this timeout determines how long to wait for
+ # that to happen. In case of timeout the shutdown of the actor system will
+ # proceed without running possibly still enqueued tasks.
+ shutdown-timeout = 5s
+ }
+
+ io {
+
+ # By default the select loops run on dedicated threads, hence using a
+ # PinnedDispatcher
+ pinned-dispatcher {
+ type = "PinnedDispatcher"
+ executor = "thread-pool-executor"
+ thread-pool-executor.allow-core-timeout = off
+ }
+
+ tcp {
+
+ # The number of selectors to stripe the served channels over; each of
+ # these will use one select loop on the selector-dispatcher.
+ nr-of-selectors = 1
+
+ # Maximum number of open channels supported by this TCP module; there is
+ # no intrinsic general limit, this setting is meant to enable DoS
+ # protection by limiting the number of concurrently connected clients.
+ # Also note that this is a "soft" limit; in certain cases the implementation
+ # will accept a few connections more or a few less than the number configured
+ # here. Must be an integer > 0 or "unlimited".
+ max-channels = 256000
+
+ # When trying to assign a new connection to a selector and the chosen
+ # selector is at full capacity, retry selector choosing and assignment
+ # this many times before giving up
+ selector-association-retries = 10
+
+ # The maximum number of connection that are accepted in one go,
+ # higher numbers decrease latency, lower numbers increase fairness on
+ # the worker-dispatcher
+ batch-accept-limit = 10
+
+ # The number of bytes per direct buffer in the pool used to read or write
+ # network data from the kernel.
+ direct-buffer-size = 128 KiB
+
+ # The maximal number of direct buffers kept in the direct buffer pool for
+ # reuse.
+ direct-buffer-pool-limit = 1000
+
+ # The duration a connection actor waits for a `Register` message from
+ # its commander before aborting the connection.
+ register-timeout = 5s
+
+ # The maximum number of bytes delivered by a `Received` message. Before
+ # more data is read from the network the connection actor will try to
+ # do other work.
+ # The purpose of this setting is to impose a smaller limit than the
+ # configured receive buffer size. When using value 'unlimited' it will
+ # try to read all from the receive buffer.
+ max-received-message-size = unlimited
+
+ # Enable fine grained logging of what goes on inside the implementation.
+ # Be aware that this may log more than once per message sent to the actors
+ # of the tcp implementation.
+ trace-logging = off
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # to be used for running the select() calls in the selectors
+ selector-dispatcher = "akka.io.pinned-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the read/write worker actors
+ worker-dispatcher = "akka.actor.internal-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the selector management actors
+ management-dispatcher = "akka.actor.internal-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # on which file IO tasks are scheduled
+ file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+ # The maximum number of bytes (or "unlimited") to transfer in one batch
+ # when using `WriteFile` command which uses `FileChannel.transferTo` to
+ # pipe files to a TCP socket. On some OS like Linux `FileChannel.transferTo`
+ # may block for a long time when network IO is faster than file IO.
+ # Decreasing the value may improve fairness while increasing may improve
+ # throughput.
+ file-io-transferTo-limit = 512 KiB
+
+ # The number of times to retry the `finishConnect` call after being notified about
+ # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that
+ # `finishConnect` will succeed, which is the case on Android.
+ finish-connect-retries = 5
+
+ # On Windows connection aborts are not reliably detected unless an OP_READ is
+ # registered on the selector _after_ the connection has been reset. This
+ # workaround enables an OP_CONNECT which forces the abort to be visible on Windows.
+ # Enabling this setting on other platforms than Windows will cause various failures
+ # and undefined behavior.
+ # Possible values of this key are on, off and auto where auto will enable the
+ # workaround if Windows is detected automatically.
+ windows-connection-abort-workaround-enabled = off
+ }
+
+ udp {
+
+ # The number of selectors to stripe the served channels over; each of
+ # these will use one select loop on the selector-dispatcher.
+ nr-of-selectors = 1
+
+ # Maximum number of open channels supported by this UDP module Generally
+ # UDP does not require a large number of channels, therefore it is
+ # recommended to keep this setting low.
+ max-channels = 4096
+
+ # The select loop can be used in two modes:
+ # - setting "infinite" will select without a timeout, hogging a thread
+ # - setting a positive timeout will do a bounded select call,
+ # enabling sharing of a single thread between multiple selectors
+ # (in this case you will have to use a different configuration for the
+ # selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
+ # - setting it to zero means polling, i.e. calling selectNow()
+ select-timeout = infinite
+
+ # When trying to assign a new connection to a selector and the chosen
+ # selector is at full capacity, retry selector choosing and assignment
+ # this many times before giving up
+ selector-association-retries = 10
+
+ # The maximum number of datagrams that are read in one go,
+ # higher numbers decrease latency, lower numbers increase fairness on
+ # the worker-dispatcher
+ receive-throughput = 3
+
+ # The number of bytes per direct buffer in the pool used to read or write
+ # network data from the kernel.
+ direct-buffer-size = 128 KiB
+
+ # The maximal number of direct buffers kept in the direct buffer pool for
+ # reuse.
+ direct-buffer-pool-limit = 1000
+
+ # Enable fine grained logging of what goes on inside the implementation.
+ # Be aware that this may log more than once per message sent to the actors
+ # of the tcp implementation.
+ trace-logging = off
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # to be used for running the select() calls in the selectors
+ selector-dispatcher = "akka.io.pinned-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the read/write worker actors
+ worker-dispatcher = "akka.actor.internal-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the selector management actors
+ management-dispatcher = "akka.actor.internal-dispatcher"
+ }
+
+ udp-connected {
+
+ # The number of selectors to stripe the served channels over; each of
+ # these will use one select loop on the selector-dispatcher.
+ nr-of-selectors = 1
+
+ # Maximum number of open channels supported by this UDP module Generally
+ # UDP does not require a large number of channels, therefore it is
+ # recommended to keep this setting low.
+ max-channels = 4096
+
+ # The select loop can be used in two modes:
+ # - setting "infinite" will select without a timeout, hogging a thread
+ # - setting a positive timeout will do a bounded select call,
+ # enabling sharing of a single thread between multiple selectors
+ # (in this case you will have to use a different configuration for the
+ # selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
+ # - setting it to zero means polling, i.e. calling selectNow()
+ select-timeout = infinite
+
+ # When trying to assign a new connection to a selector and the chosen
+ # selector is at full capacity, retry selector choosing and assignment
+ # this many times before giving up
+ selector-association-retries = 10
+
+ # The maximum number of datagrams that are read in one go,
+ # higher numbers decrease latency, lower numbers increase fairness on
+ # the worker-dispatcher
+ receive-throughput = 3
+
+ # The number of bytes per direct buffer in the pool used to read or write
+ # network data from the kernel.
+ direct-buffer-size = 128 KiB
+
+ # The maximal number of direct buffers kept in the direct buffer pool for
+ # reuse.
+ direct-buffer-pool-limit = 1000
+
+ # Enable fine grained logging of what goes on inside the implementation.
+ # Be aware that this may log more than once per message sent to the actors
+ # of the tcp implementation.
+ trace-logging = off
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # to be used for running the select() calls in the selectors
+ selector-dispatcher = "akka.io.pinned-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the read/write worker actors
+ worker-dispatcher = "akka.actor.internal-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the selector management actors
+ management-dispatcher = "akka.actor.internal-dispatcher"
+ }
+
+ dns {
+ # Fully qualified config path which holds the dispatcher configuration
+ # for the manager and resolver router actors.
+ # For actual router configuration see akka.actor.deployment./IO-DNS/*
+ dispatcher = "akka.actor.internal-dispatcher"
+
+ # Name of the subconfig at path akka.io.dns, see inet-address below
+ #
+ # Change to `async-dns` to use the new "native" DNS resolver,
+ # which is also capable of resolving SRV records.
+ resolver = "inet-address"
+
+ # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records.
+ # To be replaced by `akka.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does)
+ inet-address {
+ # Must implement akka.io.DnsProvider
+ provider-object = "akka.io.InetAddressDnsProvider"
+
+ # To set the time to cache name resolutions
+ # Possible values:
+ # default: sun.net.InetAddressCachePolicy.get() and getNegative()
+ # forever: cache forever
+ # never: no caching
+ # n [time unit]: positive timeout with unit, for example 30s
+ positive-ttl = default
+ negative-ttl = default
+
+ # How often to sweep out expired cache entries.
+ # Note that this interval has nothing to do with TTLs
+ cache-cleanup-interval = 120s
+ }
+
+ async-dns {
+ provider-object = "akka.io.dns.internal.AsyncDnsProvider"
+
+ # Set upper bound for caching successfully resolved dns entries
+ # if the DNS record has a smaller TTL value than the setting that
+ # will be used. Default is to use the record TTL with no cap.
+ # Possible values:
+ # forever: always use the minimum TTL from the found records
+ # never: never cache
+ # n [time unit] = cap the caching to this value
+ positive-ttl = forever
+
+ # Set how long the fact that a DNS record could not be found is
+ # cached. If a new resolution is done while the fact is cached it will
+ # be failed and not result in an actual DNS resolution. Default is
+ # to never cache.
+ # Possible values:
+ # never: never cache
+ # forever: cache a missing DNS record forever (you probably will not want to do this)
+ # n [time unit] = cache for this long
+ negative-ttl = never
+
+ # Configures nameservers to query during DNS resolution.
+ # Defaults to the nameservers that would be used by the JVM by default.
+ # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers
+ # If multiple are defined then they are tried in order until one responds
+ nameservers = default
+
+ # The time that a request is allowed to live before being discarded
+ # given no reply. The lower bound of this should always be the amount
+ # of time to reasonably expect a DNS server to reply within.
+ # If multiple name servers are provided then each gets this long to response before trying
+ # the next one
+ resolve-timeout = 5s
+
+ # How often to sweep out expired cache entries.
+ # Note that this interval has nothing to do with TTLs
+ cache-cleanup-interval = 120s
+
+ # Configures the list of search domains.
+ # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
+ # other platforms, will not make any attempt to lookup the search domains). Set to a single domain, or
+ # a list of domains, eg, [ "example.com", "example.net" ].
+ search-domains = default
+
+ # Any hosts that have a number of dots less than this will not be looked up directly, instead, a search on
+ # the search domains will be tried first. This corresponds to the ndots option in /etc/resolv.conf, see
+ # https://linux.die.net/man/5/resolver for more info.
+ # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
+ # other platforms, will default to 1).
+ ndots = default
+ }
+ }
+ }
+
+
+ # CoordinatedShutdown is an extension that will perform registered
+ # tasks in the order that is defined by the phases. It is started
+ # by calling CoordinatedShutdown(system).run(). This can be triggered
+ # by different things, for example:
+ # - JVM shutdown hook will by default run CoordinatedShutdown
+ # - Cluster node will automatically run CoordinatedShutdown when it
+ # sees itself as Exiting
+ # - A management console or other application specific command can
+ # run CoordinatedShutdown
+ coordinated-shutdown {
+ # The timeout that will be used for a phase if not specified with
+ # 'timeout' in the phase
+ default-phase-timeout = 5 s
+
+ # Terminate the ActorSystem in the last phase actor-system-terminate.
+ terminate-actor-system = on
+
+ # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate
+ # if this is set to 'on'. It is done after termination of the
+ # ActorSystem if terminate-actor-system=on, otherwise it is done
+ # immediately when the last phase is reached.
+ exit-jvm = off
+
+ # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'.
+ exit-code = 0
+
+ # Run the coordinated shutdown when the JVM process exits, e.g.
+ # via kill SIGTERM signal (SIGINT ctrl-c doesn't work).
+ # This property is related to `akka.jvm-shutdown-hooks` above.
+ run-by-jvm-shutdown-hook = on
+
+ # Run the coordinated shutdown when ActorSystem.terminate is called.
+ # Enabling this and disabling terminate-actor-system is not a supported
+ # combination (will throw ConfigurationException at startup).
+ run-by-actor-system-terminate = on
+
+ # When Coordinated Shutdown is triggered an instance of `Reason` is
+ # required. That value can be used to override the default settings.
+ # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
+ # overridden depending on the reason.
+ reason-overrides {
+ # Overrides are applied using the `reason.getClass.getName`.
+ # Overrides the `exit-code` when the `Reason` is a cluster
+ # Downing or a Cluster Join Unsuccessful event
+ "akka.actor.CoordinatedShutdown$ClusterDowningReason$" {
+ exit-code = -1
+ }
+ "akka.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" {
+ exit-code = -1
+ }
+ }
+
+ #//#coordinated-shutdown-phases
+ # CoordinatedShutdown is enabled by default and will run the tasks that
+ # are added to these phases by individual Akka modules and user logic.
+ #
+ # The phases are ordered as a DAG by defining the dependencies between the phases
+ # to make sure shutdown tasks are run in the right order.
+ #
+ # In general user tasks belong in the first few phases, but there may be use
+ # cases where you would want to hook in new phases or register tasks later in
+ # the DAG.
+ #
+ # Each phase is defined as a named config section with the
+ # following optional properties:
+ # - timeout=15s: Override the default-phase-timeout for this phase.
+ # - recover=off: If the phase fails the shutdown is aborted
+ # and depending phases will not be executed.
+ # - enabled=off: Skip all tasks registered in this phase. DO NOT use
+ # this to disable phases unless you are absolutely sure what the
+ # consequences are. Many of the built in tasks depend on other tasks
+ # having been executed in earlier phases and may break if those are disabled.
+ # depends-on=[]: Run the phase after the given phases
+ phases {
+
+ # The first pre-defined phase that applications can add tasks to.
+ # Note that more phases can be added in the application's
+ # configuration by overriding this phase with an additional
+ # depends-on.
+ before-service-unbind {
+ }
+
+ # Stop accepting new incoming connections.
+ # This is where you can register tasks that makes a server stop accepting new connections. Already
+ # established connections should be allowed to continue and complete if possible.
+ service-unbind {
+ depends-on = [before-service-unbind]
+ }
+
+ # Wait for requests that are in progress to be completed.
+ # This is where you register tasks that will wait for already established connections to complete, potentially
+ # also first telling them that it is time to close down.
+ service-requests-done {
+ depends-on = [service-unbind]
+ }
+
+ # Final shutdown of service endpoints.
+ # This is where you would add tasks that forcefully kill connections that are still around.
+ service-stop {
+ depends-on = [service-requests-done]
+ }
+
+ # Phase for custom application tasks that are to be run
+ # after service shutdown and before cluster shutdown.
+ before-cluster-shutdown {
+ depends-on = [service-stop]
+ }
+
+ # Graceful shutdown of the Cluster Sharding regions.
+ # This phase is not meant for users to add tasks to.
+ cluster-sharding-shutdown-region {
+ timeout = 10 s
+ depends-on = [before-cluster-shutdown]
+ }
+
+ # Emit the leave command for the node that is shutting down.
+ # This phase is not meant for users to add tasks to.
+ cluster-leave {
+ depends-on = [cluster-sharding-shutdown-region]
+ }
+
+ # Shutdown cluster singletons
+ # This is done as late as possible to allow the shard region shutdown triggered in
+ # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down.
+ # This phase is not meant for users to add tasks to.
+ cluster-exiting {
+ timeout = 10 s
+ depends-on = [cluster-leave]
+ }
+
+ # Wait until exiting has been completed
+ # This phase is not meant for users to add tasks to.
+ cluster-exiting-done {
+ depends-on = [cluster-exiting]
+ }
+
+ # Shutdown the cluster extension
+ # This phase is not meant for users to add tasks to.
+ cluster-shutdown {
+ depends-on = [cluster-exiting-done]
+ }
+
+ # Phase for custom application tasks that are to be run
+ # after cluster shutdown and before ActorSystem termination.
+ before-actor-system-terminate {
+ depends-on = [cluster-shutdown]
+ }
+
+ # Last phase. See terminate-actor-system and exit-jvm above.
+ # Don't add phases that depends on this phase because the
+ # dispatcher and scheduler of the ActorSystem have been shutdown.
+ # This phase is not meant for users to add tasks to.
+ actor-system-terminate {
+ timeout = 10 s
+ depends-on = [before-actor-system-terminate]
+ }
+ }
+ #//#coordinated-shutdown-phases
+ }
+
+ #//#circuit-breaker-default
+ # Configuration for circuit breakers created with the APIs accepting an id to
+ # identify or look up the circuit breaker.
+ # Note: Circuit breakers created without ids are not affected by this configuration.
+ # A child configuration section with the same name as the circuit breaker identifier
+ # will be used, with fallback to the `akka.circuit-breaker.default` section.
+ circuit-breaker {
+
+ # Default configuration that is used if a configuration section
+ # with the circuit breaker identifier is not defined.
+ default {
+ # Number of failures before opening the circuit.
+ max-failures = 10
+
+ # Duration of time after which to consider a call a failure.
+ call-timeout = 10s
+
+ # Duration of time in open state after which to attempt to close
+ # the circuit, by first entering the half-open state.
+ reset-timeout = 15s
+
+ # The upper bound of reset-timeout
+ max-reset-timeout = 36500d
+
+ # Exponential backoff
+ # For details see https://en.wikipedia.org/wiki/Exponential_backoff
+ exponential-backoff = 1.0
+
+ # Additional random delay based on this factor is added to backoff
+ # For example 0.2 adds up to 20% delay
+ # In order to skip this additional delay set as 0
+ random-factor = 0.0
+
+ # A allowlist of fqcn of Exceptions that the CircuitBreaker
+ # should not consider failures. By default all exceptions are
+ # considered failures.
+ exception-allowlist = []
+ }
+ }
+ #//#circuit-breaker-default
+
+}
--- /dev/null
+akka.actor.typed {
+
+ # List FQCN of `akka.actor.typed.ExtensionId`s which shall be loaded at actor system startup.
+ # Should be on the format: 'extensions = ["com.example.MyExtId1", "com.example.MyExtId2"]' etc.
+ # See the Akka Documentation for more info about Extensions
+ extensions = []
+
+ # List FQCN of extensions which shall be loaded at actor system startup.
+ # Library extensions are regular extensions that are loaded at startup and are
+ # available for third party library authors to enable auto-loading of extensions when
+ # present on the classpath. This is done by appending entries:
+ # 'library-extensions += "Extension"' in the library `reference.conf`.
+ #
+ # Should not be set by end user applications in 'application.conf', use the extensions property for that
+ #
+ library-extensions = ${?akka.actor.typed.library-extensions} []
+
+ # Receptionist is started eagerly to allow clustered receptionist to gather remote registrations early on.
+ library-extensions += "akka.actor.typed.receptionist.Receptionist$"
+
+ # While an actor is restarted (waiting for backoff to expire and children to stop)
+ # incoming messages and signals are stashed, and delivered later to the newly restarted
+ # behavior. This property defines the capacity in number of messages of the stash
+ # buffer. If the capacity is exceed then additional incoming messages are dropped.
+ restart-stash-capacity = 1000
+
+ # Typed mailbox defaults to the single consumber mailbox as balancing dispatcher is not supported
+ default-mailbox {
+ mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
+ }
+}
+
+# Load typed extensions by a classic extension.
+akka.library-extensions += "akka.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions"
+
+akka.actor {
+ serializers {
+ typed-misc = "akka.actor.typed.internal.MiscMessageSerializer"
+ service-key = "akka.actor.typed.internal.receptionist.ServiceKeySerializer"
+ }
+
+ serialization-identifiers {
+ "akka.actor.typed.internal.MiscMessageSerializer" = 24
+ "akka.actor.typed.internal.receptionist.ServiceKeySerializer" = 26
+ }
+
+ serialization-bindings {
+ "akka.actor.typed.ActorRef" = typed-misc
+ "akka.actor.typed.internal.adapter.ActorRefAdapter" = typed-misc
+ "akka.actor.typed.internal.receptionist.DefaultServiceKey" = service-key
+ }
+}
+
+# When using Akka Typed (having akka-actor-typed in classpath) the
+# akka.event.slf4j.Slf4jLogger is enabled instead of the DefaultLogger
+# even though it has not been explicitly defined in `akka.loggers`
+# configuration.
+#
+# Slf4jLogger will be used for all Akka classic logging via eventStream,
+# including logging from Akka internals. The Slf4jLogger is then using
+# an ordinary org.slf4j.Logger to emit the log events.
+#
+# The Slf4jLoggingFilter is also enabled automatically.
+#
+# This behavior can be disabled by setting this property to `off`.
+akka.use-slf4j = on
+
+akka.reliable-delivery {
+ producer-controller {
+
+ # To avoid head of line blocking from serialization and transfer
+ # of large messages this can be enabled.
+ # Large messages are chunked into pieces of the given size in bytes. The
+ # chunked messages are sent separatetely and assembled on the consumer side.
+ # Serialization and deserialization is performed by the ProducerController and
+ # ConsumerController respectively instead of in the remote transport layer.
+ chunk-large-messages = off
+
+ durable-queue {
+ # The ProducerController uses this timeout for the requests to
+ # the durable queue. If there is no reply within the timeout it
+ # will be retried.
+ request-timeout = 3s
+
+ # The ProducerController retries requests to the durable queue this
+ # number of times before failing.
+ retry-attempts = 10
+
+ # The ProducerController retries sending the first message with this interval
+ # until it has been confirmed.
+ resend-first-interval = 1s
+ }
+ }
+
+ consumer-controller {
+ # Number of messages in flight between ProducerController and
+ # ConsumerController. The ConsumerController requests for more messages
+ # when half of the window has been used.
+ flow-control-window = 50
+
+ # The ConsumerController resends flow control messages to the
+ # ProducerController with the resend-interval-min, and increasing
+ # it gradually to resend-interval-max when idle.
+ resend-interval-min = 2s
+ resend-interval-max = 30s
+
+ # If this is enabled lost messages will not be resent, but flow control is used.
+ # This can be more efficient since messages don't have to be
+ # kept in memory in the `ProducerController` until they have been
+ # confirmed, but the drawback is that lost messages will not be delivered.
+ only-flow-control = false
+ }
+
+ work-pulling {
+ producer-controller = ${akka.reliable-delivery.producer-controller}
+ producer-controller {
+ # Limit of how many messages that can be buffered when there
+ # is no demand from the consumer side.
+ buffer-size = 1000
+
+ # Ask timeout for sending message to worker until receiving Ack from worker
+ internal-ask-timeout = 60s
+
+ # Chunked messages not implemented for work-pulling yet. Override to not
+ # propagate property from akka.reliable-delivery.producer-controller.
+ chunk-large-messages = off
+ }
+ }
+}
--- /dev/null
+######################################
+# Akka Cluster Reference Config File #
+######################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka {
+
+ cluster {
+ # Initial contact points of the cluster.
+ # The nodes to join automatically at startup.
+ # Comma separated full URIs defined by a string on the form of
+ # "akka://system@hostname:port"
+ # Leave as empty if the node is supposed to be joined manually.
+ seed-nodes = []
+
+ # How long to wait for one of the seed nodes to reply to initial join request.
+ # When this is the first seed node and there is no positive reply from the other
+ # seed nodes within this timeout it will join itself to bootstrap the cluster.
+ # When this is not the first seed node the join attempts will be performed with
+ # this interval.
+ seed-node-timeout = 5s
+
+ # If a join request fails it will be retried after this period.
+ # Disable join retry by specifying "off".
+ retry-unsuccessful-join-after = 10s
+
+ # The joining of given seed nodes will by default be retried indefinitely until
+ # a successful join. That process can be aborted if unsuccessful by defining this
+ # timeout. When aborted it will run CoordinatedShutdown, which by default will
+ # terminate the ActorSystem. CoordinatedShutdown can also be configured to exit
+ # the JVM. It is useful to define this timeout if the seed-nodes are assembled
+ # dynamically and a restart with new seed-nodes should be tried after unsuccessful
+ # attempts.
+ shutdown-after-unsuccessful-join-seed-nodes = off
+
+ # Time margin after which shards or singletons that belonged to a downed/removed
+ # partition are created in surviving partition. The purpose of this margin is that
+ # in case of a network partition the persistent actors in the non-surviving partitions
+ # must be stopped before corresponding persistent actors are started somewhere else.
+ # This is useful if you implement downing strategies that handle network partitions,
+ # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+ # Disable with "off" or specify a duration to enable.
+ #
+ # When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
+ # the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
+ # if this down-removal-margin is undefined.
+ down-removal-margin = off
+
+ # Pluggable support for downing of nodes in the cluster.
+ # If this setting is left empty the `NoDowning` provider is used and no automatic downing will be performed.
+ #
+ # If specified the value must be the fully qualified class name of a subclass of
+ # `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
+ downing-provider-class = ""
+
+ # Artery only setting
+ # When a node has been gracefully removed, let this time pass (to allow for example
+ # cluster singleton handover to complete) and then quarantine the removed node.
+ quarantine-removed-node-after = 5s
+
+ # If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
+ # split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
+ # so they become part of the cluster even during a network split. The leader will
+ # move `Joining` members to 'WeaklyUp' after this configured duration without convergence.
+ # The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
+ allow-weakly-up-members = 7s
+
+ # The roles of this member. List of strings, e.g. roles = ["A", "B"].
+ # The roles are part of the membership information and can be used by
+ # routers or other services to distribute work to certain member types,
+ # e.g. front-end and back-end nodes.
+ # Roles are not allowed to start with "dc-" as that is reserved for the
+ # special role assigned from the data-center a node belongs to (see the
+ # multi-data-center section below)
+ roles = []
+
+ # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster
+ # is shutdown for other reasons than when leaving, e.g. when downing. This
+ # will terminate the ActorSystem when the cluster extension is shutdown.
+ run-coordinated-shutdown-when-down = on
+
+ role {
+ # Minimum required number of members of a certain role before the leader
+ # changes member status of 'Joining' members to 'Up'. Typically used together
+ # with 'Cluster.registerOnMemberUp' to defer some action, such as starting
+ # actors, until the cluster has reached a certain size.
+ # E.g. to require 2 nodes with role 'frontend' and 3 nodes with role 'backend':
+ # frontend.min-nr-of-members = 2
+ # backend.min-nr-of-members = 3
+ #<role-name>.min-nr-of-members = 1
+ }
+
+ # Application version of the deployment. Used by rolling update features
+ # to distinguish between old and new nodes. The typical convention is to use
+ # 3 digit version numbers `major.minor.patch`, but 1 or two digits are also
+ # supported.
+ #
+ # If no `.` is used it is interpreted as a single digit version number or as
+ # plain alphanumeric if it couldn't be parsed as a number.
+ #
+ # It may also have a qualifier at the end for 2 or 3 digit version numbers such
+ # as "1.2-RC1".
+ # For 1 digit with qualifier, 1-RC1, it is interpreted as plain alphanumeric.
+ #
+ # It has support for https://github.com/dwijnand/sbt-dynver format with `+` or
+ # `-` separator. The number of commits from the tag is handled as a numeric part.
+ # For example `1.0.0+3-73475dce26` is less than `1.0.10+10-ed316bd024` (3 < 10).
+ app-version = "0.0.0"
+
+ # Minimum required number of members before the leader changes member status
+ # of 'Joining' members to 'Up'. Typically used together with
+ # 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
+ # until the cluster has reached a certain size.
+ min-nr-of-members = 1
+
+ # Enable/disable info level logging of cluster events.
+ # These are logged with logger name `akka.cluster.Cluster`.
+ log-info = on
+
+ # Enable/disable verbose info-level logging of cluster events
+ # for temporary troubleshooting. Defaults to 'off'.
+ # These are logged with logger name `akka.cluster.Cluster`.
+ log-info-verbose = off
+
+ # Enable or disable JMX MBeans for management of the cluster
+ jmx.enabled = on
+
+ # Enable or disable multiple JMX MBeans in the same JVM
+ # If this is disabled, the MBean Object name is "akka:type=Cluster"
+ # If this is enabled, them MBean Object names become "akka:type=Cluster,port=$clusterPortNumber"
+ jmx.multi-mbeans-in-same-jvm = off
+
+ # how long should the node wait before starting the periodic tasks
+ # maintenance tasks?
+ periodic-tasks-initial-delay = 1s
+
+ # how often should the node send out gossip information?
+ gossip-interval = 1s
+
+ # discard incoming gossip messages if not handled within this duration
+ gossip-time-to-live = 2s
+
+ # how often should the leader perform maintenance tasks?
+ leader-actions-interval = 1s
+
+ # how often should the node move nodes, marked as unreachable by the failure
+ # detector, out of the membership ring?
+ unreachable-nodes-reaper-interval = 1s
+
+ # How often the current internal stats should be published.
+ # A value of 0s can be used to always publish the stats, when it happens.
+ # Disable with "off".
+ publish-stats-interval = off
+
+ # The id of the dispatcher to use for cluster actors.
+ # If specified you need to define the settings of the actual dispatcher.
+ use-dispatcher = "akka.actor.internal-dispatcher"
+
+ # Gossip to random node with newer or older state information, if any with
+ # this probability. Otherwise Gossip to any random live node.
+ # Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
+ gossip-different-view-probability = 0.8
+
+ # Reduced the above probability when the number of nodes in the cluster
+ # greater than this value.
+ reduce-gossip-different-view-probability = 400
+
+ # When a node is removed the removal is marked with a tombstone
+ # which is kept at least this long, after which it is pruned, if there is a partition
+ # longer than this it could lead to removed nodes being re-added to the cluster
+ prune-gossip-tombstones-after = 24h
+
+ # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
+ # [Hayashibara et al]) used by the cluster subsystem to detect unreachable
+ # members.
+ # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
+ # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
+ # i.e. around 5.5 seconds with default settings.
+ failure-detector {
+
+ # FQCN of the failure detector implementation.
+ # It must implement akka.remote.FailureDetector and have
+ # a public constructor with a com.typesafe.config.Config and
+ # akka.actor.EventStream parameter.
+ implementation-class = "akka.remote.PhiAccrualFailureDetector"
+
+ # How often keep-alive heartbeat messages should be sent to each connection.
+ heartbeat-interval = 1 s
+
+ # Defines the failure detector threshold.
+ # A low threshold is prone to generate many wrong suspicions but ensures
+ # a quick detection in the event of a real crash. Conversely, a high
+ # threshold generates fewer mistakes but needs more time to detect
+ # actual crashes.
+ threshold = 8.0
+
+ # Number of the samples of inter-heartbeat arrival times to adaptively
+ # calculate the failure timeout for connections.
+ max-sample-size = 1000
+
+ # Minimum standard deviation to use for the normal distribution in
+ # AccrualFailureDetector. Too low standard deviation might result in
+ # too much sensitivity for sudden, but normal, deviations in heartbeat
+ # inter arrival times.
+ min-std-deviation = 100 ms
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # This margin is important to be able to survive sudden, occasional,
+ # pauses in heartbeat arrivals, due to for example garbage collect or
+ # network drop.
+ acceptable-heartbeat-pause = 3 s
+
+ # Number of member nodes that each member will send heartbeat messages to,
+ # i.e. each node will be monitored by this number of other nodes.
+ monitored-by-nr-of-members = 9
+
+ # After the heartbeat request has been sent the first failure detection
+ # will start after this period, even though no heartbeat message has
+ # been received.
+ expected-response-after = 1 s
+
+ }
+
+ # Configures multi-dc specific heartbeating and other mechanisms,
+ # many of them have a direct counter-part in "one datacenter mode",
+ # in which case these settings would not be used at all - they only apply,
+ # if your cluster nodes are configured with at-least 2 different `akka.cluster.data-center` values.
+ multi-data-center {
+
+ # Defines which data center this node belongs to. It is typically used to make islands of the
+ # cluster that are colocated. This can be used to make the cluster aware that it is running
+ # across multiple availability zones or regions. It can also be used for other logical
+ # grouping of nodes.
+ self-data-center = "default"
+
+
+ # Try to limit the number of connections between data centers. Used for gossip and heartbeating.
+ # This will not limit connections created for the messaging of the application.
+ # If the cluster does not span multiple data centers, this value has no effect.
+ cross-data-center-connections = 5
+
+ # The n oldest nodes in a data center will choose to gossip to another data center with
+ # this probability. Must be a value between 0.0 and 1.0 where 0.0 means never, 1.0 means always.
+ # When a data center is first started (nodes < 5) a higher probability is used so other data
+ # centers find out about the new nodes more quickly
+ cross-data-center-gossip-probability = 0.2
+
+ failure-detector {
+ # FQCN of the failure detector implementation.
+ # It must implement akka.remote.FailureDetector and have
+ # a public constructor with a com.typesafe.config.Config and
+ # akka.actor.EventStream parameter.
+ implementation-class = "akka.remote.DeadlineFailureDetector"
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # This margin is important to be able to survive sudden, occasional,
+ # pauses in heartbeat arrivals, due to for example garbage collect or
+ # network drop.
+ acceptable-heartbeat-pause = 10 s
+
+ # How often keep-alive heartbeat messages should be sent to each connection.
+ heartbeat-interval = 3 s
+
+ # After the heartbeat request has been sent the first failure detection
+ # will start after this period, even though no heartbeat message has
+ # been received.
+ expected-response-after = 1 s
+ }
+ }
+
+ # If the tick-duration of the default scheduler is longer than the
+ # tick-duration configured here a dedicated scheduler will be used for
+ # periodic tasks of the cluster, otherwise the default scheduler is used.
+ # See akka.scheduler settings for more details.
+ scheduler {
+ tick-duration = 33ms
+ ticks-per-wheel = 512
+ }
+
+ debug {
+ # Log heartbeat events (very verbose, useful mostly when debugging heartbeating issues).
+ # These are logged with logger name `akka.cluster.ClusterHeartbeat`.
+ verbose-heartbeat-logging = off
+
+ # log verbose details about gossip
+ verbose-gossip-logging = off
+ }
+
+ configuration-compatibility-check {
+
+ # Enforce configuration compatibility checks when joining a cluster.
+ # Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
+ # when the cluster does not support this feature. Compatibility checks are always performed and warning and
+ # error messages are logged.
+ #
+ # This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
+ # cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
+ # to 'know' if its allowed to join.
+ enforce-on-join = on
+
+ # Add named entry to this section with fully qualified class name of the JoinConfigCompatChecker
+ # to enable.
+ # Checkers defined in reference.conf can be disabled by application by using empty string value
+ # for the named entry.
+ checkers {
+ akka-cluster = "akka.cluster.JoinConfigCompatCheckCluster"
+ }
+
+ # Some configuration properties might not be appropriate to transfer between nodes
+ # and such properties can be excluded from the configuration compatibility check by adding
+ # the paths of the properties to this list. Sensitive paths are grouped by key. Modules and third-party libraries
+ # can define their own set of sensitive paths without clashing with each other (as long they use unique keys).
+ #
+ # All properties starting with the paths defined here are excluded, i.e. you can add the path of a whole
+ # section here to skip everything inside that section.
+ sensitive-config-paths {
+ akka = [
+ "user.home", "user.name", "user.dir",
+ "socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
+ "akka.remote.secure-cookie",
+ "akka.remote.classic.netty.ssl.security",
+ # Pre 2.6 path, keep around to avoid sending things misconfigured with old paths
+ "akka.remote.netty.ssl.security",
+ "akka.remote.artery.ssl"
+ ]
+ }
+
+ }
+ }
+
+ actor.deployment.default.cluster {
+ # enable cluster aware router that deploys to nodes in the cluster
+ enabled = off
+
+ # Maximum number of routees that will be deployed on each cluster
+ # member node.
+ # Note that max-total-nr-of-instances defines total number of routees, but
+ # number of routees per node will not be exceeded, i.e. if you
+ # define max-total-nr-of-instances = 50 and max-nr-of-instances-per-node = 2
+ # it will deploy 2 routees per new member in the cluster, up to
+ # 25 members.
+ max-nr-of-instances-per-node = 1
+
+ # Maximum number of routees that will be deployed, in total
+ # on all nodes. See also description of max-nr-of-instances-per-node.
+ # For backwards compatibility reasons, nr-of-instances
+ # has the same purpose as max-total-nr-of-instances for cluster
+ # aware routers and nr-of-instances (if defined by user) takes
+ # precedence over max-total-nr-of-instances.
+ max-total-nr-of-instances = 10000
+
+ # Defines if routees are allowed to be located on the same node as
+ # the head router actor, or only on remote nodes.
+ # Useful for master-worker scenario where all routees are remote.
+ allow-local-routees = on
+
+ # Use members with all specified roles, or all members if undefined or empty.
+ use-roles = []
+
+ # Deprecated, since Akka 2.5.4, replaced by use-roles
+ # Use members with specified role, or all members if undefined or empty.
+ use-role = ""
+ }
+
+ # Protobuf serializer for cluster messages
+ actor {
+ serializers {
+ akka-cluster = "akka.cluster.protobuf.ClusterMessageSerializer"
+ }
+
+ serialization-bindings {
+ "akka.cluster.ClusterMessage" = akka-cluster
+ "akka.cluster.routing.ClusterRouterPool" = akka-cluster
+ }
+
+ serialization-identifiers {
+ "akka.cluster.protobuf.ClusterMessageSerializer" = 5
+ }
+
+ }
+
+}
+
+#//#split-brain-resolver
+
+# To enable the split brain resolver you first need to enable the provider in your application.conf:
+# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+akka.cluster.split-brain-resolver {
+ # Select one of the available strategies (see descriptions below):
+ # static-quorum, keep-majority, keep-oldest, down-all, lease-majority
+ active-strategy = keep-majority
+
+ #//#stable-after
+ # Time margin after which shards or singletons that belonged to a downed/removed
+ # partition are created in surviving partition. The purpose of this margin is that
+ # in case of a network partition the persistent actors in the non-surviving partitions
+ # must be stopped before corresponding persistent actors are started somewhere else.
+ # This is useful if you implement downing strategies that handle network partitions,
+ # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+ # Decision is taken by the strategy when there has been no membership or
+ # reachability changes for this duration, i.e. the cluster state is stable.
+ stable-after = 20s
+ #//#stable-after
+
+ # When reachability observations by the failure detector are changed the SBR decisions
+ # are deferred until there are no changes within the 'stable-after' duration.
+ # If this continues for too long it might be an indication of an unstable system/network
+ # and it could result in delayed or conflicting decisions on separate sides of a network
+ # partition.
+ # As a precaution for that scenario all nodes are downed if no decision is made within
+ # `stable-after + down-all-when-unstable` from the first unreachability event.
+ # The measurement is reset if all unreachable have been healed, downed or removed, or
+ # if there are no changes within `stable-after * 2`.
+ # The value can be on, off, or a duration.
+ # By default it is 'on' and then it is derived to be 3/4 of stable-after, but not less than
+ # 4 seconds.
+ down-all-when-unstable = on
+
+}
+#//#split-brain-resolver
+
+# Down the unreachable nodes if the number of remaining nodes are greater than or equal to
+# the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down that
+# side of the partition. In other words, the 'size' defines the minimum number of nodes
+# that the cluster must have to be operational. If there are unreachable nodes when starting
+# up the cluster, before reaching this limit, the cluster may shutdown itself immediately.
+# This is not an issue if you start all nodes at approximately the same time.
+#
+# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
+# then both sides may down each other and thereby form two separate clusters. For example,
+# quorum-size configured to 3 in a 6 node cluster may result in a split where each side
+# consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by
+# itself. A warning is logged if this recommendation is violated.
+#//#static-quorum
+akka.cluster.split-brain-resolver.static-quorum {
+ # minimum number of nodes that the cluster must have
+ quorum-size = undefined
+
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#static-quorum
+
+# Down the unreachable nodes if the current node is in the majority part based the last known
+# membership information. Otherwise down the reachable nodes, i.e. the own part. If the
+# the parts are of equal size the part containing the node with the lowest address is kept.
+# Note that if there are more than two partitions and none is in majority each part
+# will shutdown itself, terminating the whole cluster.
+#//#keep-majority
+akka.cluster.split-brain-resolver.keep-majority {
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#keep-majority
+
+# Down the part that does not contain the oldest member (current singleton).
+#
+# There is one exception to this rule if 'down-if-alone' is defined to 'on'.
+# Then, if the oldest node has partitioned from all other nodes the oldest
+# will down itself and keep all other nodes running. The strategy will not
+# down the single oldest node when it is the only remaining node in the cluster.
+#
+# Note that if the oldest node crashes the others will remove it from the cluster
+# when 'down-if-alone' is 'on', otherwise they will down themselves if the
+# oldest node crashes, i.e. shutdown the whole cluster together with the oldest node.
+#//#keep-oldest
+akka.cluster.split-brain-resolver.keep-oldest {
+ # Enable downing of the oldest node when it is partitioned from all other nodes
+ down-if-alone = on
+
+ # if the 'role' is defined the decision is based only on members with that 'role',
+ # i.e. using the oldest member (singleton) within the nodes with that role
+ role = ""
+}
+#//#keep-oldest
+
+# Keep the part that can acquire the lease, and down the other part.
+# Best effort is to keep the side that has most nodes, i.e. the majority side.
+# This is achieved by adding a delay before trying to acquire the lease on the
+# minority side.
+#//#lease-majority
+akka.cluster.split-brain-resolver.lease-majority {
+ lease-implementation = ""
+
+ # The recommended format for the lease name is "<service-name>-akka-sbr".
+ # When lease-name is not defined, the name will be set to "<actor-system-name>-akka-sbr"
+ lease-name = ""
+
+ # This delay is used on the minority side before trying to acquire the lease,
+ # as an best effort to try to keep the majority side.
+ acquire-lease-delay-for-minority = 2s
+
+ # Release the lease after this duration.
+ release-after = 40s
+
+ # If the 'role' is defined the majority/minority is based only on members with that 'role'.
+ role = ""
+}
+#//#lease-majority
--- /dev/null
+############################################
+# Akka Cluster Tools Reference Config File #
+############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# //#pub-sub-ext-config
+# Settings for the DistributedPubSub extension
+akka.cluster.pub-sub {
+ # Actor name of the mediator actor, /system/distributedPubSubMediator
+ name = distributedPubSubMediator
+
+ # Start the mediator on members tagged with this role.
+ # All members are used if undefined or empty.
+ role = ""
+
+ # The routing logic to use for 'Send'
+ # Possible values: random, round-robin, broadcast
+ routing-logic = random
+
+ # How often the DistributedPubSubMediator should send out gossip information
+ gossip-interval = 1s
+
+ # Removed entries are pruned after this duration
+ removed-time-to-live = 120s
+
+ # Maximum number of elements to transfer in one message when synchronizing the registries.
+ # Next chunk will be transferred in next round of gossip.
+ max-delta-elements = 3000
+
+ # When a message is published to a topic with no subscribers send it to the dead letters.
+ send-to-dead-letters-when-no-subscribers = on
+
+ # The id of the dispatcher to use for DistributedPubSubMediator actors.
+ # If specified you need to define the settings of the actual dispatcher.
+ use-dispatcher = "akka.actor.internal-dispatcher"
+}
+# //#pub-sub-ext-config
+
+# Protobuf serializer for cluster DistributedPubSubMeditor messages
+akka.actor {
+ serializers {
+ akka-pubsub = "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer"
+ }
+ serialization-bindings {
+ "akka.cluster.pubsub.DistributedPubSubMessage" = akka-pubsub
+ "akka.cluster.pubsub.DistributedPubSubMediator$Internal$SendToOneSubscriber" = akka-pubsub
+ }
+ serialization-identifiers {
+ "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" = 9
+ }
+}
+
+
+# //#receptionist-ext-config
+# Settings for the ClusterClientReceptionist extension
+akka.cluster.client.receptionist {
+ # Actor name of the ClusterReceptionist actor, /system/receptionist
+ name = receptionist
+
+ # Start the receptionist on members tagged with this role.
+ # All members are used if undefined or empty.
+ role = ""
+
+ # The receptionist will send this number of contact points to the client
+ number-of-contacts = 3
+
+ # The actor that tunnel response messages to the client will be stopped
+ # after this time of inactivity.
+ response-tunnel-receive-timeout = 30s
+
+ # The id of the dispatcher to use for ClusterReceptionist actors.
+ # If specified you need to define the settings of the actual dispatcher.
+ use-dispatcher = "akka.actor.internal-dispatcher"
+
+ # How often failure detection heartbeat messages should be received for
+ # each ClusterClient
+ heartbeat-interval = 2s
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # The ClusterReceptionist is using the akka.remote.DeadlineFailureDetector, which
+ # will trigger if there are no heartbeats within the duration
+ # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
+ # the default settings.
+ acceptable-heartbeat-pause = 13s
+
+ # Failure detection checking interval for checking all ClusterClients
+ failure-detection-interval = 2s
+}
+# //#receptionist-ext-config
+
+# //#cluster-client-config
+# Settings for the ClusterClient
+akka.cluster.client {
+ # Actor paths of the ClusterReceptionist actors on the servers (cluster nodes)
+ # that the client will try to contact initially. It is mandatory to specify
+ # at least one initial contact.
+ # Comma separated full actor paths defined by a string on the form of
+ # "akka://system@hostname:port/system/receptionist"
+ initial-contacts = []
+
+ # Interval at which the client retries to establish contact with one of
+ # ClusterReceptionist on the servers (cluster nodes)
+ establishing-get-contacts-interval = 3s
+
+ # Interval at which the client will ask the ClusterReceptionist for
+ # new contact points to be used for next reconnect.
+ refresh-contacts-interval = 60s
+
+ # How often failure detection heartbeat messages should be sent
+ heartbeat-interval = 2s
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # The ClusterClient is using the akka.remote.DeadlineFailureDetector, which
+ # will trigger if there are no heartbeats within the duration
+ # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
+ # the default settings.
+ acceptable-heartbeat-pause = 13s
+
+ # If connection to the receptionist is not established the client will buffer
+ # this number of messages and deliver them the connection is established.
+ # When the buffer is full old messages will be dropped when new messages are sent
+ # via the client. Use 0 to disable buffering, i.e. messages will be dropped
+ # immediately if the location of the singleton is unknown.
+ # Maximum allowed buffer size is 10000.
+ buffer-size = 1000
+
+ # If connection to the receiptionist is lost and the client has not been
+ # able to acquire a new connection for this long the client will stop itself.
+ # This duration makes it possible to watch the cluster client and react on a more permanent
+ # loss of connection with the cluster, for example by accessing some kind of
+ # service registry for an updated set of initial contacts to start a new cluster client with.
+ # If this is not wanted it can be set to "off" to disable the timeout and retry
+ # forever.
+ reconnect-timeout = off
+}
+# //#cluster-client-config
+
+# Protobuf serializer for ClusterClient messages
+akka.actor {
+ serializers {
+ akka-cluster-client = "akka.cluster.client.protobuf.ClusterClientMessageSerializer"
+ }
+ serialization-bindings {
+ "akka.cluster.client.ClusterClientMessage" = akka-cluster-client
+ }
+ serialization-identifiers {
+ "akka.cluster.client.protobuf.ClusterClientMessageSerializer" = 15
+ }
+}
+
+# //#singleton-config
+akka.cluster.singleton {
+ # The actor name of the child singleton actor.
+ singleton-name = "singleton"
+
+ # Singleton among the nodes tagged with specified role.
+ # If the role is not specified it's a singleton among all nodes in the cluster.
+ role = ""
+
+ # When a node is becoming oldest it sends hand-over request to previous oldest,
+ # that might be leaving the cluster. This is retried with this interval until
+ # the previous oldest confirms that the hand over has started or the previous
+ # oldest member is removed from the cluster (+ akka.cluster.down-removal-margin).
+ hand-over-retry-interval = 1s
+
+ # The number of retries are derived from hand-over-retry-interval and
+ # akka.cluster.down-removal-margin (or ClusterSingletonManagerSettings.removalMargin),
+ # but it will never be less than this property.
+ # After the hand over retries and it's still not able to exchange the hand over messages
+ # with the previous oldest it will restart itself by throwing ClusterSingletonManagerIsStuck,
+ # to start from a clean state. After that it will still not start the singleton instance
+ # until the previous oldest node has been removed from the cluster.
+ # On the other side, on the previous oldest node, the same number of retries - 3 are used
+ # and after that the singleton instance is stopped.
+ # For large clusters it might be necessary to increase this to avoid too early timeouts while
+ # gossip dissemination of the Leaving to Exiting phase occurs. For normal leaving scenarios
+ # it will not be a quicker hand over by reducing this value, but in extreme failure scenarios
+ # the recovery might be faster.
+ min-number-of-hand-over-retries = 15
+
+ # Config path of the lease to be taken before creating the singleton actor
+ # if the lease is lost then the actor is restarted and it will need to re-acquire the lease
+ # the default is no lease
+ use-lease = ""
+
+ # The interval between retries for acquiring the lease
+ lease-retry-interval = 5s
+}
+# //#singleton-config
+
+# //#singleton-proxy-config
+akka.cluster.singleton-proxy {
+ # The actor name of the singleton actor that is started by the ClusterSingletonManager
+ singleton-name = ${akka.cluster.singleton.singleton-name}
+
+ # The role of the cluster nodes where the singleton can be deployed.
+ # Corresponding to the role used by the `ClusterSingletonManager`. If the role is not
+ # specified it's a singleton among all nodes in the cluster, and the `ClusterSingletonManager`
+ # must then also be configured in same way.
+ role = ""
+
+ # Interval at which the proxy will try to resolve the singleton instance.
+ singleton-identification-interval = 1s
+
+ # If the location of the singleton is unknown the proxy will buffer this
+ # number of messages and deliver them when the singleton is identified.
+ # When the buffer is full old messages will be dropped when new messages are
+ # sent via the proxy.
+ # Use 0 to disable buffering, i.e. messages will be dropped immediately if
+ # the location of the singleton is unknown.
+ # Maximum allowed buffer size is 10000.
+ buffer-size = 1000
+}
+# //#singleton-proxy-config
+
+# Serializer for cluster ClusterSingleton messages
+akka.actor {
+ serializers {
+ akka-singleton = "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer"
+ }
+ serialization-bindings {
+ "akka.cluster.singleton.ClusterSingletonMessage" = akka-singleton
+ }
+ serialization-identifiers {
+ "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" = 14
+ }
+}
--- /dev/null
+############################################
+# Akka Cluster Typed Reference Config File #
+############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka.cluster.typed.receptionist {
+ # Updates with Distributed Data are done with this consistency level.
+ # Possible values: local, majority, all, 2, 3, 4 (n)
+ write-consistency = local
+
+ # Period task to remove actor references that are hosted by removed nodes,
+ # in case of abrupt termination.
+ pruning-interval = 3 s
+
+ # The periodic task to remove actor references that are hosted by removed nodes
+ # will only remove entries older than this duration. The reason for this
+ # is to avoid removing entries of nodes that haven't been visible as joining.
+ prune-removed-older-than = 60 s
+
+ # Shard the services over this many Distributed Data keys, with large amounts of different
+ # service keys storing all of them in the same Distributed Data entry would lead to large updates
+ # etc. instead the keys are sharded across this number of keys. This must be the same on all nodes
+ # in a cluster, changing it requires a full cluster restart (stopping all nodes before starting them again)
+ distributed-key-count = 5
+
+ # Settings for the Distributed Data replicator used by Receptionist.
+ # Same layout as akka.cluster.distributed-data.
+ distributed-data = ${akka.cluster.distributed-data}
+ # make sure that by default it's for all roles (Play loads config in different way)
+ distributed-data.role = ""
+}
+
+akka.cluster.ddata.typed {
+ # The timeout to use for ask operations in ReplicatorMessageAdapter.
+ # This should be longer than the timeout given in Replicator.WriteConsistency and
+ # Replicator.ReadConsistency. The replicator will always send a reply within those
+ # timeouts so the unexpected ask timeout should not occur, but for cleanup in a
+ # failure situation it must still exist.
+ # If askUpdate, askGet or askDelete takes longer then this timeout a
+ # java.util.concurrent.TimeoutException will be thrown by the requesting actor and
+ # may be handled by supervision.
+ replicator-message-adapter-unexpected-ask-timeout = 20 s
+}
+
+akka {
+ actor {
+ serialization-identifiers {
+ "akka.cluster.typed.internal.AkkaClusterTypedSerializer" = 28
+ "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer" = 36
+ }
+ serializers {
+ typed-cluster = "akka.cluster.typed.internal.AkkaClusterTypedSerializer"
+ reliable-delivery = "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer"
+ }
+ serialization-bindings {
+ "akka.cluster.typed.internal.receptionist.ClusterReceptionist$Entry" = typed-cluster
+ "akka.actor.typed.internal.pubsub.TopicImpl$MessagePublished" = typed-cluster
+ "akka.actor.typed.delivery.internal.DeliverySerializable" = reliable-delivery
+ }
+ }
+ cluster.configuration-compatibility-check.checkers {
+ receptionist = "akka.cluster.typed.internal.receptionist.ClusterReceptionistConfigCompatChecker"
+ }
+}
--- /dev/null
+##############################################
+# Akka Distributed DataReference Config File #
+##############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+
+#//#distributed-data
+# Settings for the DistributedData extension
+akka.cluster.distributed-data {
+ # Actor name of the Replicator actor, /system/ddataReplicator
+ name = ddataReplicator
+
+ # Replicas are running on members tagged with this role.
+ # All members are used if undefined or empty.
+ role = ""
+
+ # How often the Replicator should send out gossip information
+ gossip-interval = 2 s
+
+ # How often the subscribers will be notified of changes, if any
+ notify-subscribers-interval = 500 ms
+
+ # Logging of data with payload size in bytes larger than
+ # this value. Maximum detected size per key is logged once,
+ # with an increase threshold of 10%.
+ # It can be disabled by setting the property to off.
+ log-data-size-exceeding = 10 KiB
+
+ # Maximum number of entries to transfer in one round of gossip exchange when
+ # synchronizing the replicas. Next chunk will be transferred in next round of gossip.
+ # The actual number of data entries in each Gossip message is dynamically
+ # adjusted to not exceed the maximum remote message size (maximum-frame-size).
+ max-delta-elements = 500
+
+ # The id of the dispatcher to use for Replicator actors.
+ # If specified you need to define the settings of the actual dispatcher.
+ use-dispatcher = "akka.actor.internal-dispatcher"
+
+ # How often the Replicator checks for pruning of data associated with
+ # removed cluster nodes. If this is set to 'off' the pruning feature will
+ # be completely disabled.
+ pruning-interval = 120 s
+
+ # How long time it takes to spread the data to all other replica nodes.
+ # This is used when initiating and completing the pruning process of data associated
+ # with removed cluster nodes. The time measurement is stopped when any replica is
+ # unreachable, but it's still recommended to configure this with certain margin.
+ # It should be in the magnitude of minutes even though typical dissemination time
+ # is shorter (grows logarithmic with number of nodes). There is no advantage of
+ # setting this too low. Setting it to large value will delay the pruning process.
+ max-pruning-dissemination = 300 s
+
+ # The markers of that pruning has been performed for a removed node are kept for this
+ # time and thereafter removed. If and old data entry that was never pruned is somehow
+ # injected and merged with existing data after this time the value will not be correct.
+ # This would be possible (although unlikely) in the case of a long network partition.
+ # It should be in the magnitude of hours. For durable data it is configured by
+ # 'akka.cluster.distributed-data.durable.pruning-marker-time-to-live'.
+ pruning-marker-time-to-live = 6 h
+
+ # Serialized Write and Read messages are cached when they are sent to
+ # several nodes. If no further activity they are removed from the cache
+ # after this duration.
+ serializer-cache-time-to-live = 10s
+
+ # Update and Get operations are sent to oldest nodes first.
+ # This is useful together with Cluster Singleton, which is running on oldest nodes.
+ prefer-oldest = off
+
+ # Settings for delta-CRDT
+ delta-crdt {
+ # enable or disable delta-CRDT replication
+ enabled = on
+
+ # Some complex deltas grow in size for each update and above this
+ # threshold such deltas are discarded and sent as full state instead.
+ # This is number of elements or similar size hint, not size in bytes.
+ max-delta-size = 50
+ }
+
+ durable {
+ # List of keys that are durable. Prefix matching is supported by using * at the
+ # end of a key.
+ keys = []
+
+ # The markers of that pruning has been performed for a removed node are kept for this
+ # time and thereafter removed. If and old data entry that was never pruned is
+ # injected and merged with existing data after this time the value will not be correct.
+ # This would be possible if replica with durable data didn't participate in the pruning
+ # (e.g. it was shutdown) and later started after this time. A durable replica should not
+ # be stopped for longer time than this duration and if it is joining again after this
+ # duration its data should first be manually removed (from the lmdb directory).
+ # It should be in the magnitude of days. Note that there is a corresponding setting
+ # for non-durable data: 'akka.cluster.distributed-data.pruning-marker-time-to-live'.
+ pruning-marker-time-to-live = 10 d
+
+ # Fully qualified class name of the durable store actor. It must be a subclass
+ # of akka.actor.Actor and handle the protocol defined in
+ # akka.cluster.ddata.DurableStore. The class must have a constructor with
+ # com.typesafe.config.Config parameter.
+ store-actor-class = akka.cluster.ddata.LmdbDurableStore
+
+ use-dispatcher = akka.cluster.distributed-data.durable.pinned-store
+
+ pinned-store {
+ executor = thread-pool-executor
+ type = PinnedDispatcher
+ }
+
+ # Config for the LmdbDurableStore
+ lmdb {
+ # Directory of LMDB file. There are two options:
+ # 1. A relative or absolute path to a directory that ends with 'ddata'
+ # the full name of the directory will contain name of the ActorSystem
+ # and its remote port.
+ # 2. Otherwise the path is used as is, as a relative or absolute path to
+ # a directory.
+ #
+ # When running in production you may want to configure this to a specific
+ # path (alt 2), since the default directory contains the remote port of the
+ # actor system to make the name unique. If using a dynamically assigned
+ # port (0) it will be different each time and the previously stored data
+ # will not be loaded.
+ dir = "ddata"
+
+ # Size in bytes of the memory mapped file.
+ map-size = 100 MiB
+
+ # Accumulate changes before storing improves performance with the
+ # risk of losing the last writes if the JVM crashes.
+ # The interval is by default set to 'off' to write each update immediately.
+ # Enabling write behind by specifying a duration, e.g. 200ms, is especially
+ # efficient when performing many writes to the same key, because it is only
+ # the last value for each key that will be serialized and stored.
+ # write-behind-interval = 200 ms
+ write-behind-interval = off
+ }
+ }
+
+}
+#//#distributed-data
+
+# Protobuf serializer for cluster DistributedData messages
+akka.actor {
+ serializers {
+ akka-data-replication = "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer"
+ akka-replicated-data = "akka.cluster.ddata.protobuf.ReplicatedDataSerializer"
+ }
+ serialization-bindings {
+ "akka.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication
+ "akka.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data
+ }
+ serialization-identifiers {
+ "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11
+ "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" = 12
+ }
+}
--- /dev/null
+###########################################################
+# Akka Persistence Extension Reference Configuration File #
+###########################################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits in your application.conf in order to override these settings.
+
+# Directory of persistence journal and snapshot store plugins is available at the
+# Akka Community Projects page https://akka.io/community/
+
+# Default persistence extension settings.
+akka.persistence {
+
+ # When starting many persistent actors at the same time the journal
+ # and its data store is protected from being overloaded by limiting number
+ # of recoveries that can be in progress at the same time. When
+ # exceeding the limit the actors will wait until other recoveries have
+ # been completed.
+ max-concurrent-recoveries = 50
+
+ # Fully qualified class name providing a default internal stash overflow strategy.
+ # It needs to be a subclass of akka.persistence.StashOverflowStrategyConfigurator.
+ # The default strategy throws StashOverflowException.
+ internal-stash-overflow-strategy = "akka.persistence.ThrowExceptionConfigurator"
+ journal {
+ # Absolute path to the journal plugin configuration entry used by
+ # persistent actor by default.
+ # Persistent actor can override `journalPluginId` method
+ # in order to rely on a different journal plugin.
+ plugin = ""
+ # List of journal plugins to start automatically. Use "" for the default journal plugin.
+ auto-start-journals = []
+ }
+ snapshot-store {
+ # Absolute path to the snapshot plugin configuration entry used by
+ # persistent actor by default.
+ # Persistent actor can override `snapshotPluginId` method
+ # in order to rely on a different snapshot plugin.
+ # It is not mandatory to specify a snapshot store plugin.
+ # If you don't use snapshots you don't have to configure it.
+ # Note that Cluster Sharding is using snapshots, so if you
+ # use Cluster Sharding you need to define a snapshot store plugin.
+ plugin = ""
+ # List of snapshot stores to start automatically. Use "" for the default snapshot store.
+ auto-start-snapshot-stores = []
+ }
+ # used as default-snapshot store if no plugin configured
+ # (see `akka.persistence.snapshot-store`)
+ no-snapshot-store {
+ class = "akka.persistence.snapshot.NoSnapshotStore"
+ }
+ # Default reliable delivery settings.
+ at-least-once-delivery {
+ # Interval between re-delivery attempts.
+ redeliver-interval = 5s
+ # Maximum number of unconfirmed messages that will be sent in one
+ # re-delivery burst.
+ redelivery-burst-limit = 10000
+ # After this number of delivery attempts a
+ # `ReliableRedelivery.UnconfirmedWarning`, message will be sent to the actor.
+ warn-after-number-of-unconfirmed-attempts = 5
+ # Maximum number of unconfirmed messages that an actor with
+ # AtLeastOnceDelivery is allowed to hold in memory.
+ max-unconfirmed-messages = 100000
+ }
+ # Default persistent extension thread pools.
+ dispatchers {
+ # Dispatcher used by every plugin which does not declare explicit
+ # `plugin-dispatcher` field.
+ default-plugin-dispatcher {
+ type = PinnedDispatcher
+ executor = "thread-pool-executor"
+ }
+ # Default dispatcher for message replay.
+ default-replay-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-max = 8
+ }
+ }
+ # Default dispatcher for streaming snapshot IO
+ default-stream-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-max = 8
+ }
+ }
+ }
+
+ # Fallback settings for journal plugin configurations.
+ # These settings are used if they are not defined in plugin config section.
+ journal-plugin-fallback {
+
+ # Fully qualified class name providing journal plugin api implementation.
+ # It is mandatory to specify this property.
+ # The class must have a constructor without parameters or constructor with
+ # one `com.typesafe.config.Config` parameter.
+ class = ""
+
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+
+ # Dispatcher for message replay.
+ replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+
+ # Removed: used to be the Maximum size of a persistent message batch written to the journal.
+ # Now this setting is without function, PersistentActor will write as many messages
+ # as it has accumulated since the last write.
+ max-message-batch-size = 200
+
+ # If there is more time in between individual events gotten from the journal
+ # recovery than this the recovery will fail.
+ # Note that it also affects reading the snapshot before replaying events on
+ # top of it, even though it is configured for the journal.
+ recovery-event-timeout = 30s
+
+ circuit-breaker {
+ max-failures = 10
+ call-timeout = 10s
+ reset-timeout = 30s
+ }
+
+ # The replay filter can detect a corrupt event stream by inspecting
+ # sequence numbers and writerUuid when replaying events.
+ replay-filter {
+ # What the filter should do when detecting invalid events.
+ # Supported values:
+ # `repair-by-discard-old` : discard events from old writers,
+ # warning is logged
+ # `fail` : fail the replay, error is logged
+ # `warn` : log warning but emit events untouched
+ # `off` : disable this feature completely
+ mode = repair-by-discard-old
+
+ # It uses a look ahead buffer for analyzing the events.
+ # This defines the size (in number of events) of the buffer.
+ window-size = 100
+
+ # How many old writerUuid to remember
+ max-old-writers = 10
+
+ # Set this to `on` to enable detailed debug logging of each
+ # replayed event.
+ debug = off
+ }
+ }
+
+ # Fallback settings for snapshot store plugin configurations
+ # These settings are used if they are not defined in plugin config section.
+ snapshot-store-plugin-fallback {
+
+ # Fully qualified class name providing snapshot store plugin api
+ # implementation. It is mandatory to specify this property if
+ # snapshot store is enabled.
+ # The class must have a constructor without parameters or constructor with
+ # one `com.typesafe.config.Config` parameter.
+ class = ""
+
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+
+ circuit-breaker {
+ max-failures = 5
+ call-timeout = 20s
+ reset-timeout = 60s
+ }
+
+ # Set this to true if successful loading of snapshot is not necessary.
+ # This can be useful when it is alright to ignore snapshot in case of
+ # for example deserialization errors. When snapshot loading fails it will instead
+ # recover by replaying all events.
+ # Don't set to true if events are deleted because that would
+ # result in wrong recovered state if snapshot load fails.
+ snapshot-is-optional = false
+
+ }
+
+ fsm {
+ # PersistentFSM saves snapshots after this number of persistent
+ # events. Snapshots are used to reduce recovery times.
+ # When you disable this feature, specify snapshot-after = off.
+ # To enable the feature, specify a number like snapshot-after = 1000
+ # which means a snapshot is taken after persisting every 1000 events.
+ snapshot-after = off
+ }
+
+ # DurableStateStore settings
+ state {
+ # Absolute path to the KeyValueStore plugin configuration entry used by
+ # DurableStateBehavior actors by default.
+ # DurableStateBehavior can override `durableStateStorePluginId` method (`withDurableStateStorePluginId`)
+ # in order to rely on a different plugin.
+ plugin = ""
+ }
+
+ # Fallback settings for DurableStateStore plugin configurations
+ # These settings are used if they are not defined in plugin config section.
+ state-plugin-fallback {
+ recovery-timeout = 30s
+ }
+}
+
+# Protobuf serialization for the persistent extension messages.
+akka.actor {
+ serializers {
+ akka-persistence-message = "akka.persistence.serialization.MessageSerializer"
+ akka-persistence-snapshot = "akka.persistence.serialization.SnapshotSerializer"
+ }
+ serialization-bindings {
+ "akka.persistence.serialization.Message" = akka-persistence-message
+ "akka.persistence.serialization.Snapshot" = akka-persistence-snapshot
+ }
+ serialization-identifiers {
+ "akka.persistence.serialization.MessageSerializer" = 7
+ "akka.persistence.serialization.SnapshotSerializer" = 8
+ }
+}
+
+
+###################################################
+# Persistence plugins included with the extension #
+###################################################
+
+# In-memory journal plugin.
+akka.persistence.journal.inmem {
+ # Class name of the plugin.
+ class = "akka.persistence.journal.inmem.InmemJournal"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.actor.default-dispatcher"
+
+ # Turn this on to test serialization of the events
+ test-serialization = off
+}
+
+# Local file system snapshot store plugin.
+akka.persistence.snapshot-store.local {
+ # Class name of the plugin.
+ class = "akka.persistence.snapshot.local.LocalSnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+ # Dispatcher for streaming snapshot IO.
+ stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher"
+ # Storage location of snapshot files.
+ dir = "snapshots"
+ # Number load attempts when recovering from the latest snapshot fails
+ # yet older snapshot files are available. Each recovery attempt will try
+ # to recover using an older than previously failed-on snapshot file
+ # (if any are present). If all attempts fail the recovery will fail and
+ # the persistent actor will be stopped.
+ max-load-attempts = 3
+}
+
+# LevelDB journal plugin.
+# Note: this plugin requires explicit LevelDB dependency, see below.
+akka.persistence.journal.leveldb {
+ # Class name of the plugin.
+ class = "akka.persistence.journal.leveldb.LeveldbJournal"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+ # Dispatcher for message replay.
+ replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+ # Storage location of LevelDB files.
+ dir = "journal"
+ # Use fsync on write.
+ fsync = on
+ # Verify checksum on read.
+ checksum = off
+ # Native LevelDB (via JNI) or LevelDB Java port.
+ native = on
+ # Number of deleted messages per persistence id that will trigger journal compaction
+ compaction-intervals {
+ }
+}
+
+# Shared LevelDB journal plugin (for testing only).
+# Note: this plugin requires explicit LevelDB dependency, see below.
+akka.persistence.journal.leveldb-shared {
+ # Class name of the plugin.
+ class = "akka.persistence.journal.leveldb.SharedLeveldbJournal"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.actor.default-dispatcher"
+ # Timeout for async journal operations.
+ timeout = 10s
+ store {
+ # Dispatcher for shared store actor.
+ store-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+ # Dispatcher for message replay.
+ replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+ # Storage location of LevelDB files.
+ dir = "journal"
+ # Use fsync on write.
+ fsync = on
+ # Verify checksum on read.
+ checksum = off
+ # Native LevelDB (via JNI) or LevelDB Java port.
+ native = on
+ # Number of deleted messages per persistence id that will trigger journal compaction
+ compaction-intervals {
+ }
+ }
+}
+
+akka.persistence.journal.proxy {
+ # Class name of the plugin.
+ class = "akka.persistence.journal.PersistencePluginProxy"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.actor.default-dispatcher"
+ # Set this to on in the configuration of the ActorSystem
+ # that will host the target journal
+ start-target-journal = off
+ # The journal plugin config path to use for the target journal
+ target-journal-plugin = ""
+ # The address of the proxy to connect to from other nodes. Optional setting.
+ target-journal-address = ""
+ # Initialization timeout of target lookup
+ init-timeout = 10s
+}
+
+akka.persistence.snapshot-store.proxy {
+ # Class name of the plugin.
+ class = "akka.persistence.journal.PersistencePluginProxy"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.actor.default-dispatcher"
+ # Set this to on in the configuration of the ActorSystem
+ # that will host the target snapshot-store
+ start-target-snapshot-store = off
+ # The journal plugin config path to use for the target snapshot-store
+ target-snapshot-store-plugin = ""
+ # The address of the proxy to connect to from other nodes. Optional setting.
+ target-snapshot-store-address = ""
+ # Initialization timeout of target lookup
+ init-timeout = 10s
+}
+
+# LevelDB persistence requires the following dependency declarations:
+#
+# SBT:
+# "org.iq80.leveldb" % "leveldb" % "0.7"
+# "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8"
+#
+# Maven:
+# <dependency>
+# <groupId>org.iq80.leveldb</groupId>
+# <artifactId>leveldb</artifactId>
+# <version>0.7</version>
+# </dependency>
+# <dependency>
+# <groupId>org.fusesource.leveldbjni</groupId>
+# <artifactId>leveldbjni-all</artifactId>
+# <version>1.8</version>
+# </dependency>
--- /dev/null
+include "actor_reference.conf"
+include "actor_typed_reference.conf"
+include "cluster_reference.conf"
+include "cluster_tools_reference.conf"
+include "cluster_typed_reference.conf"
+include "distributed_data_reference.conf"
+include "persistence_reference.conf"
+include "remote_reference.conf"
+include "stream_reference.conf"
--- /dev/null
+#//#shared
+#####################################
+# Akka Remote Reference Config File #
+#####################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# comments about akka.actor settings left out where they are already in akka-
+# actor.jar, because otherwise they would be repeated in config rendering.
+#
+# For the configuration of the new remoting implementation (Artery) please look
+# at the bottom section of this file as it is listed separately.
+
+akka {
+
+ actor {
+
+ serializers {
+ akka-containers = "akka.remote.serialization.MessageContainerSerializer"
+ akka-misc = "akka.remote.serialization.MiscMessageSerializer"
+ artery = "akka.remote.serialization.ArteryMessageSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ daemon-create = "akka.remote.serialization.DaemonMsgCreateSerializer"
+ akka-system-msg = "akka.remote.serialization.SystemMessageSerializer"
+ }
+
+ serialization-bindings {
+ "akka.actor.ActorSelectionMessage" = akka-containers
+
+ "akka.remote.DaemonMsgCreate" = daemon-create
+
+ "akka.remote.artery.ArteryMessage" = artery
+
+ # Since akka.protobuf.Message does not extend Serializable but
+ # GeneratedMessage does, need to use the more specific one here in order
+ # to avoid ambiguity.
+ # This is only loaded if akka-protobuf is on the classpath
+ # It should not be used and users should migrate to using the protobuf classes
+ # directly
+ # Remove in 2.7
+ "akka.protobuf.GeneratedMessage" = proto
+
+ "akka.protobufv3.internal.GeneratedMessageV3" = proto
+
+ # Since com.google.protobuf.Message does not extend Serializable but
+ # GeneratedMessage does, need to use the more specific one here in order
+ # to avoid ambiguity.
+ # This com.google.protobuf serialization binding is only used if the class can be loaded,
+ # i.e. com.google.protobuf dependency has been added in the application project.
+ "com.google.protobuf.GeneratedMessage" = proto
+ "com.google.protobuf.GeneratedMessageV3" = proto
+
+ "akka.actor.Identify" = akka-misc
+ "akka.actor.ActorIdentity" = akka-misc
+ "scala.Some" = akka-misc
+ "scala.None$" = akka-misc
+ "java.util.Optional" = akka-misc
+ "akka.actor.Status$Success" = akka-misc
+ "akka.actor.Status$Failure" = akka-misc
+ "akka.actor.ActorRef" = akka-misc
+ "akka.actor.PoisonPill$" = akka-misc
+ "akka.actor.Kill$" = akka-misc
+ "akka.remote.RemoteWatcher$Heartbeat$" = akka-misc
+ "akka.remote.RemoteWatcher$HeartbeatRsp" = akka-misc
+ "akka.Done" = akka-misc
+ "akka.NotUsed" = akka-misc
+ "akka.actor.Address" = akka-misc
+ "akka.remote.UniqueAddress" = akka-misc
+
+ "akka.actor.ActorInitializationException" = akka-misc
+ "akka.actor.IllegalActorStateException" = akka-misc
+ "akka.actor.ActorKilledException" = akka-misc
+ "akka.actor.InvalidActorNameException" = akka-misc
+ "akka.actor.InvalidMessageException" = akka-misc
+ "java.util.concurrent.TimeoutException" = akka-misc
+ "akka.remote.serialization.ThrowableNotSerializableException" = akka-misc
+
+ "akka.actor.LocalScope$" = akka-misc
+ "akka.remote.RemoteScope" = akka-misc
+
+ "com.typesafe.config.impl.SimpleConfig" = akka-misc
+ "com.typesafe.config.Config" = akka-misc
+
+ "akka.routing.FromConfig" = akka-misc
+ "akka.routing.DefaultResizer" = akka-misc
+ "akka.routing.BalancingPool" = akka-misc
+ "akka.routing.BroadcastGroup" = akka-misc
+ "akka.routing.BroadcastPool" = akka-misc
+ "akka.routing.RandomGroup" = akka-misc
+ "akka.routing.RandomPool" = akka-misc
+ "akka.routing.RoundRobinGroup" = akka-misc
+ "akka.routing.RoundRobinPool" = akka-misc
+ "akka.routing.ScatterGatherFirstCompletedGroup" = akka-misc
+ "akka.routing.ScatterGatherFirstCompletedPool" = akka-misc
+ "akka.routing.SmallestMailboxPool" = akka-misc
+ "akka.routing.TailChoppingGroup" = akka-misc
+ "akka.routing.TailChoppingPool" = akka-misc
+ "akka.remote.routing.RemoteRouterConfig" = akka-misc
+
+ "akka.pattern.StatusReply" = akka-misc
+
+ "akka.dispatch.sysmsg.SystemMessage" = akka-system-msg
+
+ # Java Serializer is by default used for exceptions and will by default
+ # not be allowed to be serialized, but in certain cases they are replaced
+ # by `akka.remote.serialization.ThrowableNotSerializableException` if
+ # no specific serializer has been defined:
+ # - when wrapped in `akka.actor.Status.Failure` for ask replies
+ # - when wrapped in system messages for exceptions from remote deployed child actors
+ #
+ # It's recommended that you implement custom serializer for exceptions that are
+ # sent remotely, You can add binding to akka-misc (MiscMessageSerializer) for the
+ # exceptions that have a constructor with single message String or constructor with
+ # message String as first parameter and cause Throwable as second parameter. Note that it's not
+ # safe to add this binding for general exceptions such as IllegalArgumentException
+ # because it may have a subclass without required constructor.
+ "java.lang.Throwable" = java
+ }
+
+ serialization-identifiers {
+ "akka.remote.serialization.ProtobufSerializer" = 2
+ "akka.remote.serialization.DaemonMsgCreateSerializer" = 3
+ "akka.remote.serialization.MessageContainerSerializer" = 6
+ "akka.remote.serialization.MiscMessageSerializer" = 16
+ "akka.remote.serialization.ArteryMessageSerializer" = 17
+
+ "akka.remote.serialization.SystemMessageSerializer" = 22
+
+ # deprecated in 2.6.0, moved to akka-actor
+ "akka.remote.serialization.LongSerializer" = 18
+ # deprecated in 2.6.0, moved to akka-actor
+ "akka.remote.serialization.IntSerializer" = 19
+ # deprecated in 2.6.0, moved to akka-actor
+ "akka.remote.serialization.StringSerializer" = 20
+ # deprecated in 2.6.0, moved to akka-actor
+ "akka.remote.serialization.ByteStringSerializer" = 21
+ }
+
+ deployment {
+
+ default {
+
+ # if this is set to a valid remote address, the named actor will be
+ # deployed at that node e.g. "akka://sys@host:port"
+ remote = ""
+
+ target {
+
+ # A list of hostnames and ports for instantiating the children of a
+ # router
+ # The format should be on "akka://sys@host:port", where:
+ # - sys is the remote actor system name
+ # - hostname can be either hostname or IP address the remote actor
+ # should connect to
+ # - port should be the port for the remote server on the other node
+ # The number of actor instances to be spawned is still taken from the
+ # nr-of-instances setting as for local routers; the instances will be
+ # distributed round-robin among the given nodes.
+ nodes = []
+
+ }
+ }
+ }
+ }
+
+ remote {
+ ### Settings shared by classic remoting and Artery (the new implementation of remoting)
+
+ # Using remoting directly is typically not desirable, so a warning will
+ # be shown to make this clear. Set this setting to 'off' to suppress that
+ # warning.
+ warn-about-direct-use = on
+
+
+ # If Cluster is not used, remote watch and deployment are disabled.
+ # To optionally use them while not using Cluster, set to 'on'.
+ use-unsafe-remote-features-outside-cluster = off
+
+ # A warning will be logged on remote watch attempts if Cluster
+ # is not in use and 'use-unsafe-remote-features-outside-cluster'
+ # is 'off'. Set this to 'off' to suppress these.
+ warn-unsafe-watch-outside-cluster = on
+
+ # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
+ # [Hayashibara et al]) used for remote death watch.
+ # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
+ # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
+ # i.e. around 12.5 seconds with default settings.
+ watch-failure-detector {
+
+ # FQCN of the failure detector implementation.
+ # It must implement akka.remote.FailureDetector and have
+ # a public constructor with a com.typesafe.config.Config and
+ # akka.actor.EventStream parameter.
+ implementation-class = "akka.remote.PhiAccrualFailureDetector"
+
+ # How often keep-alive heartbeat messages should be sent to each connection.
+ heartbeat-interval = 1 s
+
+ # Defines the failure detector threshold.
+ # A low threshold is prone to generate many wrong suspicions but ensures
+ # a quick detection in the event of a real crash. Conversely, a high
+ # threshold generates fewer mistakes but needs more time to detect
+ # actual crashes.
+ threshold = 10.0
+
+ # Number of the samples of inter-heartbeat arrival times to adaptively
+ # calculate the failure timeout for connections.
+ max-sample-size = 200
+
+ # Minimum standard deviation to use for the normal distribution in
+ # AccrualFailureDetector. Too low standard deviation might result in
+ # too much sensitivity for sudden, but normal, deviations in heartbeat
+ # inter arrival times.
+ min-std-deviation = 100 ms
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # This margin is important to be able to survive sudden, occasional,
+ # pauses in heartbeat arrivals, due to for example garbage collect or
+ # network drop.
+ acceptable-heartbeat-pause = 10 s
+
+
+ # How often to check for nodes marked as unreachable by the failure
+ # detector
+ unreachable-nodes-reaper-interval = 1s
+
+ # After the heartbeat request has been sent the first failure detection
+ # will start after this period, even though no heartbeat mesage has
+ # been received.
+ expected-response-after = 1 s
+
+ }
+
+ # remote deployment configuration section
+ deployment {
+ # deprecated, use `enable-allow-list`
+ enable-whitelist = off
+
+ # If true, will only allow specific classes listed in `allowed-actor-classes` to be instanciated on this
+ # system via remote deployment
+ enable-allow-list = ${akka.remote.deployment.enable-whitelist}
+
+
+ # deprecated, use `allowed-actor-classes`
+ whitelist = []
+
+ allowed-actor-classes = ${akka.remote.deployment.whitelist}
+ }
+
+ ### Default dispatcher for the remoting subsystem
+ default-remote-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-factor = 0.5
+ parallelism-max = 16
+ }
+ throughput = 10
+ }
+ #//#shared
+ }
+
+}
+
+akka {
+
+ remote {
+ #//#classic
+ classic {
+
+ ### Configuration for classic remoting. Classic remoting is deprecated, use artery.
+
+
+ # If set to a nonempty string remoting will use the given dispatcher for
+ # its internal actors otherwise the default dispatcher is used. Please note
+ # that since remoting can load arbitrary 3rd party drivers (see
+ # "enabled-transport" and "adapters" entries) it is not guaranteed that
+ # every module will respect this setting.
+ use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+ # Settings for the failure detector to monitor connections.
+ # For TCP it is not important to have fast failure detection, since
+ # most connection failures are captured by TCP itself.
+ # The default DeadlineFailureDetector will trigger if there are no heartbeats within
+ # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
+ # with the default settings.
+ transport-failure-detector {
+
+ # FQCN of the failure detector implementation.
+ # It must implement akka.remote.FailureDetector and have
+ # a public constructor with a com.typesafe.config.Config and
+ # akka.actor.EventStream parameter.
+ implementation-class = "akka.remote.DeadlineFailureDetector"
+
+ # How often keep-alive heartbeat messages should be sent to each connection.
+ heartbeat-interval = 4 s
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # A margin to the `heartbeat-interval` is important to be able to survive sudden,
+ # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
+ # network drop.
+ acceptable-heartbeat-pause = 120 s
+ }
+
+
+ # Timeout after which the startup of the remoting subsystem is considered
+ # to be failed. Increase this value if your transport drivers (see the
+ # enabled-transports section) need longer time to be loaded.
+ startup-timeout = 10 s
+
+ # Timout after which the graceful shutdown of the remoting subsystem is
+ # considered to be failed. After the timeout the remoting system is
+ # forcefully shut down. Increase this value if your transport drivers
+ # (see the enabled-transports section) need longer time to stop properly.
+ shutdown-timeout = 10 s
+
+ # Before shutting down the drivers, the remoting subsystem attempts to flush
+ # all pending writes. This setting controls the maximum time the remoting is
+ # willing to wait before moving on to shut down the drivers.
+ flush-wait-on-shutdown = 2 s
+
+ # Reuse inbound connections for outbound messages
+ use-passive-connections = on
+
+ # Controls the backoff interval after a refused write is reattempted.
+ # (Transports may refuse writes if their internal buffer is full)
+ backoff-interval = 5 ms
+
+ # Acknowledgment timeout of management commands sent to the transport stack.
+ command-ack-timeout = 30 s
+
+ # The timeout for outbound associations to perform the handshake.
+ # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl
+ # the configured connection-timeout for the transport will be used instead.
+ handshake-timeout = 15 s
+
+ ### Security settings
+
+ # Enable untrusted mode for full security of server managed actors, prevents
+ # system messages to be send by clients, e.g. messages like 'Create',
+ # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc.
+ untrusted-mode = off
+
+ # When 'untrusted-mode=on' inbound actor selections are by default discarded.
+ # Actors with paths defined in this list are granted permission to receive actor
+ # selections messages.
+ # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
+ trusted-selection-paths = []
+
+ ### Logging
+
+ # If this is "on", Akka will log all inbound messages at DEBUG level,
+ # if off then they are not logged
+ log-received-messages = off
+
+ # If this is "on", Akka will log all outbound messages at DEBUG level,
+ # if off then they are not logged
+ log-sent-messages = off
+
+ # Sets the log granularity level at which Akka logs remoting events. This setting
+ # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility
+ # reasons the setting "on" will default to "debug" level. Please note that the effective
+ # logging level is still determined by the global logging level of the actor system:
+ # for example debug level remoting events will be only logged if the system
+ # is running with debug level logging.
+ # Failures to deserialize received messages also fall under this flag.
+ log-remote-lifecycle-events = on
+
+ # Logging of message types with payload size in bytes larger than
+ # this value. Maximum detected size per message type is logged once,
+ # with an increase threshold of 10%.
+ # By default this feature is turned off. Activate it by setting the property to
+ # a value in bytes, such as 1000b. Note that for all messages larger than this
+ # limit there will be extra performance and scalability cost.
+ log-frame-size-exceeding = off
+
+ # Log warning if the number of messages in the backoff buffer in the endpoint
+ # writer exceeds this limit. It can be disabled by setting the value to off.
+ log-buffer-size-exceeding = 50000
+
+ # After failed to establish an outbound connection, the remoting will mark the
+ # address as failed. This configuration option controls how much time should
+ # be elapsed before reattempting a new connection. While the address is
+ # gated, all messages sent to the address are delivered to dead-letters.
+ # Since this setting limits the rate of reconnects setting it to a
+ # very short interval (i.e. less than a second) may result in a storm of
+ # reconnect attempts.
+ retry-gate-closed-for = 5 s
+
+ # After catastrophic communication failures that result in the loss of system
+ # messages or after the remote DeathWatch triggers the remote system gets
+ # quarantined to prevent inconsistent behavior.
+ # This setting controls how long the Quarantine marker will be kept around
+ # before being removed to avoid long-term memory leaks.
+ # WARNING: DO NOT change this to a small value to re-enable communication with
+ # quarantined nodes. Such feature is not supported and any behavior between
+ # the affected systems after lifting the quarantine is undefined.
+ prune-quarantine-marker-after = 5 d
+
+ # If system messages have been exchanged between two systems (i.e. remote death
+ # watch or remote deployment has been used) a remote system will be marked as
+ # quarantined after the two system has no active association, and no
+ # communication happens during the time configured here.
+ # The only purpose of this setting is to avoid storing system message redelivery
+ # data (sequence number state, etc.) for an undefined amount of time leading to long
+ # term memory leak. Instead, if a system has been gone for this period,
+ # or more exactly
+ # - there is no association between the two systems (TCP connection, if TCP transport is used)
+ # - neither side has been attempting to communicate with the other
+ # - there are no pending system messages to deliver
+ # for the amount of time configured here, the remote system will be quarantined and all state
+ # associated with it will be dropped.
+ #
+ # Maximum value depends on the scheduler's max limit (default 248 days) and if configured
+ # to a longer duration this feature will effectively be disabled. Setting the value to
+ # 'off' will also disable the feature. Note that if disabled there is a risk of a long
+ # term memory leak.
+ quarantine-after-silence = 2 d
+
+ # This setting defines the maximum number of unacknowledged system messages
+ # allowed for a remote system. If this limit is reached the remote system is
+ # declared to be dead and its UID marked as tainted.
+ system-message-buffer-size = 20000
+
+ # This setting defines the maximum idle time after an individual
+ # acknowledgement for system messages is sent. System message delivery
+ # is guaranteed by explicit acknowledgement messages. These acks are
+ # piggybacked on ordinary traffic messages. If no traffic is detected
+ # during the time period configured here, the remoting will send out
+ # an individual ack.
+ system-message-ack-piggyback-timeout = 0.3 s
+
+ # This setting defines the time after internal management signals
+ # between actors (used for DeathWatch and supervision) that have not been
+ # explicitly acknowledged or negatively acknowledged are resent.
+ # Messages that were negatively acknowledged are always immediately
+ # resent.
+ resend-interval = 2 s
+
+ # Maximum number of unacknowledged system messages that will be resent
+ # each 'resend-interval'. If you watch many (> 1000) remote actors you can
+ # increase this value to for example 600, but a too large limit (e.g. 10000)
+ # may flood the connection and might cause false failure detection to trigger.
+ # Test such a configuration by watching all actors at the same time and stop
+ # all watched actors at the same time.
+ resend-limit = 200
+
+ # WARNING: this setting should not be not changed unless all of its consequences
+ # are properly understood which assumes experience with remoting internals
+ # or expert advice.
+ # This setting defines the time after redelivery attempts of internal management
+ # signals are stopped to a remote system that has been not confirmed to be alive by
+ # this system before.
+ initial-system-message-delivery-timeout = 3 m
+
+ ### Transports and adapters
+
+ # List of the transport drivers that will be loaded by the remoting.
+ # A list of fully qualified config paths must be provided where
+ # the given configuration path contains a transport-class key
+ # pointing to an implementation class of the Transport interface.
+ # If multiple transports are provided, the address of the first
+ # one will be used as a default address.
+ enabled-transports = ["akka.remote.classic.netty.tcp"]
+
+ # Transport drivers can be augmented with adapters by adding their
+ # name to the applied-adapters setting in the configuration of a
+ # transport. The available adapters should be configured in this
+ # section by providing a name, and the fully qualified name of
+ # their corresponding implementation. The class given here
+ # must implement akka.akka.remote.transport.TransportAdapterProvider
+ # and have public constructor without parameters.
+ adapters {
+ gremlin = "akka.remote.transport.FailureInjectorProvider"
+ trttl = "akka.remote.transport.ThrottlerProvider"
+ }
+
+ ### Default configuration for the Netty based transport drivers
+
+ netty.tcp {
+ # The class given here must implement the akka.remote.transport.Transport
+ # interface and offer a public constructor which takes two arguments:
+ # 1) akka.actor.ExtendedActorSystem
+ # 2) com.typesafe.config.Config
+ transport-class = "akka.remote.transport.netty.NettyTransport"
+
+ # Transport drivers can be augmented with adapters by adding their
+ # name to the applied-adapters list. The last adapter in the
+ # list is the adapter immediately above the driver, while
+ # the first one is the top of the stack below the standard
+ # Akka protocol
+ applied-adapters = []
+
+ # The default remote server port clients should connect to.
+ # Default is 2552 (AKKA), use 0 if you want a random available port
+ # This port needs to be unique for each actor system on the same machine.
+ port = 2552
+
+ # The hostname or ip clients should connect to.
+ # InetAddress.getLocalHost.getHostAddress is used if empty
+ hostname = ""
+
+ # Use this setting to bind a network interface to a different port
+ # than remoting protocol expects messages at. This may be used
+ # when running akka nodes in a separated networks (under NATs or docker containers).
+ # Use 0 if you want a random available port. Examples:
+ #
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = 2553
+ # Network interface will be bound to the 2553 port, but remoting protocol will
+ # expect messages sent to port 2552.
+ #
+ # akka.remote.classic.netty.tcp.port = 0
+ # akka.remote.classic.netty.tcp.bind-port = 0
+ # Network interface will be bound to a random port, and remoting protocol will
+ # expect messages sent to the bound port.
+ #
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = 0
+ # Network interface will be bound to a random port, but remoting protocol will
+ # expect messages sent to port 2552.
+ #
+ # akka.remote.classic.netty.tcp.port = 0
+ # akka.remote.classic.netty.tcp.bind-port = 2553
+ # Network interface will be bound to the 2553 port, and remoting protocol will
+ # expect messages sent to the bound port.
+ #
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = ""
+ # Network interface will be bound to the 2552 port, and remoting protocol will
+ # expect messages sent to the bound port.
+ #
+ # akka.remote.classic.netty.tcp.port if empty
+ bind-port = ""
+
+ # Use this setting to bind a network interface to a different hostname or ip
+ # than remoting protocol expects messages at.
+ # Use "0.0.0.0" to bind to all interfaces.
+ # akka.remote.classic.netty.tcp.hostname if empty
+ bind-hostname = ""
+
+ # Enables SSL support on this transport
+ enable-ssl = false
+
+ # Sets the connectTimeoutMillis of all outbound connections,
+ # i.e. how long a connect may take until it is timed out
+ connection-timeout = 15 s
+
+ # If set to "<id.of.dispatcher>" then the specified dispatcher
+ # will be used to accept inbound connections, and perform IO. If "" then
+ # dedicated threads will be used.
+ # Please note that the Netty driver only uses this configuration and does
+ # not read the "akka.remote.use-dispatcher" entry. Instead it has to be
+ # configured manually to point to the same dispatcher if needed.
+ use-dispatcher-for-io = ""
+
+ # Sets the high water mark for the in and outbound sockets,
+ # set to 0b for platform default
+ write-buffer-high-water-mark = 0b
+
+ # Sets the low water mark for the in and outbound sockets,
+ # set to 0b for platform default
+ write-buffer-low-water-mark = 0b
+
+ # Sets the send buffer size of the Sockets,
+ # set to 0b for platform default
+ send-buffer-size = 256000b
+
+ # Sets the receive buffer size of the Sockets,
+ # set to 0b for platform default
+ receive-buffer-size = 256000b
+
+ # Maximum message size the transport will accept, but at least
+ # 32000 bytes.
+ # Please note that UDP does not support arbitrary large datagrams,
+ # so this setting has to be chosen carefully when using UDP.
+ # Both send-buffer-size and receive-buffer-size settings has to
+ # be adjusted to be able to buffer messages of maximum size.
+ maximum-frame-size = 128000b
+
+ # Sets the size of the connection backlog
+ backlog = 4096
+
+ # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm
+ tcp-nodelay = on
+
+ # Enables TCP Keepalive, subject to the O/S kernel’s configuration
+ tcp-keepalive = on
+
+ # Enables SO_REUSEADDR, which determines when an ActorSystem can open
+ # the specified listen port (the meaning differs between *nix and Windows)
+ # Valid values are "on", "off" and "off-for-windows"
+ # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378
+ # "off-for-windows" of course means that it's "on" for all other platforms
+ tcp-reuse-addr = off-for-windows
+
+ # Used to configure the number of I/O worker threads on server sockets
+ server-socket-worker-pool {
+ # Min number of threads to cap factor-based number to
+ pool-size-min = 2
+
+ # The pool size factor is used to determine thread pool size
+ # using the following formula: ceil(available processors * factor).
+ # Resulting size is then bounded by the pool-size-min and
+ # pool-size-max values.
+ pool-size-factor = 1.0
+
+ # Max number of threads to cap factor-based number to
+ pool-size-max = 2
+ }
+
+ # Used to configure the number of I/O worker threads on client sockets
+ client-socket-worker-pool {
+ # Min number of threads to cap factor-based number to
+ pool-size-min = 2
+
+ # The pool size factor is used to determine thread pool size
+ # using the following formula: ceil(available processors * factor).
+ # Resulting size is then bounded by the pool-size-min and
+ # pool-size-max values.
+ pool-size-factor = 1.0
+
+ # Max number of threads to cap factor-based number to
+ pool-size-max = 2
+ }
+
+
+ }
+
+ netty.ssl = ${akka.remote.classic.netty.tcp}
+ netty.ssl = {
+ # Enable SSL/TLS encryption.
+ # This must be enabled on both the client and server to work.
+ enable-ssl = true
+
+ # Factory of SSLEngine.
+ # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public
+ # constructor with an ActorSystem parameter.
+ # The default ConfigSSLEngineProvider is configured by properties in section
+ # akka.remote.classic.netty.ssl.security
+ #
+ # The SSLEngineProvider can also be defined via ActorSystemSetup with
+ # SSLEngineProviderSetup when starting the ActorSystem. That is useful when
+ # the SSLEngineProvider implementation requires other external constructor
+ # parameters or is created before the ActorSystem is created.
+ # If such SSLEngineProviderSetup is defined this config property is not used.
+ ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider
+
+ security {
+ # This is the Java Key Store used by the server connection
+ key-store = "keystore"
+
+ # This password is used for decrypting the key store
+ key-store-password = "changeme"
+
+ # This password is used for decrypting the key
+ key-password = "changeme"
+
+ # This is the Java Key Store used by the client connection
+ trust-store = "truststore"
+
+ # This password is used for decrypting the trust store
+ trust-store-password = "changeme"
+
+ # Protocol to use for SSL encryption.
+ protocol = "TLSv1.2"
+
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # When doing rolling upgrades, make sure to include both the algorithm used
+ # by old nodes and the preferred algorithm.
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+ # More info here:
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA"]
+
+ # There are two options, and the default SecureRandom is recommended:
+ # "" or "SecureRandom" => (default)
+ # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+ #
+ # Setting a value here may require you to supply the appropriate cipher
+ # suite (see enabled-algorithms section above)
+ random-number-generator = ""
+
+ # Require mutual authentication between TLS peers
+ #
+ # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
+ # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
+ # the passive side will also request and verify a certificate from the connecting peer.
+ #
+ # To prevent man-in-the-middle attacks this setting is enabled by default.
+ #
+ # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that
+ # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting
+ # connection will not send over certificates even if asked.
+ #
+ # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side)
+ # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of
+ # Akka < 2.4.12 can therefore work like this:
+ # - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off"
+ # - then switch this flag to "on" and do again a rolling upgrade of all nodes
+ # The first step ensures that all nodes will send over a certificate when asked to. The second
+ # step will ensure that all nodes finally enforce the secure checking of client certificates.
+ require-mutual-authentication = on
+ }
+ }
+
+ ### Default configuration for the failure injector transport adapter
+
+ gremlin {
+ # Enable debug logging of the failure injector transport adapter
+ debug = off
+ }
+
+ backoff-remote-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ parallelism-max = 2
+ }
+ }
+ }
+ }
+}
+#//#classic
+
+#//#artery
+akka {
+
+ remote {
+
+ ### Configuration for Artery, the new implementation of remoting
+ artery {
+
+ # Disable artery with this flag
+ enabled = on
+
+ # Select the underlying transport implementation.
+ #
+ # Possible values: aeron-udp, tcp, tls-tcp
+ # See https://doc.akka.io/docs/akka/current/remoting-artery.html#selecting-a-transport for the tradeoffs
+ # for each transport
+ transport = tcp
+
+ # Canonical address is the address other clients should connect to.
+ # Artery transport will expect messages to this address.
+ canonical {
+
+ # The default remote server port clients should connect to.
+ # Default is 25520, use 0 if you want a random available port
+ # This port needs to be unique for each actor system on the same machine.
+ port = 25520
+
+ # Hostname clients should connect to. Can be set to an ip, hostname
+ # or one of the following special values:
+ # "<getHostAddress>" InetAddress.getLocalHost.getHostAddress
+ # "<getHostName>" InetAddress.getLocalHost.getHostName
+ #
+ hostname = "<getHostAddress>"
+ }
+
+ # Use these settings to bind a network interface to a different address
+ # than artery expects messages at. This may be used when running Akka
+ # nodes in a separated networks (under NATs or in containers). If canonical
+ # and bind addresses are different, then network configuration that relays
+ # communications from canonical to bind addresses is expected.
+ bind {
+
+ # Port to bind a network interface to. Can be set to a port number
+ # of one of the following special values:
+ # 0 random available port
+ # "" akka.remote.artery.canonical.port
+ #
+ port = ""
+
+ # Hostname to bind a network interface to. Can be set to an ip, hostname
+ # or one of the following special values:
+ # "0.0.0.0" all interfaces
+ # "" akka.remote.artery.canonical.hostname
+ # "<getHostAddress>" InetAddress.getLocalHost.getHostAddress
+ # "<getHostName>" InetAddress.getLocalHost.getHostName
+ #
+ hostname = ""
+
+ # Time to wait for Aeron/TCP to bind
+ bind-timeout = 3s
+ }
+
+
+ # Actor paths to use the large message stream for when a message
+ # is sent to them over remoting. The large message stream dedicated
+ # is separate from "normal" and system messages so that sending a
+ # large message does not interfere with them.
+ # Entries should be the full path to the actor. Wildcards in the form of "*"
+ # can be supplied at any place and matches any name at that segment -
+ # "/user/supervisor/actor/*" will match any direct child to actor,
+ # while "/supervisor/*/child" will match any grandchild to "supervisor" that
+ # has the name "child"
+ # Entries have to be specified on both the sending and receiving side.
+ # Messages sent to ActorSelections will not be passed through the large message
+ # stream, to pass such messages through the large message stream the selections
+ # but must be resolved to ActorRefs first.
+ large-message-destinations = []
+
+ # Enable untrusted mode, which discards inbound system messages, PossiblyHarmful and
+ # ActorSelection messages. E.g. remote watch and remote deployment will not work.
+ # ActorSelection messages can be enabled for specific paths with the trusted-selection-paths
+ untrusted-mode = off
+
+ # When 'untrusted-mode=on' inbound actor selections are by default discarded.
+ # Actors with paths defined in this list are granted permission to receive actor
+ # selections messages.
+ # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
+ trusted-selection-paths = []
+
+ # If this is "on", all inbound remote messages will be logged at DEBUG level,
+ # if off then they are not logged
+ log-received-messages = off
+
+ # If this is "on", all outbound remote messages will be logged at DEBUG level,
+ # if off then they are not logged
+ log-sent-messages = off
+
+ # Logging of message types with payload size in bytes larger than
+ # this value. Maximum detected size per message type is logged once,
+ # with an increase threshold of 10%.
+ # By default this feature is turned off. Activate it by setting the property to
+ # a value in bytes, such as 1000b. Note that for all messages larger than this
+ # limit there will be extra performance and scalability cost.
+ log-frame-size-exceeding = off
+
+ advanced {
+
+ # Maximum serialized message size, including header data.
+ maximum-frame-size = 256 KiB
+
+ # Direct byte buffers are reused in a pool with this maximum size.
+ # Each buffer has the size of 'maximum-frame-size'.
+ # This is not a hard upper limit on number of created buffers. Additional
+ # buffers will be created if needed, e.g. when using many outbound
+ # associations at the same time. Such additional buffers will be garbage
+ # collected, which is not as efficient as reusing buffers in the pool.
+ buffer-pool-size = 128
+
+ # Maximum serialized message size for the large messages, including header data.
+ # If the value of akka.remote.artery.transport is set to aeron-udp, it is currently
+ # restricted to 1/8th the size of a term buffer that can be configured by setting the
+ # 'aeron.term.buffer.length' system property.
+ # See 'large-message-destinations'.
+ maximum-large-frame-size = 2 MiB
+
+ # Direct byte buffers for the large messages are reused in a pool with this maximum size.
+ # Each buffer has the size of 'maximum-large-frame-size'.
+ # See 'large-message-destinations'.
+ # This is not a hard upper limit on number of created buffers. Additional
+ # buffers will be created if needed, e.g. when using many outbound
+ # associations at the same time. Such additional buffers will be garbage
+ # collected, which is not as efficient as reusing buffers in the pool.
+ large-buffer-pool-size = 32
+
+ # For enabling testing features, such as blackhole in akka-remote-testkit.
+ test-mode = off
+
+ # Settings for the materializer that is used for the remote streams.
+ materializer = ${akka.stream.materializer}
+
+ # Remoting will use the given dispatcher for the ordinary and large message
+ # streams.
+ use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+ # Remoting will use the given dispatcher for the control stream.
+ # It can be good to not use the same dispatcher for the control stream as
+ # the dispatcher for the ordinary message stream so that heartbeat messages
+ # are not disturbed.
+ use-control-stream-dispatcher = "akka.actor.internal-dispatcher"
+
+
+ # Total number of inbound lanes, shared among all inbound associations. A value
+ # greater than 1 means that deserialization can be performed in parallel for
+ # different destination actors. The selection of lane is based on consistent
+ # hashing of the recipient ActorRef to preserve message ordering per receiver.
+ # Lowest latency can be achieved with inbound-lanes=1 because of one less
+ # asynchronous boundary.
+ inbound-lanes = 4
+
+ # Number of outbound lanes for each outbound association. A value greater than 1
+ # means that serialization and other work can be performed in parallel for different
+ # destination actors. The selection of lane is based on consistent hashing of the
+ # recipient ActorRef to preserve message ordering per receiver. Note that messages
+ # for different destination systems (hosts) are handled by different streams also
+ # when outbound-lanes=1. Lowest latency can be achieved with outbound-lanes=1
+ # because of one less asynchronous boundary.
+ outbound-lanes = 1
+
+ # Size of the send queue for outgoing messages. Messages will be dropped if
+ # the queue becomes full. This may happen if you send a burst of many messages
+ # without end-to-end flow control. Note that there is one such queue per
+ # outbound association. The trade-off of using a larger queue size is that
+ # it consumes more memory, since the queue is based on preallocated array with
+ # fixed size.
+ outbound-message-queue-size = 3072
+
+ # Size of the send queue for outgoing control messages, such as system messages.
+ # If this limit is reached the remote system is declared to be dead and its UID
+ # marked as quarantined. Note that there is one such queue per outbound association.
+ # It is a linked queue so it will not use more memory than needed but by increasing
+ # too much you may risk OutOfMemoryError in the worst case.
+ outbound-control-queue-size = 20000
+
+ # Size of the send queue for outgoing large messages. Messages will be dropped if
+ # the queue becomes full. This may happen if you send a burst of many messages
+ # without end-to-end flow control. Note that there is one such queue per
+ # outbound association.
+ # It is a linked queue so it will not use more memory than needed but by increasing
+ # too much you may risk OutOfMemoryError, especially since the message payload
+ # of these messages may be large.
+ outbound-large-message-queue-size = 256
+
+ # This setting defines the maximum number of unacknowledged system messages
+ # allowed for a remote system. If this limit is reached the remote system is
+ # declared to be dead and its UID marked as quarantined.
+ system-message-buffer-size = 20000
+
+ # unacknowledged system messages are re-delivered with this interval
+ system-message-resend-interval = 1 second
+
+
+
+ # The timeout for outbound associations to perform the initial handshake.
+ # This timeout must be greater than the 'image-liveness-timeout' when
+ # transport is aeron-udp.
+ handshake-timeout = 20 seconds
+
+ # incomplete initial handshake attempt is retried with this interval
+ handshake-retry-interval = 1 second
+
+ # Handshake requests are performed periodically with this interval,
+ # also after the handshake has been completed to be able to establish
+ # a new session with a restarted destination system.
+ inject-handshake-interval = 1 second
+
+
+ # System messages that are not acknowledged after re-sending for this period are
+ # dropped and will trigger quarantine. The value should be longer than the length
+ # of a network partition that you need to survive.
+ give-up-system-message-after = 6 hours
+
+ # Outbound streams are stopped when they haven't been used for this duration.
+ # They are started again when new messages are sent.
+ stop-idle-outbound-after = 5 minutes
+
+ # Outbound streams are quarantined when they haven't been used for this duration
+ # to cleanup resources used by the association, such as compression tables.
+ # This will cleanup association to crashed systems that didn't announce their
+ # termination.
+ # The value should be longer than the length of a network partition that you
+ # need to survive.
+ # The value must also be greater than stop-idle-outbound-after.
+ # Once every 1/10 of this duration an extra handshake message will be sent.
+ # Therfore it's also recommended to use a value that is greater than 10 times
+ # the stop-idle-outbound-after, since otherwise the idle streams will not be
+ # stopped.
+ quarantine-idle-outbound-after = 6 hours
+
+ # Stop outbound stream of a quarantined association after this idle timeout, i.e.
+ # when not used any more.
+ stop-quarantined-after-idle = 3 seconds
+
+ # After catastrophic communication failures that could result in the loss of system
+ # messages or after the remote DeathWatch triggers the remote system gets
+ # quarantined to prevent inconsistent behavior.
+ # This setting controls how long the quarantined association will be kept around
+ # before being removed to avoid long-term memory leaks. It must be quarantined
+ # and also unused for this duration before it's removed. When removed the historical
+ # information about which UIDs that were quarantined for that hostname:port is
+ # gone which could result in communication with a previously quarantined node
+ # if it wakes up again. Therfore this shouldn't be set too low.
+ remove-quarantined-association-after = 1 h
+
+ # during ActorSystem termination the remoting will wait this long for
+ # an acknowledgment by the destination system that flushing of outstanding
+ # remote messages has been completed
+ shutdown-flush-timeout = 1 second
+
+ # Before sending notificaiton of terminated actor (DeathWatchNotification) other messages
+ # will be flushed to make sure that the Terminated message arrives after other messages.
+ # It will wait this long for the flush acknowledgement before continuing.
+ # The flushing can be disabled by setting this to `off`.
+ death-watch-notification-flush-timeout = 3 seconds
+
+ # See 'inbound-max-restarts'
+ inbound-restart-timeout = 5 seconds
+
+ # Max number of restarts within 'inbound-restart-timeout' for the inbound streams.
+ # If more restarts occurs the ActorSystem will be terminated.
+ inbound-max-restarts = 5
+
+ # Retry outbound connection after this backoff.
+ # Only used when transport is tcp or tls-tcp.
+ outbound-restart-backoff = 1 second
+
+ # See 'outbound-max-restarts'
+ outbound-restart-timeout = 5 seconds
+
+ # Max number of restarts within 'outbound-restart-timeout' for the outbound streams.
+ # If more restarts occurs the ActorSystem will be terminated.
+ outbound-max-restarts = 5
+
+ # compression of common strings in remoting messages, like actor destinations, serializers etc
+ compression {
+
+ actor-refs {
+ # Max number of compressed actor-refs
+ # Note that compression tables are "rolling" (i.e. a new table replaces the old
+ # compression table once in a while), and this setting is only about the total number
+ # of compressions within a single such table.
+ # Must be a positive natural number. Can be disabled with "off".
+ max = 256
+
+ # interval between new table compression advertisements.
+ # this means the time during which we collect heavy-hitter data and then turn it into a compression table.
+ advertisement-interval = 1 minute
+ }
+ manifests {
+ # Max number of compressed manifests
+ # Note that compression tables are "rolling" (i.e. a new table replaces the old
+ # compression table once in a while), and this setting is only about the total number
+ # of compressions within a single such table.
+ # Must be a positive natural number. Can be disabled with "off".
+ max = 256
+
+ # interval between new table compression advertisements.
+ # this means the time during which we collect heavy-hitter data and then turn it into a compression table.
+ advertisement-interval = 1 minute
+ }
+ }
+
+ # List of fully qualified class names of remote instruments which should
+ # be initialized and used for monitoring of remote messages.
+ # The class must extend akka.remote.artery.RemoteInstrument and
+ # have a public constructor with empty parameters or one ExtendedActorSystem
+ # parameter.
+ # A new instance of RemoteInstrument will be created for each encoder and decoder.
+ # It's only called from the stage, so if it dosn't delegate to any shared instance
+ # it doesn't have to be thread-safe.
+ # Refer to `akka.remote.artery.RemoteInstrument` for more information.
+ instruments = ${?akka.remote.artery.advanced.instruments} []
+
+ # Only used when transport is aeron-udp
+ aeron {
+ # Periodically log out all Aeron counters. See https://github.com/real-logic/aeron/wiki/Monitoring-and-Debugging#counters
+ # Only used when transport is aeron-udp.
+ log-aeron-counters = false
+
+ # Controls whether to start the Aeron media driver in the same JVM or use external
+ # process. Set to 'off' when using external media driver, and then also set the
+ # 'aeron-dir'.
+ # Only used when transport is aeron-udp.
+ embedded-media-driver = on
+
+ # Directory used by the Aeron media driver. It's mandatory to define the 'aeron-dir'
+ # if using external media driver, i.e. when 'embedded-media-driver = off'.
+ # Embedded media driver will use a this directory, or a temporary directory if this
+ # property is not defined (empty).
+ # Only used when transport is aeron-udp.
+ aeron-dir = ""
+
+ # Whether to delete aeron embedded driver directory upon driver stop.
+ # Only used when transport is aeron-udp.
+ delete-aeron-dir = yes
+
+ # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
+ # The tradeoff is that to have low latency more CPU time must be used to be
+ # able to react quickly on incoming messages or send as fast as possible after
+ # backoff backpressure.
+ # Level 1 strongly prefer low CPU consumption over low latency.
+ # Level 10 strongly prefer low latency over low CPU consumption.
+ # Only used when transport is aeron-udp.
+ idle-cpu-level = 5
+
+ # messages that are not accepted by Aeron are dropped after retrying for this period
+ # Only used when transport is aeron-udp.
+ give-up-message-after = 60 seconds
+
+ # Timeout after which aeron driver has not had keepalive messages
+ # from a client before it considers the client dead.
+ # Only used when transport is aeron-udp.
+ client-liveness-timeout = 20 seconds
+
+ # Timout after after which an uncommitted publication will be unblocked
+ # Only used when transport is aeron-udp.
+ publication-unblock-timeout = 40 seconds
+
+ # Timeout for each the INACTIVE and LINGER stages an aeron image
+ # will be retained for when it is no longer referenced.
+ # This timeout must be less than the 'handshake-timeout'.
+ # Only used when transport is aeron-udp.
+ image-liveness-timeout = 10 seconds
+
+ # Timeout after which the aeron driver is considered dead
+ # if it does not update its C'n'C timestamp.
+ # Only used when transport is aeron-udp.
+ driver-timeout = 20 seconds
+ }
+
+ # Only used when transport is tcp or tls-tcp.
+ tcp {
+ # Timeout of establishing outbound connections.
+ connection-timeout = 5 seconds
+
+ # The local address that is used for the client side of the TCP connection.
+ outbound-client-hostname = ""
+ }
+
+ }
+
+ # SSL configuration that is used when transport=tls-tcp.
+ ssl {
+ # Factory of SSLEngine.
+ # Must implement akka.remote.artery.tcp.SSLEngineProvider and have a public
+ # constructor with an ActorSystem parameter.
+ # The default ConfigSSLEngineProvider is configured by properties in section
+ # akka.remote.artery.ssl.config-ssl-engine
+ ssl-engine-provider = akka.remote.artery.tcp.ConfigSSLEngineProvider
+
+ # Config of akka.remote.artery.tcp.ConfigSSLEngineProvider
+ config-ssl-engine {
+
+ # This is the Java Key Store used by the server connection
+ key-store = "keystore"
+
+ # This password is used for decrypting the key store
+ # Use substitution from environment variables for passwords. Don't define
+ # real passwords in config files. key-store-password=${SSL_KEY_STORE_PASSWORD}
+ key-store-password = "changeme"
+
+ # This password is used for decrypting the key
+ # Use substitution from environment variables for passwords. Don't define
+ # real passwords in config files. key-password=${SSL_KEY_PASSWORD}
+ key-password = "changeme"
+
+ # This is the Java Key Store used by the client connection
+ trust-store = "truststore"
+
+ # This password is used for decrypting the trust store
+ # Use substitution from environment variables for passwords. Don't define
+ # real passwords in config files. trust-store-password=${SSL_TRUST_STORE_PASSWORD}
+ trust-store-password = "changeme"
+
+ # Protocol to use for SSL encryption.
+ protocol = "TLSv1.2"
+
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # When doing rolling upgrades, make sure to include both the algorithm used
+ # by old nodes and the preferred algorithm.
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+ # More info here:
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA"]
+
+ # There are two options, and the default SecureRandom is recommended:
+ # "" or "SecureRandom" => (default)
+ # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+ #
+ # Setting a value here may require you to supply the appropriate cipher
+ # suite (see enabled-algorithms section above)
+ random-number-generator = ""
+
+ # Require mutual authentication between TLS peers
+ #
+ # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
+ # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
+ # the passive side will also request and verify a certificate from the connecting peer.
+ #
+ # To prevent man-in-the-middle attacks this setting is enabled by default.
+ require-mutual-authentication = on
+
+ # Set this to `on` to verify hostnames with sun.security.util.HostnameChecker
+ # If possible it is recommended to have this enabled. Hostname verification is designed for
+ # situations where things locate each other by hostname, in scenarios where host names are dynamic
+ # and not known up front it can make sense to have this disabled.
+ hostname-verification = off
+ }
+
+ # Config of akka.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider
+ # This engine provider reads PEM files from a mount point shared with the secret
+ # manager. The constructed SSLContext is cached some time (configurable) so when
+ # the credentials rotate the new credentials are eventually picked up.
+ # By default mTLS is enabled.
+ # This provider also includes a verification phase that runs after the TLS handshake
+ # phase. In this verification, both peers run an authorization and verify they are
+ # part of the same akka cluster. The verification happens via comparing the subject
+ # names in the peer's certificate with the name on the own certificate so if you
+ # use this SSLEngineProvider you should make sure all nodes on the cluster include
+ # at least one common subject name (CN or SAN).
+ # The Key setup this implementation supports has some limitations:
+ # 1. the private key must be provided on a PKCS#1 or a non-encrypted PKCS#8 PEM-formatted file
+ # 2. the private key must be be of an algorythm supported by `akka-pki` tools (e.g. "RSA", not "EC")
+ # 3. the node certificate must be issued by a root CA (not an intermediate CA)
+ # 4. both the node and the CA certificates must be provided in PEM-formatted files
+ rotating-keys-engine {
+
+ # This is a convention that people may follow if they wish to save themselves some configuration
+ secret-mount-point = /var/run/secrets/akka-tls/rotating-keys-engine
+
+ # The absolute path the PEM file with the private key.
+ key-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.key
+ # The absolute path to the PEM file of the certificate for the private key above.
+ cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.crt
+ # The absolute path to the PEM file of the certificate of the CA that emited
+ # the node certificate above.
+ ca-cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/ca.crt
+
+ # There are two options, and the default SecureRandom is recommended:
+ # "" or "SecureRandom" => (default)
+ # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+ #
+ # Setting a value here may require you to supply the appropriate cipher
+ # suite (see enabled-algorithms section)
+ random-number-generator = ""
+
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+ # More info here:
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+
+ # Protocol to use for SSL encryption.
+ protocol = "TLSv1.2"
+
+ # How long should an SSLContext instance be cached. When rotating keys and certificates,
+ # there must a time overlap between the old certificate/key and the new ones. The
+ # value of this setting should be lower than duration of that overlap.
+ ssl-context-cache-ttl = 5m
+ }
+ }
+ }
+ }
+
+}
+#//#artery
--- /dev/null
+#####################################
+# Akka Stream Reference Config File #
+#####################################
+
+# eager creation of the system wide materializer
+akka.library-extensions += "akka.stream.SystemMaterializer$"
+akka {
+ stream {
+
+ # Default materializer settings
+ materializer {
+
+ # Initial size of buffers used in stream elements
+ initial-input-buffer-size = 4
+ # Maximum size of buffers used in stream elements
+ max-input-buffer-size = 16
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # or full dispatcher configuration to be used by ActorMaterializer when creating Actors.
+ dispatcher = "akka.actor.default-dispatcher"
+
+ # Fully qualified config path which holds the dispatcher configuration
+ # or full dispatcher configuration to be used by stream operators that
+ # perform blocking operations
+ blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+ # Cleanup leaked publishers and subscribers when they are not used within a given
+ # deadline
+ subscription-timeout {
+ # when the subscription timeout is reached one of the following strategies on
+ # the "stale" publisher:
+ # cancel - cancel it (via `onError` or subscribing to the publisher and
+ # `cancel()`ing the subscription right away
+ # warn - log a warning statement about the stale element (then drop the
+ # reference to it)
+ # noop - do nothing (not recommended)
+ mode = cancel
+
+ # time after which a subscriber / publisher is considered stale and eligible
+ # for cancelation (see `akka.stream.subscription-timeout.mode`)
+ timeout = 5s
+ }
+
+ # Enable additional troubleshooting logging at DEBUG log level
+ debug-logging = off
+
+ # Maximum number of elements emitted in batch if downstream signals large demand
+ output-burst-limit = 1000
+
+ # Enable automatic fusing of all graphs that are run. For short-lived streams
+ # this may cause an initial runtime overhead, but most of the time fusing is
+ # desirable since it reduces the number of Actors that are created.
+ # Deprecated, since Akka 2.5.0, setting does not have any effect.
+ auto-fusing = on
+
+ # Those stream elements which have explicit buffers (like mapAsync, mapAsyncUnordered,
+ # buffer, flatMapMerge, Source.actorRef, Source.queue, etc.) will preallocate a fixed
+ # buffer upon stream materialization if the requested buffer size is less than this
+ # configuration parameter. The default is very high because failing early is better
+ # than failing under load.
+ #
+ # Buffers sized larger than this will dynamically grow/shrink and consume more memory
+ # per element than the fixed size buffers.
+ max-fixed-buffer-size = 1000000000
+
+ # Maximum number of sync messages that actor can process for stream to substream communication.
+ # Parameter allows to interrupt synchronous processing to get upstream/downstream messages.
+ # Allows to accelerate message processing that happening within same actor but keep system responsive.
+ sync-processing-limit = 1000
+
+ debug {
+ # Enables the fuzzing mode which increases the chance of race conditions
+ # by aggressively reordering events and making certain operations more
+ # concurrent than usual.
+ # This setting is for testing purposes, NEVER enable this in a production
+ # environment!
+ # To get the best results, try combining this setting with a throughput
+ # of 1 on the corresponding dispatchers.
+ fuzzing-mode = off
+ }
+
+ io.tcp {
+ # The outgoing bytes are accumulated in a buffer while waiting for acknowledgment
+ # of pending write. This improves throughput for small messages (frames) without
+ # sacrificing latency. While waiting for the ack the stage will eagerly pull
+ # from upstream until the buffer exceeds this size. That means that the buffer may hold
+ # slightly more bytes than this limit (at most one element more). It can be set to 0
+ # to disable the usage of the buffer.
+ write-buffer-size = 16 KiB
+
+ # In addition to the buffering described for property write-buffer-size, try to collect
+ # more consecutive writes from the upstream stream producers.
+ #
+ # The rationale is to increase write efficiency by avoiding separate small
+ # writes to the network which is expensive to do. Merging those writes together
+ # (up to `write-buffer-size`) improves throughput for small writes.
+ #
+ # The idea is that a running stream may produce multiple small writes consecutively
+ # in one go without waiting for any external input. To probe the stream for
+ # data, this features delays sending a write immediately by probing the stream
+ # for more writes. This works by rescheduling the TCP connection stage via the
+ # actor mailbox of the underlying actor. Thus, before the stage is reactivated
+ # the upstream gets another opportunity to emit writes.
+ #
+ # When the stage is reactivated and if new writes are detected another round-trip
+ # is scheduled. The loop repeats until either the number of round trips given in this
+ # setting is reached, the buffer reaches `write-buffer-size`, or no new writes
+ # were detected during the last round-trip.
+ #
+ # This mechanism ensures that a write is guaranteed to be sent when the remaining stream
+ # becomes idle waiting for external signals.
+ #
+ # In most cases, the extra latency this mechanism introduces should be negligible,
+ # but depending on the stream setup it may introduce a noticeable delay,
+ # if the upstream continuously produces small amounts of writes in a
+ # blocking (CPU-bound) way.
+ #
+ # In that case, the feature can either be disabled, or the producing CPU-bound
+ # work can be taken off-stream to avoid excessive delays (e.g. using `mapAsync` instead of `map`).
+ #
+ # A value of 0 disables this feature.
+ coalesce-writes = 10
+ }
+
+ # Time to wait for async materializer creation before throwing an exception
+ creation-timeout = 20 seconds
+
+ //#stream-ref
+ # configure defaults for SourceRef and SinkRef
+ stream-ref {
+ # Buffer of a SinkRef that is used to batch Request elements from the other side of the stream ref
+ #
+ # The buffer will be attempted to be filled eagerly even while the local stage did not request elements,
+ # because the delay of requesting over network boundaries is much higher.
+ buffer-capacity = 32
+
+ # Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number)
+ # Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
+ # be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
+ #
+ # The semantics of handling and updating the demand however are in-line with what Reactive Streams dictates.
+ #
+ # In normal operation, demand is signalled in response to arriving elements, however if no new elements arrive
+ # within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
+ demand-redelivery-interval = 1 second
+
+ # Subscription timeout, during which the "remote side" MUST subscribe (materialize) the handed out stream ref.
+ # This timeout does not have to be very low in normal situations, since the remote side may also need to
+ # prepare things before it is ready to materialize the reference. However the timeout is needed to avoid leaking
+ # in-active streams which are never subscribed to.
+ subscription-timeout = 30 seconds
+
+ # In order to guard the receiving end of a stream ref from never terminating (since awaiting a Completion or Failed
+ # message) after / before a Terminated is seen, a special timeout is applied once Terminated is received by it.
+ # This allows us to terminate stream refs that have been targeted to other nodes which are Downed, and as such the
+ # other side of the stream ref would never send the "final" terminal message.
+ #
+ # The timeout specifically means the time between the Terminated signal being received and when the local SourceRef
+ # determines to fail itself, assuming there was message loss or a complete partition of the completion signal.
+ final-termination-signal-deadline = 2 seconds
+ }
+ //#stream-ref
+ }
+
+ # Deprecated, left here to not break Akka HTTP which refers to it
+ blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+ # Deprecated, will not be used unless user code refer to it, use 'akka.stream.materializer.blocking-io-dispatcher'
+ # instead, or if from code, prefer the 'ActorAttributes.IODispatcher' attribute
+ default-blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+ }
+
+ # configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections)
+ ssl-config {
+ protocol = "TLSv1.2"
+ }
+
+ actor {
+
+ serializers {
+ akka-stream-ref = "akka.stream.serialization.StreamRefSerializer"
+ }
+
+ serialization-bindings {
+ "akka.stream.SinkRef" = akka-stream-ref
+ "akka.stream.SourceRef" = akka-stream-ref
+ "akka.stream.impl.streamref.StreamRefsProtocol" = akka-stream-ref
+ }
+
+ serialization-identifiers {
+ "akka.stream.serialization.StreamRefSerializer" = 30
+ }
+ }
+}
+
+# ssl configuration
+# folded in from former ssl-config-akka module
+ssl-config {
+ logger = "com.typesafe.sslconfig.akka.util.AkkaLoggerBridge"
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>bundle-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../../bundle-parent</relativePath>
+ </parent>
+
+ <artifactId>repackaged-akka</artifactId>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+
+ <properties>
+ <!-- We are just juggling classes here -->
+ <odlparent.modernizer.skip>true</odlparent.modernizer.skip>
+ <odlparent.spotbugs.skip>true</odlparent.spotbugs.skip>
+
+ <!-- We do not want to generate javadoc -->
+ <maven.javadoc.skip>true</maven.javadoc.skip>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka-jar</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>ssl-config-core_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.aeron</groupId>
+ <artifactId>aeron-client</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.aeron</groupId>
+ <artifactId>aeron-driver</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.10.6.Final</version>
+ </dependency>
+ <dependency>
+ <groupId>org.agrona</groupId>
+ <artifactId>agrona</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.reactivestreams</groupId>
+ <artifactId>reactive-streams</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.lmdbjava</groupId>
+ <artifactId>lmdbjava</artifactId>
+ <version>0.7.0</version>
+ <exclusions>
+ <exclusion>
+ <groupId>com.github.jnr</groupId>
+ <artifactId>jffi</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.github.jnr</groupId>
+ <artifactId>jnr-ffi</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.github.jnr</groupId>
+ <artifactId>jnr-constants</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-reflect</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-java8-compat_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-parser-combinators_2.13</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>unpack-license</id>
+ <configuration>
+ <!-- Akka is Apache-2.0 licensed -->
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ <execution>
+ <id>unpack</id>
+ <phase>compile</phase>
+ <goals>
+ <goal>unpack</goal>
+ </goals>
+ <configuration>
+ <artifactItems>
+ <artifactItem>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka-jar</artifactId>
+ <version>${project.version}</version>
+ </artifactItem>
+ <artifactItem>
+ <groupId>com.hierynomus</groupId>
+ <artifactId>asn-one</artifactId>
+ <version>0.4.0</version>
+ </artifactItem>
+ </artifactItems>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ <outputDirectory>${project.build.directory}/classes</outputDirectory>
+ </configuration>
+ </execution>
+ <execution>
+ <id>unpack-sources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>unpack-dependencies</goal>
+ </goals>
+ <configuration>
+ <classifier>sources</classifier>
+ <includeArtifactIds>repackaged-akka-jar</includeArtifactIds>
+ <outputDirectory>${project.build.directory}/shaded-sources</outputDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>move-resources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ <configuration>
+ <target>
+ <move todir="${project.build.directory}/resources">
+ <fileset dir="${project.build.directory}/classes">
+ <include name="*.conf"/>
+ </fileset>
+ </move>
+ </target>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>shaded-sources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>${project.build.directory}/shaded-sources</sources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>shaded-resources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>add-resource</goal>
+ </goals>
+ <configuration>
+ <resources>
+ <resource>
+ <directory>${project.build.directory}/resources</directory>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Automatic-Module-Name>org.opendaylight.controller.repackaged.akka</Automatic-Module-Name>
+ <Export-Package>
+ akka.*,
+ com.typesafe.sslconfig.akka.*,
+ jdk.jfr,
+ </Export-Package>
+ <Import-Package>
+ sun.misc;resolution:=optional,
+ sun.reflect;resolution:=optional,
+ org.fusesource.leveldbjni;resolution:=optional,
+ org.iq80.leveldb;resolution:=optional,
+ org.iq80.leveldb.impl;resolution:=optional,
+ *
+ </Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>controller-artifacts</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<dependencies>
+ <!-- Repackaged Akka -->
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>repackaged-akka</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <!-- Atomix -->
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>atomix-storage</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<!-- Core API/implementation -->
<dependency>
<groupId>${project.groupId}</groupId>
<scope>test</scope>
</dependency>
- <!-- Base model augmentations -->
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
- <version>${project.version}</version>
- </dependency>
-
<!-- Clustered implementation -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>sal-distributed-datastore</artifactId>
<version>${project.version}</version>
</dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>sal-distributed-eos</artifactId>
- <version>${project.version}</version>
- </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>sal-remoterpc-connector</artifactId>
<artifactId>sal-cluster-admin-impl</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-cluster-admin-karaf-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>cds-dom-api</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>cds-mgmt-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>eos-dom-akka</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<!-- Toaster -->
<dependency>
<scope>runtime</scope>
</dependency>
- <!-- MessageBus -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-spi</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-impl</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-util</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>odl-controller-exp-messagebus</artifactId>
- <version>${project.version}</version>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
-
<!-- Clustering system test support -->
<dependency>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>clustering-it-provider</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-karaf-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<!-- Config files -->
<dependency>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-broker</artifactId>
+ <artifactId>odl-controller-akka</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-broker-local</artifactId>
+ <artifactId>odl-controller-scala</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-clustering-commons</artifactId>
+ <artifactId>odl-mdsal-broker</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-controller-mdsal-common</artifactId>
+ <artifactId>odl-controller-broker-local</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-controller-blueprint</artifactId>
+ <artifactId>odl-mdsal-clustering-commons</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-distributed-datastore</artifactId>
+ <artifactId>odl-controller-mdsal-common</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-remoterpc-connector</artifactId>
+ <artifactId>odl-controller-blueprint</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-mdsal-model-inventory</artifactId>
+ <artifactId>odl-mdsal-distributed-datastore</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>odl-controller-model-topology</artifactId>
+ <artifactId>odl-mdsal-remoterpc-connector</artifactId>
<version>${project.version}</version>
<type>xml</type>
<classifier>features</classifier>
<classifier>features</classifier>
<type>xml</type>
</dependency>
-
- <!-- Config remnants -->
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netty-timer-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>threadpool-config-api</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>odl-controller-exp-netty-config</artifactId>
- <version>${project.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- </dependency>
</dependencies>
</dependencyManagement>
</project>
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+<!--
+ ~ Copyright 2017-2021 Open Networking Foundation
+ ~ Copyright 2023 PANTHEON.tech, s.r.o.
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>bundle-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../bundle-parent</relativePath>
+ </parent>
+
+ <artifactId>atomix-storage</artifactId>
+ <name>Atomix Storage</name>
+ <packaging>bundle</packaging>
+
+ <properties>
+ <odlparent.checkstyle.skip>true</odlparent.checkstyle.skip>
+ <odlparent.spotbugs.enforce>false</odlparent.spotbugs.enforce>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.esotericsoftware</groupId>
+ <artifactId>kryo</artifactId>
+ <version>4.0.3</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.esotericsoftware</groupId>
+ <artifactId>minlog</artifactId>
+ <version>1.3.1</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.esotericsoftware</groupId>
+ <artifactId>reflectasm</artifactId>
+ <version>1.11.9</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.objenesis</groupId>
+ <artifactId>objenesis</artifactId>
+ <version>2.6</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava-testlib</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <!-- This project has a different license -->
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>unpack-license</id>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>copy-license</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ <configuration>
+ <target>
+ <copy file="LICENSE" tofile="${project.build.directory}/classes/LICENSE"/>
+ </target>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>check-license</id>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ io.atomix.storage.journal
+ </Export-Package>
+ <Import-Package>
+ sun.nio.ch;resolution:=optional,
+ sun.misc;resolution:=optional,
+ !COM.newmonics.*,
+ !android.os,
+ *
+ </Import-Package>
+
+ <!-- Kryo is using ancient objenesis, so let's embed it to prevent duplicates -->
+ <Embed-Dependency>
+ *;inline=true;groupId=com.esotericsoftware,
+ *;inline=true;groupId=org.objenesis,
+ </Embed-Dependency>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * A {@link JournalReader} traversing only committed entries.
+ */
+@NonNullByDefault
+final class CommitsSegmentJournalReader<E> extends SegmentedJournalReader<E> {
+ CommitsSegmentJournalReader(final SegmentedJournal<E> journal, final JournalSegment segment) {
+ super(journal, segment);
+ }
+
+ @Override
+ public <T> T tryNext(final EntryMapper<E, T> mapper) {
+ return getNextIndex() <= journal.getCommitIndex() ? super.tryNext(mapper) : null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * A {@link StorageLevel#DISK} implementation of {@link FileReader}. Maintains an internal buffer.
+ */
+final class DiskFileReader extends FileReader {
+ /**
+ * Just do not bother with IO smaller than this many bytes.
+ */
+ private static final int MIN_IO_SIZE = 8192;
+
+ private final FileChannel channel;
+ private final ByteBuffer buffer;
+
+ // tracks where memory's first available byte maps to in terms of FileChannel.position()
+ private int bufferPosition;
+
+ DiskFileReader(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+ this(path, channel, allocateBuffer(maxSegmentSize, maxEntrySize));
+ }
+
+ // Note: take ownership of the buffer
+ DiskFileReader(final Path path, final FileChannel channel, final ByteBuffer buffer) {
+ super(path);
+ this.channel = requireNonNull(channel);
+ this.buffer = buffer.flip();
+ bufferPosition = 0;
+ }
+
+ static ByteBuffer allocateBuffer(final int maxSegmentSize, final int maxEntrySize) {
+ return ByteBuffer.allocate(chooseBufferSize(maxSegmentSize, maxEntrySize));
+ }
+
+ private static int chooseBufferSize(final int maxSegmentSize, final int maxEntrySize) {
+ if (maxSegmentSize <= MIN_IO_SIZE) {
+ // just buffer the entire segment
+ return maxSegmentSize;
+ }
+
+ // one full entry plus its header, or MIN_IO_SIZE, which benefits the read of many small entries
+ final int minBufferSize = maxEntrySize + SegmentEntry.HEADER_BYTES;
+ return minBufferSize <= MIN_IO_SIZE ? MIN_IO_SIZE : minBufferSize;
+ }
+
+ @Override
+ void invalidateCache() {
+ buffer.clear().flip();
+ bufferPosition = 0;
+ }
+
+ @Override
+ ByteBuffer read(final int position, final int size) {
+ // calculate logical seek distance between buffer's first byte and position and split flow between
+ // forward-moving and backwards-moving code paths.
+ final int seek = bufferPosition - position;
+ return seek >= 0 ? forwardAndRead(seek, position, size) : rewindAndRead(-seek, position, size);
+ }
+
+ private @NonNull ByteBuffer forwardAndRead(final int seek, final int position, final int size) {
+ final int missing = buffer.limit() - seek - size;
+ if (missing <= 0) {
+ // fast path: we have the requested region
+ return buffer.slice(seek, size).asReadOnlyBuffer();
+ }
+
+ // We need to read more data, but let's salvage what we can:
+ // - set buffer position to seek, which means it points to the same as position
+ // - run compact, which moves everything between position and limit onto the beginning of buffer and
+ // sets it up to receive more bytes
+ // - start the read accounting for the seek
+ buffer.position(seek).compact();
+ readAtLeast(position + seek, missing);
+ return setAndSlice(position, size);
+ }
+
+ private @NonNull ByteBuffer rewindAndRead(final int rewindBy, final int position, final int size) {
+ // TODO: Lazy solution. To be super crisp, we want to find out how much of the buffer we can salvage and
+ // do all the limit/position fiddling before and after read. Right now let's just flow the buffer up and
+ // read it.
+ buffer.clear();
+ readAtLeast(position, size);
+ return setAndSlice(position, size);
+ }
+
+ private void readAtLeast(final int readPosition, final int readAtLeast) {
+ final int bytesRead;
+ try {
+ bytesRead = channel.read(buffer, readPosition);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ verify(bytesRead >= readAtLeast, "Short read %s, expected %s", bytesRead, readAtLeast);
+ buffer.flip();
+ }
+
+ private @NonNull ByteBuffer setAndSlice(final int position, final int size) {
+ bufferPosition = position;
+ return buffer.slice(0, size).asReadOnlyBuffer();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+
+/**
+ * A {@link StorageLevel#DISK} {@link FileWriter}.
+ */
+final class DiskFileWriter extends FileWriter {
+ private static final ByteBuffer ZERO_ENTRY_HEADER = ByteBuffer.wrap(new byte[HEADER_BYTES]);
+
+ private final DiskFileReader reader;
+ private final ByteBuffer buffer;
+
+ DiskFileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+ super(path, channel, maxSegmentSize, maxEntrySize);
+ buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize);
+ reader = new DiskFileReader(path, channel, buffer);
+ }
+
+ @Override
+ DiskFileReader reader() {
+ return reader;
+ }
+
+ @Override
+ MappedByteBuffer buffer() {
+ return null;
+ }
+
+ @Override
+ MappedFileWriter toMapped() {
+ flush();
+ return new MappedFileWriter(path, channel, maxSegmentSize, maxEntrySize);
+ }
+
+ @Override
+ DiskFileWriter toDisk() {
+ return null;
+ }
+
+ @Override
+ void writeEmptyHeader(final int position) {
+ try {
+ channel.write(ZERO_ENTRY_HEADER.asReadOnlyBuffer(), position);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ @Override
+ ByteBuffer startWrite(final int position, final int size) {
+ return buffer.clear().slice(0, size);
+ }
+
+ @Override
+ void commitWrite(final int position, final ByteBuffer entry) {
+ try {
+ channel.write(entry, position);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ @Override
+ void flush() {
+ if (channel.isOpen()) {
+ try {
+ channel.force(true);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+ }
+
+ @Override
+ void close() {
+ flush();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * An abstraction over how to read a {@link JournalSegmentFile}.
+ */
+abstract sealed class FileReader permits DiskFileReader, MappedFileReader {
+ private final Path path;
+
+ FileReader(final Path path) {
+ this.path = requireNonNull(path);
+ }
+
+ /**
+ * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+ */
+ abstract void invalidateCache();
+
+ /**
+ * Read the some bytes as specified position. The sum of position and size is guaranteed not to exceed the maximum
+ * segment size nor maximum entry size.
+ *
+ * @param position position to the entry header
+ * @param size to read
+ * @return resulting buffer
+ */
+ abstract @NonNull ByteBuffer read(int position, int size);
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("path", path).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * An abstraction over how to write a {@link JournalSegmentFile}.
+ */
+abstract sealed class FileWriter permits DiskFileWriter, MappedFileWriter {
+ final Path path;
+ final FileChannel channel;
+ final int maxSegmentSize;
+ final int maxEntrySize;
+
+ FileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+ this.path = requireNonNull(path);
+ this.channel = requireNonNull(channel);
+ this.maxSegmentSize = maxSegmentSize;
+ this.maxEntrySize = maxEntrySize;
+ }
+
+ /**
+ * Return the internal {@link FileReader}.
+ *
+ * @return the internal FileReader
+ */
+ abstract FileReader reader();
+
+ /**
+ * Write {@link SegmentEntry#HEADER_BYTES} worth of zeroes at specified position.
+ *
+ * @param position position to write to
+ */
+ abstract void writeEmptyHeader(int position);
+
+ abstract ByteBuffer startWrite(int position, int size);
+
+ abstract void commitWrite(int position, ByteBuffer entry);
+
+ /**
+ * Flushes written entries to disk.
+ */
+ abstract void flush();
+
+ /**
+ * Closes this writer.
+ */
+ abstract void close();
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("path", path).toString();
+ }
+
+ abstract @Nullable MappedByteBuffer buffer();
+
+ abstract @Nullable MappedFileWriter toMapped();
+
+ abstract @Nullable DiskFileWriter toDisk();
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * Indexed journal entry.
+ *
+ * @param <E> entry type
+ * @param index the entry index
+ * @param entry the indexed entry
+ * @param size the serialized entry size
+ */
+// FIXME: it seems 'index' has to be non-zero, we should enforce that if that really is the case
+// FIXME: it seems 'size' has not be non-zero, we should enforce that if that really is the case
+@NonNullByDefault
+public record Indexed<E>(long index, E entry, int size) {
+ public Indexed {
+ requireNonNull(entry);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("index", index).add("entry", entry).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.Closeable;
+
+/**
+ * Journal.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface Journal<E> extends Closeable {
+
+ /**
+ * Returns the journal writer.
+ *
+ * @return The journal writer.
+ */
+ JournalWriter<E> writer();
+
+ /**
+ * Opens a new journal reader.
+ *
+ * @param index The index at which to start the reader.
+ * @return A new journal reader.
+ */
+ JournalReader<E> openReader(long index);
+
+ /**
+ * Opens a new journal reader.
+ *
+ * @param index The index at which to start the reader.
+ * @param mode the reader mode
+ * @return A new journal reader.
+ */
+ JournalReader<E> openReader(long index, JournalReader.Mode mode);
+
+ /**
+ * Returns a boolean indicating whether the journal is open.
+ *
+ * @return Indicates whether the journal is open.
+ */
+ boolean isOpen();
+
+ @Override
+ void close();
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log reader.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+@NonNullByDefault
+public interface JournalReader<E> extends AutoCloseable {
+ /**
+ * Raft log reader mode.
+ */
+ enum Mode {
+ /**
+ * Reads all entries from the log.
+ */
+ ALL,
+ /**
+ * Reads committed entries from the log.
+ */
+ COMMITS,
+ }
+
+ /**
+ * A journal entry processor. Responsible for transforming entries into their internal representation.
+ *
+ * @param <E> Entry type
+ * @param <T> Internal representation type
+ */
+ @FunctionalInterface
+ interface EntryMapper<E, T> {
+ /**
+ * Process an entry.
+ *
+ * @param index entry index
+ * @param entry entry itself
+ * @param size entry size
+ * @return resulting internal representation
+ */
+ T mapEntry(long index, E entry, int size);
+ }
+
+ /**
+ * Returns the first index in the journal.
+ *
+ * @return the first index in the journal
+ */
+ long getFirstIndex();
+
+ /**
+ * Returns the next reader index.
+ *
+ * @return The next reader index.
+ */
+ long getNextIndex();
+
+ /**
+ * Try to move to the next entry.
+ *
+ * @param mapper callback to be invoked for the entry
+ * @return processed entry, or {@code null}
+ */
+ <T> @Nullable T tryNext(EntryMapper<E, T> mapper);
+
+ /**
+ * Resets the reader to the start.
+ */
+ void reset();
+
+ /**
+ * Resets the reader to the given index.
+ *
+ * @param index The index to which to reset the reader.
+ */
+ void reset(long index);
+
+ @Override
+ void close();
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.index.JournalIndex;
+import io.atomix.storage.journal.index.Position;
+import io.atomix.storage.journal.index.SparseJournalIndex;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log segment.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class JournalSegment implements AutoCloseable {
+ private final JournalSegmentFile file;
+ private final JournalSegmentDescriptor descriptor;
+ private final StorageLevel storageLevel;
+ private final int maxEntrySize;
+ private final JournalIndex journalIndex;
+ private final Set<JournalSegmentReader> readers = ConcurrentHashMap.newKeySet();
+ private final AtomicInteger references = new AtomicInteger();
+ private final FileChannel channel;
+
+ private JournalSegmentWriter writer;
+ private boolean open = true;
+
+ JournalSegment(
+ final JournalSegmentFile file,
+ final JournalSegmentDescriptor descriptor,
+ final StorageLevel storageLevel,
+ final int maxEntrySize,
+ final double indexDensity) {
+ this.file = file;
+ this.descriptor = descriptor;
+ this.storageLevel = storageLevel;
+ this.maxEntrySize = maxEntrySize;
+ journalIndex = new SparseJournalIndex(indexDensity);
+ try {
+ channel = FileChannel.open(file.file().toPath(),
+ StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+
+ final var fileWriter = switch (storageLevel) {
+ case DISK -> new DiskFileWriter(file.file().toPath(), channel, descriptor.maxSegmentSize(), maxEntrySize);
+ case MAPPED -> new MappedFileWriter(file.file().toPath(), channel, descriptor.maxSegmentSize(), maxEntrySize);
+ };
+ writer = new JournalSegmentWriter(fileWriter, this, maxEntrySize, journalIndex)
+ // relinquish mapped memory
+ .toFileChannel();
+ }
+
+ /**
+ * Returns the segment's starting index.
+ *
+ * @return The segment's starting index.
+ */
+ long firstIndex() {
+ return descriptor.index();
+ }
+
+ /**
+ * Returns the last index in the segment.
+ *
+ * @return The last index in the segment.
+ */
+ long lastIndex() {
+ return writer.getLastIndex();
+ }
+
+ /**
+ * Returns the size of the segment.
+ *
+ * @return the size of the segment
+ */
+ int size() {
+ try {
+ return (int) channel.size();
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ /**
+ * Returns the segment file.
+ *
+ * @return The segment file.
+ */
+ JournalSegmentFile file() {
+ return file;
+ }
+
+ /**
+ * Returns the segment descriptor.
+ *
+ * @return The segment descriptor.
+ */
+ JournalSegmentDescriptor descriptor() {
+ return descriptor;
+ }
+
+ /**
+ * Looks up the position of the given index.
+ *
+ * @param index the index to lookup
+ * @return the position of the given index or a lesser index, or {@code null}
+ */
+ @Nullable Position lookup(final long index) {
+ return journalIndex.lookup(index);
+ }
+
+ /**
+ * Acquires a reference to the log segment.
+ */
+ private void acquire() {
+ if (references.getAndIncrement() == 0 && storageLevel == StorageLevel.MAPPED) {
+ writer = writer.toMapped();
+ }
+ }
+
+ /**
+ * Releases a reference to the log segment.
+ */
+ private void release() {
+ if (references.decrementAndGet() == 0) {
+ if (storageLevel == StorageLevel.MAPPED) {
+ writer = writer.toFileChannel();
+ }
+ if (!open) {
+ finishClose();
+ }
+ }
+ }
+
+ /**
+ * Acquires a reference to the segment writer.
+ *
+ * @return The segment writer.
+ */
+ JournalSegmentWriter acquireWriter() {
+ checkOpen();
+ acquire();
+
+ return writer;
+ }
+
+ /**
+ * Releases the reference to the segment writer.
+ */
+ void releaseWriter() {
+ release();
+ }
+
+ /**
+ * Creates a new segment reader.
+ *
+ * @return A new segment reader.
+ */
+ JournalSegmentReader createReader() {
+ checkOpen();
+ acquire();
+
+ final var buffer = writer.buffer();
+ final var path = file.file().toPath();
+ final var fileReader = buffer != null ? new MappedFileReader(path, buffer)
+ : new DiskFileReader(path, channel, descriptor.maxSegmentSize(), maxEntrySize);
+ final var reader = new JournalSegmentReader(this, fileReader, maxEntrySize);
+ reader.setPosition(JournalSegmentDescriptor.BYTES);
+ readers.add(reader);
+ return reader;
+ }
+
+ /**
+ * Closes a segment reader.
+ *
+ * @param reader the closed segment reader
+ */
+ void closeReader(JournalSegmentReader reader) {
+ if (readers.remove(reader)) {
+ release();
+ }
+ }
+
+ /**
+ * Checks whether the segment is open.
+ */
+ private void checkOpen() {
+ if (!open) {
+ throw new IllegalStateException("Segment not open");
+ }
+ }
+
+ /**
+ * Returns a boolean indicating whether the segment is open.
+ *
+ * @return indicates whether the segment is open
+ */
+ public boolean isOpen() {
+ return open;
+ }
+
+ /**
+ * Closes the segment.
+ */
+ @Override
+ public void close() {
+ if (!open) {
+ return;
+ }
+
+ open = false;
+ readers.forEach(JournalSegmentReader::close);
+ if (references.get() == 0) {
+ finishClose();
+ }
+ }
+
+ private void finishClose() {
+ writer.close();
+ try {
+ channel.close();
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ /**
+ * Deletes the segment.
+ */
+ void delete() {
+ try {
+ Files.deleteIfExists(file.file().toPath());
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("id", descriptor.id())
+ .add("version", descriptor.version())
+ .add("index", firstIndex())
+ .toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.ByteBuffer;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Stores information about a {@link JournalSegment} of the log.
+ * <p>
+ * The segment descriptor manages metadata related to a single segment of the log. Descriptors are stored within the
+ * first {@code 64} bytes of each segment in the following order:
+ * <ul>
+ * <li>{@code id} (64-bit signed integer) - A unique segment identifier. This is a monotonically increasing number within
+ * each log. Segments with in-sequence identifiers should contain in-sequence indexes.</li>
+ * <li>{@code index} (64-bit signed integer) - The effective first index of the segment. This indicates the index at which
+ * the first entry should be written to the segment. Indexes are monotonically increasing thereafter.</li>
+ * <li>{@code version} (64-bit signed integer) - The version of the segment. Versions are monotonically increasing
+ * starting at {@code 1}. Versions will only be incremented whenever the segment is rewritten to another memory/disk
+ * space, e.g. after log compaction.</li>
+ * <li>{@code maxSegmentSize} (32-bit unsigned integer) - The maximum number of bytes allowed in the segment.</li>
+ * <li>{@code maxEntries} (32-bit signed integer) - The total number of expected entries in the segment. This is the final
+ * number of entries allowed within the segment both before and after compaction. This entry count is used to determine
+ * the count of internal indexing and deduplication facilities.</li>
+ * <li>{@code updated} (64-bit signed integer) - The last update to the segment in terms of milliseconds since the epoch.
+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+ * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+ * change to {@code updated}.</li>
+ * <li>{@code locked} (8-bit boolean) - A boolean indicating whether the segment is locked. Segments will be locked once
+ * all entries have been committed to the segment. The lock state of each segment is used to determine log compaction
+ * and recovery behavior.</li>
+ * </ul>
+ * The remainder of the 64 segment header bytes are reserved for future metadata.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentDescriptor {
+ public static final int BYTES = 64;
+
+ // Current segment version.
+ @VisibleForTesting
+ static final int VERSION = 1;
+
+ // The lengths of each field in the header.
+ private static final int VERSION_LENGTH = Integer.BYTES; // 32-bit signed integer
+ private static final int ID_LENGTH = Long.BYTES; // 64-bit signed integer
+ private static final int INDEX_LENGTH = Long.BYTES; // 64-bit signed integer
+ private static final int MAX_SIZE_LENGTH = Integer.BYTES; // 32-bit signed integer
+ private static final int MAX_ENTRIES_LENGTH = Integer.BYTES; // 32-bit signed integer
+ private static final int UPDATED_LENGTH = Long.BYTES; // 64-bit signed integer
+
+ // The positions of each field in the header.
+ private static final int VERSION_POSITION = 0; // 0
+ private static final int ID_POSITION = VERSION_POSITION + VERSION_LENGTH; // 4
+ private static final int INDEX_POSITION = ID_POSITION + ID_LENGTH; // 12
+ private static final int MAX_SIZE_POSITION = INDEX_POSITION + INDEX_LENGTH; // 20
+ private static final int MAX_ENTRIES_POSITION = MAX_SIZE_POSITION + MAX_SIZE_LENGTH; // 24
+ private static final int UPDATED_POSITION = MAX_ENTRIES_POSITION + MAX_ENTRIES_LENGTH; // 28
+
+ /**
+ * Returns a descriptor builder.
+ * <p>
+ * The descriptor builder will write segment metadata to a {@code 48} byte in-memory buffer.
+ *
+ * @return The descriptor builder.
+ */
+ public static Builder builder() {
+ return new Builder(ByteBuffer.allocate(BYTES));
+ }
+
+ /**
+ * Returns a descriptor builder for the given descriptor buffer.
+ *
+ * @param buffer The descriptor buffer.
+ * @return The descriptor builder.
+ * @throws NullPointerException if {@code buffer} is null
+ */
+ public static Builder builder(ByteBuffer buffer) {
+ return new Builder(buffer);
+ }
+
+ private final ByteBuffer buffer;
+ private final int version;
+ private final long id;
+ private final long index;
+ private final int maxSegmentSize;
+ private final int maxEntries;
+ private volatile long updated;
+ private volatile boolean locked;
+
+ /**
+ * @throws NullPointerException if {@code buffer} is null
+ */
+ public JournalSegmentDescriptor(ByteBuffer buffer) {
+ this.buffer = buffer;
+ this.version = buffer.getInt();
+ this.id = buffer.getLong();
+ this.index = buffer.getLong();
+ this.maxSegmentSize = buffer.getInt();
+ this.maxEntries = buffer.getInt();
+ this.updated = buffer.getLong();
+ this.locked = buffer.get() == 1;
+ }
+
+ /**
+ * Returns the segment version.
+ * <p>
+ * Versions are monotonically increasing starting at {@code 1}.
+ *
+ * @return The segment version.
+ */
+ public int version() {
+ return version;
+ }
+
+ /**
+ * Returns the segment identifier.
+ * <p>
+ * The segment ID is a monotonically increasing number within each log. Segments with in-sequence identifiers should
+ * contain in-sequence indexes.
+ *
+ * @return The segment identifier.
+ */
+ public long id() {
+ return id;
+ }
+
+ /**
+ * Returns the segment index.
+ * <p>
+ * The index indicates the index at which the first entry should be written to the segment. Indexes are monotonically
+ * increasing thereafter.
+ *
+ * @return The segment index.
+ */
+ public long index() {
+ return index;
+ }
+
+ /**
+ * Returns the maximum count of the segment.
+ *
+ * @return The maximum allowed count of the segment.
+ */
+ public int maxSegmentSize() {
+ return maxSegmentSize;
+ }
+
+ /**
+ * Returns the maximum number of entries allowed in the segment.
+ *
+ * @return The maximum number of entries allowed in the segment.
+ */
+ public int maxEntries() {
+ return maxEntries;
+ }
+
+ /**
+ * Returns last time the segment was updated.
+ * <p>
+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+ * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+ * change to {@code updated}.
+ *
+ * @return The last time the segment was updated in terms of milliseconds since the epoch.
+ */
+ public long updated() {
+ return updated;
+ }
+
+ /**
+ * Writes an update to the descriptor.
+ */
+ public void update(long timestamp) {
+ if (!locked) {
+ buffer.putLong(UPDATED_POSITION, timestamp);
+ this.updated = timestamp;
+ }
+ }
+
+ /**
+ * Copies the segment to a new buffer.
+ */
+ JournalSegmentDescriptor copyTo(ByteBuffer buffer) {
+ buffer.putInt(version);
+ buffer.putLong(id);
+ buffer.putLong(index);
+ buffer.putInt(maxSegmentSize);
+ buffer.putInt(maxEntries);
+ buffer.putLong(updated);
+ buffer.put(locked ? (byte) 1 : (byte) 0);
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this)
+ .add("version", version)
+ .add("id", id)
+ .add("index", index)
+ .add("updated", updated)
+ .toString();
+ }
+
+ /**
+ * Segment descriptor builder.
+ */
+ public static class Builder {
+ private final ByteBuffer buffer;
+
+ private Builder(ByteBuffer buffer) {
+ this.buffer = requireNonNull(buffer, "buffer cannot be null");
+ buffer.putInt(VERSION_POSITION, VERSION);
+ }
+
+ /**
+ * Sets the segment identifier.
+ *
+ * @param id The segment identifier.
+ * @return The segment descriptor builder.
+ */
+ public Builder withId(long id) {
+ buffer.putLong(ID_POSITION, id);
+ return this;
+ }
+
+ /**
+ * Sets the segment index.
+ *
+ * @param index The segment starting index.
+ * @return The segment descriptor builder.
+ */
+ public Builder withIndex(long index) {
+ buffer.putLong(INDEX_POSITION, index);
+ return this;
+ }
+
+ /**
+ * Sets maximum count of the segment.
+ *
+ * @param maxSegmentSize The maximum count of the segment.
+ * @return The segment descriptor builder.
+ */
+ public Builder withMaxSegmentSize(int maxSegmentSize) {
+ buffer.putInt(MAX_SIZE_POSITION, maxSegmentSize);
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of entries in the segment.
+ *
+ * @param maxEntries The maximum number of entries in the segment.
+ * @return The segment descriptor builder.
+ * @deprecated since 3.0.2
+ */
+ @Deprecated
+ public Builder withMaxEntries(int maxEntries) {
+ buffer.putInt(MAX_ENTRIES_POSITION, maxEntries);
+ return this;
+ }
+
+ /**
+ * Builds the segment descriptor.
+ *
+ * @return The built segment descriptor.
+ */
+ public JournalSegmentDescriptor build() {
+ buffer.rewind();
+ return new JournalSegmentDescriptor(buffer);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Segment file utility.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentFile {
+ private static final char PART_SEPARATOR = '-';
+ private static final char EXTENSION_SEPARATOR = '.';
+ private static final String EXTENSION = "log";
+ private final File file;
+
+ /**
+ * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
+ *
+ * @throws NullPointerException if {@code file} is null
+ */
+ public static boolean isSegmentFile(String name, File file) {
+ return isSegmentFile(name, file.getName());
+ }
+
+ /**
+ * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
+ *
+ * @param journalName the name of the journal
+ * @param fileName the name of the file to check
+ * @throws NullPointerException if {@code file} is null
+ */
+ public static boolean isSegmentFile(String journalName, String fileName) {
+ requireNonNull(journalName, "journalName cannot be null");
+ requireNonNull(fileName, "fileName cannot be null");
+
+ int partSeparator = fileName.lastIndexOf(PART_SEPARATOR);
+ int extensionSeparator = fileName.lastIndexOf(EXTENSION_SEPARATOR);
+
+ if (extensionSeparator == -1
+ || partSeparator == -1
+ || extensionSeparator < partSeparator
+ || !fileName.endsWith(EXTENSION)) {
+ return false;
+ }
+
+ for (int i = partSeparator + 1; i < extensionSeparator; i++) {
+ if (!Character.isDigit(fileName.charAt(i))) {
+ return false;
+ }
+ }
+
+ return fileName.startsWith(journalName);
+ }
+
+ /**
+ * Creates a segment file for the given directory, log name, segment ID, and segment version.
+ */
+ static File createSegmentFile(String name, File directory, long id) {
+ return new File(directory, String.format("%s-%d.log", requireNonNull(name, "name cannot be null"), id));
+ }
+
+ /**
+ * @throws IllegalArgumentException if {@code file} is not a valid segment file
+ */
+ JournalSegmentFile(File file) {
+ this.file = file;
+ }
+
+ /**
+ * Returns the segment file.
+ *
+ * @return The segment file.
+ */
+ public File file() {
+ return file;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class JournalSegmentReader {
+ private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentReader.class);
+
+ private final JournalSegment segment;
+ private final FileReader fileReader;
+ private final int maxSegmentSize;
+ private final int maxEntrySize;
+
+ private int position;
+
+ JournalSegmentReader(final JournalSegment segment, final FileReader fileReader, final int maxEntrySize) {
+ this.segment = requireNonNull(segment);
+ this.fileReader = requireNonNull(fileReader);
+ maxSegmentSize = segment.descriptor().maxSegmentSize();
+ this.maxEntrySize = maxEntrySize;
+ }
+
+ /**
+ * Return the current position.
+ *
+ * @return current position.
+ */
+ int position() {
+ return position;
+ }
+
+ /**
+ * Set the file position.
+ *
+ * @param position new position
+ */
+ void setPosition(final int position) {
+ verify(position >= JournalSegmentDescriptor.BYTES && position < maxSegmentSize,
+ "Invalid position %s", position);
+ this.position = position;
+ fileReader.invalidateCache();
+ }
+
+ /**
+ * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+ */
+ void invalidateCache() {
+ fileReader.invalidateCache();
+ }
+
+ /**
+ * Reads the next binary data block
+ *
+ * @param index entry index
+ * @return The binary data, or {@code null}
+ */
+ @Nullable ByteBuf readBytes(final long index) {
+ // Check if there is enough in the buffer remaining
+ final int remaining = maxSegmentSize - position - SegmentEntry.HEADER_BYTES;
+ if (remaining < 0) {
+ // Not enough space in the segment, there can never be another entry
+ return null;
+ }
+
+ // Calculate maximum entry length not exceeding file size nor maxEntrySize
+ final var maxLength = Math.min(remaining, maxEntrySize);
+ final var buffer = fileReader.read(position, maxLength + SegmentEntry.HEADER_BYTES);
+
+ // Read the entry length
+ final var length = buffer.getInt(0);
+ if (length < 1 || length > maxLength) {
+ // Invalid length, make sure next read re-tries
+ invalidateCache();
+ return null;
+ }
+
+ // Read the entry checksum
+ final int checksum = buffer.getInt(Integer.BYTES);
+
+ // Slice off the entry's bytes
+ final var entryBuffer = buffer.slice(SegmentEntry.HEADER_BYTES, length);
+ // Compute the checksum for the entry bytes.
+ final var crc32 = new CRC32();
+ crc32.update(entryBuffer);
+
+ // If the stored checksum does not equal the computed checksum, do not proceed further
+ final var computed = (int) crc32.getValue();
+ if (checksum != computed) {
+ LOG.warn("Expected checksum {}, computed {}", Integer.toHexString(checksum), Integer.toHexString(computed));
+ invalidateCache();
+ return null;
+ }
+
+ // update position
+ position += SegmentEntry.HEADER_BYTES + length;
+
+ // return bytes
+ entryBuffer.rewind();
+ return Unpooled.buffer(length).writeBytes(entryBuffer);
+ }
+
+ /**
+ * Close this reader.
+ */
+ void close() {
+ segment.closeReader(this);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+import static java.util.Objects.requireNonNull;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import io.netty.buffer.ByteBuf;
+import java.nio.MappedByteBuffer;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class JournalSegmentWriter {
+ private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentWriter.class);
+
+ private final FileWriter fileWriter;
+ final @NonNull JournalSegment segment;
+ private final @NonNull JournalIndex index;
+ final int maxSegmentSize;
+ final int maxEntrySize;
+
+ private int currentPosition;
+ private Long lastIndex;
+
+ JournalSegmentWriter(final FileWriter fileWriter, final JournalSegment segment, final int maxEntrySize,
+ final JournalIndex index) {
+ this.fileWriter = requireNonNull(fileWriter);
+ this.segment = requireNonNull(segment);
+ this.index = requireNonNull(index);
+ maxSegmentSize = segment.descriptor().maxSegmentSize();
+ this.maxEntrySize = maxEntrySize;
+ // adjust lastEntry value
+ reset(0);
+ }
+
+ JournalSegmentWriter(final JournalSegmentWriter previous, final FileWriter fileWriter) {
+ segment = previous.segment;
+ index = previous.index;
+ maxSegmentSize = previous.maxSegmentSize;
+ maxEntrySize = previous.maxEntrySize;
+ lastIndex = previous.lastIndex;
+ currentPosition = previous.currentPosition;
+ this.fileWriter = requireNonNull(fileWriter);
+ }
+
+ /**
+ * Returns the last written index.
+ *
+ * @return The last written index.
+ */
+ long getLastIndex() {
+ return lastIndex != null ? lastIndex : segment.firstIndex() - 1;
+ }
+
+ /**
+ * Returns the next index to be written.
+ *
+ * @return The next index to be written.
+ */
+ long getNextIndex() {
+ return lastIndex != null ? lastIndex + 1 : segment.firstIndex();
+ }
+
+ /**
+ * Tries to append a binary data to the journal.
+ *
+ * @param buf binary data to append
+ * @return The index of appended data, or {@code null} if segment has no space
+ */
+ Long append(final ByteBuf buf) {
+ final var length = buf.readableBytes();
+ if (length > maxEntrySize) {
+ throw new StorageException.TooLarge("Serialized entry size exceeds maximum allowed bytes ("
+ + maxEntrySize + ")");
+ }
+
+ // Store the entry index.
+ final long index = getNextIndex();
+ final int position = currentPosition;
+
+ // check space available
+ final int nextPosition = position + HEADER_BYTES + length;
+ if (nextPosition >= maxSegmentSize) {
+ LOG.trace("Not enough space for {} at {}", index, position);
+ return null;
+ }
+
+ // allocate buffer and write data
+ final var writeBuffer = fileWriter.startWrite(position, length + HEADER_BYTES).position(HEADER_BYTES);
+ writeBuffer.put(buf.nioBuffer());
+
+ // Compute the checksum for the entry.
+ final var crc32 = new CRC32();
+ crc32.update(writeBuffer.flip().position(HEADER_BYTES));
+
+ // Create a single byte[] in memory for the entire entry and write it as a batch to the underlying buffer.
+ writeBuffer.putInt(0, length).putInt(Integer.BYTES, (int) crc32.getValue());
+ fileWriter.commitWrite(position, writeBuffer.rewind());
+
+ // Update the last entry with the correct index/term/length.
+ currentPosition = nextPosition;
+ lastIndex = index;
+ this.index.index(index, position);
+
+ return index;
+ }
+
+ /**
+ * Resets the head of the segment to the given index.
+ *
+ * @param index the index to which to reset the head of the segment
+ */
+ void reset(final long index) {
+ // acquire ownership of cache and make sure reader does not see anything we've done once we're done
+ final var fileReader = fileWriter.reader();
+ try {
+ resetWithBuffer(fileReader, index);
+ } finally {
+ // Make sure reader does not see anything we've done
+ fileReader.invalidateCache();
+ }
+ }
+
+ private void resetWithBuffer(final FileReader fileReader, final long index) {
+ long nextIndex = segment.firstIndex();
+
+ // Clear the buffer indexes and acquire ownership of the buffer
+ currentPosition = JournalSegmentDescriptor.BYTES;
+ final var reader = new JournalSegmentReader(segment, fileReader, maxEntrySize);
+ reader.setPosition(JournalSegmentDescriptor.BYTES);
+
+ while (index == 0 || nextIndex <= index) {
+ final var buf = reader.readBytes(nextIndex);
+ if (buf == null) {
+ break;
+ }
+
+ lastIndex = nextIndex;
+ this.index.index(nextIndex, currentPosition);
+ nextIndex++;
+
+ // Update the current position for indexing.
+ currentPosition += HEADER_BYTES + buf.readableBytes();
+ }
+ }
+
+ /**
+ * Truncates the log to the given index.
+ *
+ * @param index The index to which to truncate the log.
+ */
+ void truncate(final long index) {
+ // If the index is greater than or equal to the last index, skip the truncate.
+ if (index >= getLastIndex()) {
+ return;
+ }
+
+ // Reset the last written
+ lastIndex = null;
+
+ // Truncate the index.
+ this.index.truncate(index);
+
+ if (index < segment.firstIndex()) {
+ // Reset the writer to the first entry.
+ currentPosition = JournalSegmentDescriptor.BYTES;
+ } else {
+ // Reset the writer to the given index.
+ reset(index);
+ }
+
+ // Zero the entry header at current channel position.
+ fileWriter.writeEmptyHeader(currentPosition);
+ }
+
+ /**
+ * Flushes written entries to disk.
+ */
+ void flush() {
+ fileWriter.flush();
+ }
+
+ /**
+ * Closes this writer.
+ */
+ void close() {
+ fileWriter.close();
+ }
+
+ /**
+ * Returns the mapped buffer underlying the segment writer, or {@code null} if the writer does not have such a
+ * buffer.
+ *
+ * @return the mapped buffer underlying the segment writer, or {@code null}.
+ */
+ @Nullable MappedByteBuffer buffer() {
+ return fileWriter.buffer();
+ }
+
+ @NonNull JournalSegmentWriter toMapped() {
+ final var newWriter = fileWriter.toMapped();
+ return newWriter == null ? this : new JournalSegmentWriter(this, newWriter);
+ }
+
+ @NonNull JournalSegmentWriter toFileChannel() {
+ final var newWriter = fileWriter.toDisk();
+ return newWriter == null ? this : new JournalSegmentWriter(this, newWriter);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import io.atomix.utils.serializer.KryoJournalSerdesBuilder;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * Support for serialization of {@link Journal} entries.
+ *
+ * @deprecated due to dependency on outdated Kryo library, {@link JournalSerializer} to be used instead.
+ */
+@Deprecated(forRemoval = true, since="9.0.3")
+public interface JournalSerdes {
+ /**
+ * Serializes given object to byte array.
+ *
+ * @param obj Object to serialize
+ * @return serialized bytes
+ */
+ byte[] serialize(Object obj);
+
+ /**
+ * Serializes given object to byte array.
+ *
+ * @param obj Object to serialize
+ * @param bufferSize maximum size of serialized bytes
+ * @return serialized bytes
+ */
+ byte[] serialize(Object obj, int bufferSize);
+
+ /**
+ * Serializes given object to byte buffer.
+ *
+ * @param obj Object to serialize
+ * @param buffer to write to
+ */
+ void serialize(Object obj, ByteBuffer buffer);
+
+ /**
+ * Serializes given object to OutputStream.
+ *
+ * @param obj Object to serialize
+ * @param stream to write to
+ */
+ void serialize(Object obj, OutputStream stream);
+
+ /**
+ * Serializes given object to OutputStream.
+ *
+ * @param obj Object to serialize
+ * @param stream to write to
+ * @param bufferSize size of the buffer in front of the stream
+ */
+ void serialize(Object obj, OutputStream stream, int bufferSize);
+
+ /**
+ * Deserializes given byte array to Object.
+ *
+ * @param bytes serialized bytes
+ * @param <T> deserialized Object type
+ * @return deserialized Object
+ */
+ <T> T deserialize(byte[] bytes);
+
+ /**
+ * Deserializes given byte buffer to Object.
+ *
+ * @param buffer input with serialized bytes
+ * @param <T> deserialized Object type
+ * @return deserialized Object
+ */
+ <T> T deserialize(final ByteBuffer buffer);
+
+ /**
+ * Deserializes given InputStream to an Object.
+ *
+ * @param stream input stream
+ * @param <T> deserialized Object type
+ * @return deserialized Object
+ */
+ <T> T deserialize(InputStream stream);
+
+ /**
+ * Deserializes given InputStream to an Object.
+ *
+ * @param stream input stream
+ * @param <T> deserialized Object type
+ * @param bufferSize size of the buffer in front of the stream
+ * @return deserialized Object
+ */
+ <T> T deserialize(final InputStream stream, final int bufferSize);
+
+ /**
+ * Creates a new {@link JournalSerdes} builder.
+ *
+ * @return builder
+ */
+ static Builder builder() {
+ return new KryoJournalSerdesBuilder();
+ }
+
+ /**
+ * Builder for {@link JournalSerdes}.
+ */
+ interface Builder {
+ /**
+ * Builds a {@link JournalSerdes} instance.
+ *
+ * @return A {@link JournalSerdes} implementation.
+ */
+ JournalSerdes build();
+
+ /**
+ * Builds a {@link JournalSerdes} instance.
+ *
+ * @param friendlyName friendly name for the namespace
+ * @return A {@link JournalSerdes} implementation.
+ */
+ JournalSerdes build(String friendlyName);
+
+ /**
+ * Registers serializer for the given set of classes.
+ * <p>
+ * When multiple classes are registered with an explicitly provided serializer, the namespace guarantees
+ * all instances will be serialized with the same type ID.
+ *
+ * @param classes list of classes to register
+ * @param serdes serializer to use for the class
+ * @return this builder
+ */
+ Builder register(EntrySerdes<?> serdes, Class<?>... classes);
+
+ /**
+ * Sets the namespace class loader.
+ *
+ * @param classLoader the namespace class loader
+ * @return this builder
+ */
+ Builder setClassLoader(ClassLoader classLoader);
+ }
+
+ /**
+ * Input data stream exposed to {@link EntrySerdes#read(EntryInput)}.
+ */
+ @Beta
+ interface EntryInput {
+
+ byte[] readBytes(int length) throws IOException;
+
+ long readLong() throws IOException;
+
+ String readString() throws IOException;
+
+ Object readObject() throws IOException;
+
+ @VisibleForTesting
+ int readVarInt() throws IOException;
+ }
+
+ /**
+ * Output data stream exposed to {@link EntrySerdes#write(EntryOutput, Object)}.
+ */
+ @Beta
+ interface EntryOutput {
+
+ void writeBytes(byte[] bytes) throws IOException;
+
+ void writeLong(long value) throws IOException;
+
+ void writeObject(Object value) throws IOException;
+
+ void writeString(String value) throws IOException;
+
+ @VisibleForTesting
+ void writeVarInt(int value) throws IOException;
+ }
+
+ /**
+ * A serializer/deserializer for an entry.
+ *
+ * @param <T> Entry type
+ */
+ interface EntrySerdes<T> {
+
+ T read(EntryInput input) throws IOException;
+
+ void write(EntryOutput output, T entry) throws IOException;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech s.r.o. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package io.atomix.storage.journal;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufUtil;
+import io.netty.buffer.Unpooled;
+
+/**
+ * Support for serialization of {@link Journal} entries.
+ */
+public interface JournalSerializer<T> {
+
+ /**
+ * Serializes given object to byte array.
+ *
+ * @param obj Object to serialize
+ * @return serialized bytes as {@link ByteBuf}
+ */
+ ByteBuf serialize(T obj) ;
+
+ /**
+ * Deserializes given byte array to Object.
+ *
+ * @param buf serialized bytes as {@link ByteBuf}
+ * @return deserialized Object
+ */
+ T deserialize(final ByteBuf buf);
+
+ static <E> JournalSerializer<E> wrap(final JournalSerdes serdes) {
+ return new JournalSerializer<>() {
+ @Override
+ public ByteBuf serialize(final E obj) {
+ return Unpooled.wrappedBuffer(serdes.serialize(obj));
+ }
+
+ @Override
+ public E deserialize(final ByteBuf buf) {
+ return serdes.deserialize(ByteBufUtil.getBytes(buf));
+ }
+ };
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Log writer.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface JournalWriter<E> {
+ /**
+ * Returns the last written index.
+ *
+ * @return The last written index.
+ */
+ long getLastIndex();
+
+ /**
+ * Returns the next index to be written.
+ *
+ * @return The next index to be written.
+ */
+ long getNextIndex();
+
+ /**
+ * Appends an entry to the journal.
+ *
+ * @param entry The entry to append.
+ * @return The appended indexed entry.
+ */
+ <T extends E> @NonNull Indexed<T> append(T entry);
+
+ /**
+ * Commits entries up to the given index.
+ *
+ * @param index The index up to which to commit entries.
+ */
+ void commit(long index);
+
+ /**
+ * Resets the head of the journal to the given index.
+ *
+ * @param index the index to which to reset the head of the journal
+ */
+ void reset(long index);
+
+ /**
+ * Truncates the log to the given index.
+ *
+ * @param index The index to which to truncate the log.
+ */
+ void truncate(long index);
+
+ /**
+ * Flushes written entries to disk.
+ */
+ void flush();
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+/**
+ * A {@link StorageLevel#MAPPED} implementation of {@link FileReader}. Operates on direct mapping of the entire file.
+ */
+final class MappedFileReader extends FileReader {
+ private final ByteBuffer buffer;
+
+ MappedFileReader(final Path path, final ByteBuffer buffer) {
+ super(path);
+ this.buffer = buffer.slice().asReadOnlyBuffer();
+ }
+
+ @Override
+ void invalidateCache() {
+ // No-op: the mapping is guaranteed to be coherent
+ }
+
+ @Override
+ ByteBuffer read(final int position, final int size) {
+ return buffer.slice(position, size);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.netty.util.internal.PlatformDependent;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * A {@link StorageLevel#MAPPED} {@link FileWriter}.
+ */
+final class MappedFileWriter extends FileWriter {
+ private final @NonNull MappedByteBuffer mappedBuffer;
+ private final MappedFileReader reader;
+ private final ByteBuffer buffer;
+
+ MappedFileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+ super(path, channel, maxSegmentSize, maxEntrySize);
+
+ mappedBuffer = mapBuffer(channel, maxSegmentSize);
+ buffer = mappedBuffer.slice();
+ reader = new MappedFileReader(path, mappedBuffer);
+ }
+
+ private static @NonNull MappedByteBuffer mapBuffer(final FileChannel channel, final int maxSegmentSize) {
+ try {
+ return channel.map(FileChannel.MapMode.READ_WRITE, 0, maxSegmentSize);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ @Override
+ MappedFileReader reader() {
+ return reader;
+ }
+
+ @Override
+ MappedByteBuffer buffer() {
+ return mappedBuffer;
+ }
+
+ @Override
+ MappedFileWriter toMapped() {
+ return null;
+ }
+
+ @Override
+ DiskFileWriter toDisk() {
+ close();
+ return new DiskFileWriter(path, channel, maxSegmentSize, maxEntrySize);
+ }
+
+ @Override
+ void writeEmptyHeader(final int position) {
+ // Note: we issue a single putLong() instead of two putInt()s.
+ buffer.putLong(position, 0L);
+ }
+
+ @Override
+ ByteBuffer startWrite(final int position, final int size) {
+ return buffer.slice(position, size);
+ }
+
+ @Override
+ void commitWrite(final int position, final ByteBuffer entry) {
+ // No-op, buffer is write-through
+ }
+
+ @Override
+ void flush() {
+ mappedBuffer.force();
+ }
+
+ @Override
+ void close() {
+ flush();
+ PlatformDependent.freeDirectBuffer(mappedBuffer);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link Indexed} entry read from {@link JournalSegment}.
+ *
+ * @param checksum The CRC32 checksum of data
+ * @param bytes Entry bytes
+ */
+record SegmentEntry(int checksum, ByteBuffer bytes) {
+ /**
+ * The size of the header, comprising of:
+ * <ul>
+ * <li>32-bit signed entry length</li>
+ * <li>32-bit unsigned CRC32 checksum</li>
+ * </li>
+ */
+ static final int HEADER_BYTES = Integer.BYTES + Integer.BYTES;
+
+ SegmentEntry {
+ if (bytes.remaining() < 1) {
+ throw new IllegalArgumentException("Invalid entry bytes " + bytes);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.StandardOpenOption;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Segmented journal.
+ */
+public final class SegmentedJournal<E> implements Journal<E> {
+ /**
+ * Returns a new Raft log builder.
+ *
+ * @return A new Raft log builder.
+ */
+ public static <E> Builder<E> builder() {
+ return new Builder<>();
+ }
+
+ private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournal.class);
+ private static final int SEGMENT_BUFFER_FACTOR = 3;
+
+ private final String name;
+ private final StorageLevel storageLevel;
+ private final File directory;
+ private final JournalSerializer<E> serializer;
+ private final int maxSegmentSize;
+ private final int maxEntrySize;
+ private final int maxEntriesPerSegment;
+ private final double indexDensity;
+ private final boolean flushOnCommit;
+ private final SegmentedJournalWriter<E> writer;
+ private volatile long commitIndex;
+
+ private final ConcurrentNavigableMap<Long, JournalSegment> segments = new ConcurrentSkipListMap<>();
+ private final Collection<SegmentedJournalReader> readers = ConcurrentHashMap.newKeySet();
+ private JournalSegment currentSegment;
+
+ private volatile boolean open = true;
+
+ public SegmentedJournal(
+ String name,
+ StorageLevel storageLevel,
+ File directory,
+ JournalSerdes namespace,
+ int maxSegmentSize,
+ int maxEntrySize,
+ int maxEntriesPerSegment,
+ double indexDensity,
+ boolean flushOnCommit) {
+ this.name = requireNonNull(name, "name cannot be null");
+ this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+ this.directory = requireNonNull(directory, "directory cannot be null");
+ this.serializer = JournalSerializer.wrap(requireNonNull(namespace, "namespace cannot be null"));
+ this.maxSegmentSize = maxSegmentSize;
+ this.maxEntrySize = maxEntrySize;
+ this.maxEntriesPerSegment = maxEntriesPerSegment;
+ this.indexDensity = indexDensity;
+ this.flushOnCommit = flushOnCommit;
+ open();
+ this.writer = new SegmentedJournalWriter<>(this);
+ }
+
+ /**
+ * Returns the segment file name prefix.
+ *
+ * @return The segment file name prefix.
+ */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns the storage directory.
+ * <p>
+ * The storage directory is the directory to which all segments write files. Segment files for multiple logs may be
+ * stored in the storage directory, and files for each log instance will be identified by the {@code prefix} provided
+ * when the log is opened.
+ *
+ * @return The storage directory.
+ */
+ public File directory() {
+ return directory;
+ }
+
+ /**
+ * Returns the storage level.
+ * <p>
+ * The storage level dictates how entries within individual journal segments should be stored.
+ *
+ * @return The storage level.
+ */
+ public StorageLevel storageLevel() {
+ return storageLevel;
+ }
+
+ /**
+ * Returns the maximum journal segment size.
+ * <p>
+ * The maximum segment size dictates the maximum size any segment in a segment may consume in bytes.
+ *
+ * @return The maximum segment size in bytes.
+ */
+ public int maxSegmentSize() {
+ return maxSegmentSize;
+ }
+
+ /**
+ * Returns the maximum journal entry size.
+ * <p>
+ * The maximum entry size dictates the maximum size any entry in the segment may consume in bytes.
+ *
+ * @return the maximum entry size in bytes
+ */
+ public int maxEntrySize() {
+ return maxEntrySize;
+ }
+
+ /**
+ * Returns the maximum number of entries per segment.
+ * <p>
+ * The maximum entries per segment dictates the maximum number of entries that are allowed to be stored in any segment
+ * in a journal.
+ *
+ * @return The maximum number of entries per segment.
+ * @deprecated since 3.0.2
+ */
+ @Deprecated
+ public int maxEntriesPerSegment() {
+ return maxEntriesPerSegment;
+ }
+
+ /**
+ * Returns the collection of journal segments.
+ *
+ * @return the collection of journal segments
+ */
+ public Collection<JournalSegment> segments() {
+ return segments.values();
+ }
+
+ /**
+ * Returns the collection of journal segments with indexes greater than the given index.
+ *
+ * @param index the starting index
+ * @return the journal segments starting with indexes greater than or equal to the given index
+ */
+ public Collection<JournalSegment> segments(long index) {
+ return segments.tailMap(index).values();
+ }
+
+ /**
+ * Returns serializer instance.
+ *
+ * @return serializer instance
+ */
+ JournalSerializer<E> serializer() {
+ return serializer;
+ }
+
+ /**
+ * Returns the total size of the journal.
+ *
+ * @return the total size of the journal
+ */
+ public long size() {
+ return segments.values().stream()
+ .mapToLong(segment -> segment.size())
+ .sum();
+ }
+
+ @Override
+ public JournalWriter<E> writer() {
+ return writer;
+ }
+
+ @Override
+ public JournalReader<E> openReader(long index) {
+ return openReader(index, JournalReader.Mode.ALL);
+ }
+
+ /**
+ * Opens a new Raft log reader with the given reader mode.
+ *
+ * @param index The index from which to begin reading entries.
+ * @param mode The mode in which to read entries.
+ * @return The Raft log reader.
+ */
+ public JournalReader<E> openReader(long index, JournalReader.Mode mode) {
+ final var segment = getSegment(index);
+ final var reader = switch (mode) {
+ case ALL -> new SegmentedJournalReader<>(this, segment);
+ case COMMITS -> new CommitsSegmentJournalReader<>(this, segment);
+ };
+
+ // Forward reader to specified index
+ long next = reader.getNextIndex();
+ while (index > next && reader.tryAdvance()) {
+ next = reader.getNextIndex();
+ }
+
+ readers.add(reader);
+ return reader;
+ }
+
+ /**
+ * Opens the segments.
+ */
+ private synchronized void open() {
+ // Load existing log segments from disk.
+ for (JournalSegment segment : loadSegments()) {
+ segments.put(segment.descriptor().index(), segment);
+ }
+
+ // If a segment doesn't already exist, create an initial segment starting at index 1.
+ if (!segments.isEmpty()) {
+ currentSegment = segments.lastEntry().getValue();
+ } else {
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ .withId(1)
+ .withIndex(1)
+ .withMaxSegmentSize(maxSegmentSize)
+ .withMaxEntries(maxEntriesPerSegment)
+ .build();
+
+ currentSegment = createSegment(descriptor);
+ currentSegment.descriptor().update(System.currentTimeMillis());
+
+ segments.put(1L, currentSegment);
+ }
+ }
+
+ /**
+ * Asserts that the manager is open.
+ *
+ * @throws IllegalStateException if the segment manager is not open
+ */
+ private void assertOpen() {
+ checkState(currentSegment != null, "journal not open");
+ }
+
+ /**
+ * Asserts that enough disk space is available to allocate a new segment.
+ */
+ private void assertDiskSpace() {
+ if (directory().getUsableSpace() < maxSegmentSize() * SEGMENT_BUFFER_FACTOR) {
+ throw new StorageException.OutOfDiskSpace("Not enough space to allocate a new journal segment");
+ }
+ }
+
+ /**
+ * Resets the current segment, creating a new segment if necessary.
+ */
+ private synchronized void resetCurrentSegment() {
+ JournalSegment lastSegment = getLastSegment();
+ if (lastSegment != null) {
+ currentSegment = lastSegment;
+ } else {
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ .withId(1)
+ .withIndex(1)
+ .withMaxSegmentSize(maxSegmentSize)
+ .withMaxEntries(maxEntriesPerSegment)
+ .build();
+
+ currentSegment = createSegment(descriptor);
+
+ segments.put(1L, currentSegment);
+ }
+ }
+
+ /**
+ * Resets and returns the first segment in the journal.
+ *
+ * @param index the starting index of the journal
+ * @return the first segment
+ */
+ JournalSegment resetSegments(long index) {
+ assertOpen();
+
+ // If the index already equals the first segment index, skip the reset.
+ JournalSegment firstSegment = getFirstSegment();
+ if (index == firstSegment.firstIndex()) {
+ return firstSegment;
+ }
+
+ for (JournalSegment segment : segments.values()) {
+ segment.close();
+ segment.delete();
+ }
+ segments.clear();
+
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ .withId(1)
+ .withIndex(index)
+ .withMaxSegmentSize(maxSegmentSize)
+ .withMaxEntries(maxEntriesPerSegment)
+ .build();
+ currentSegment = createSegment(descriptor);
+ segments.put(index, currentSegment);
+ return currentSegment;
+ }
+
+ /**
+ * Returns the first segment in the log.
+ *
+ * @throws IllegalStateException if the segment manager is not open
+ */
+ JournalSegment getFirstSegment() {
+ assertOpen();
+ Map.Entry<Long, JournalSegment> segment = segments.firstEntry();
+ return segment != null ? segment.getValue() : null;
+ }
+
+ /**
+ * Returns the last segment in the log.
+ *
+ * @throws IllegalStateException if the segment manager is not open
+ */
+ JournalSegment getLastSegment() {
+ assertOpen();
+ Map.Entry<Long, JournalSegment> segment = segments.lastEntry();
+ return segment != null ? segment.getValue() : null;
+ }
+
+ /**
+ * Creates and returns the next segment.
+ *
+ * @return The next segment.
+ * @throws IllegalStateException if the segment manager is not open
+ */
+ synchronized JournalSegment getNextSegment() {
+ assertOpen();
+ assertDiskSpace();
+
+ JournalSegment lastSegment = getLastSegment();
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ .withId(lastSegment != null ? lastSegment.descriptor().id() + 1 : 1)
+ .withIndex(currentSegment.lastIndex() + 1)
+ .withMaxSegmentSize(maxSegmentSize)
+ .withMaxEntries(maxEntriesPerSegment)
+ .build();
+
+ currentSegment = createSegment(descriptor);
+
+ segments.put(descriptor.index(), currentSegment);
+ return currentSegment;
+ }
+
+ /**
+ * Returns the segment following the segment with the given ID.
+ *
+ * @param index The segment index with which to look up the next segment.
+ * @return The next segment for the given index.
+ */
+ JournalSegment getNextSegment(long index) {
+ Map.Entry<Long, JournalSegment> nextSegment = segments.higherEntry(index);
+ return nextSegment != null ? nextSegment.getValue() : null;
+ }
+
+ /**
+ * Returns the segment for the given index.
+ *
+ * @param index The index for which to return the segment.
+ * @throws IllegalStateException if the segment manager is not open
+ */
+ synchronized JournalSegment getSegment(long index) {
+ assertOpen();
+ // Check if the current segment contains the given index first in order to prevent an unnecessary map lookup.
+ if (currentSegment != null && index > currentSegment.firstIndex()) {
+ return currentSegment;
+ }
+
+ // If the index is in another segment, get the entry with the next lowest first index.
+ Map.Entry<Long, JournalSegment> segment = segments.floorEntry(index);
+ if (segment != null) {
+ return segment.getValue();
+ }
+ return getFirstSegment();
+ }
+
+ /**
+ * Removes a segment.
+ *
+ * @param segment The segment to remove.
+ */
+ synchronized void removeSegment(JournalSegment segment) {
+ segments.remove(segment.firstIndex());
+ segment.close();
+ segment.delete();
+ resetCurrentSegment();
+ }
+
+ /**
+ * Creates a new segment.
+ */
+ JournalSegment createSegment(JournalSegmentDescriptor descriptor) {
+ File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, descriptor.id());
+
+ RandomAccessFile raf;
+ FileChannel channel;
+ try {
+ raf = new RandomAccessFile(segmentFile, "rw");
+ raf.setLength(descriptor.maxSegmentSize());
+ channel = raf.getChannel();
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+
+ ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+ descriptor.copyTo(buffer);
+ buffer.flip();
+ try {
+ channel.write(buffer);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ } finally {
+ try {
+ channel.close();
+ raf.close();
+ } catch (IOException e) {
+ }
+ }
+ JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+ LOG.debug("Created segment: {}", segment);
+ return segment;
+ }
+
+ /**
+ * Creates a new segment instance.
+ *
+ * @param segmentFile The segment file.
+ * @param descriptor The segment descriptor.
+ * @return The segment instance.
+ */
+ protected JournalSegment newSegment(JournalSegmentFile segmentFile, JournalSegmentDescriptor descriptor) {
+ return new JournalSegment(segmentFile, descriptor, storageLevel, maxEntrySize, indexDensity);
+ }
+
+ /**
+ * Loads a segment.
+ */
+ private JournalSegment loadSegment(long segmentId) {
+ File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, segmentId);
+ ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+ try (FileChannel channel = openChannel(segmentFile)) {
+ channel.read(buffer);
+ buffer.flip();
+ JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+ JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+ LOG.debug("Loaded disk segment: {} ({})", descriptor.id(), segmentFile.getName());
+ return segment;
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ private FileChannel openChannel(File file) {
+ try {
+ return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+ }
+
+ /**
+ * Loads all segments from disk.
+ *
+ * @return A collection of segments for the log.
+ */
+ protected Collection<JournalSegment> loadSegments() {
+ // Ensure log directories are created.
+ directory.mkdirs();
+
+ TreeMap<Long, JournalSegment> segments = new TreeMap<>();
+
+ // Iterate through all files in the log directory.
+ for (File file : directory.listFiles(File::isFile)) {
+
+ // If the file looks like a segment file, attempt to load the segment.
+ if (JournalSegmentFile.isSegmentFile(name, file)) {
+ JournalSegmentFile segmentFile = new JournalSegmentFile(file);
+ ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+ try (FileChannel channel = openChannel(file)) {
+ channel.read(buffer);
+ buffer.flip();
+ } catch (IOException e) {
+ throw new StorageException(e);
+ }
+
+ JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+
+ // Load the segment.
+ JournalSegment segment = loadSegment(descriptor.id());
+
+ // Add the segment to the segments list.
+ LOG.debug("Found segment: {} ({})", segment.descriptor().id(), segmentFile.file().getName());
+ segments.put(segment.firstIndex(), segment);
+ }
+ }
+
+ // Verify that all the segments in the log align with one another.
+ JournalSegment previousSegment = null;
+ boolean corrupted = false;
+ Iterator<Map.Entry<Long, JournalSegment>> iterator = segments.entrySet().iterator();
+ while (iterator.hasNext()) {
+ JournalSegment segment = iterator.next().getValue();
+ if (previousSegment != null && previousSegment.lastIndex() != segment.firstIndex() - 1) {
+ LOG.warn("Journal is inconsistent. {} is not aligned with prior segment {}", segment.file().file(), previousSegment.file().file());
+ corrupted = true;
+ }
+ if (corrupted) {
+ segment.close();
+ segment.delete();
+ iterator.remove();
+ }
+ previousSegment = segment;
+ }
+
+ return segments.values();
+ }
+
+ /**
+ * Resets journal readers to the given head.
+ *
+ * @param index The index at which to reset readers.
+ */
+ void resetHead(long index) {
+ for (SegmentedJournalReader<E> reader : readers) {
+ if (reader.getNextIndex() < index) {
+ reader.reset(index);
+ }
+ }
+ }
+
+ /**
+ * Resets journal readers to the given tail.
+ *
+ * @param index The index at which to reset readers.
+ */
+ void resetTail(long index) {
+ for (SegmentedJournalReader<E> reader : readers) {
+ if (reader.getNextIndex() >= index) {
+ reader.reset(index);
+ }
+ }
+ }
+
+ void closeReader(SegmentedJournalReader<E> reader) {
+ readers.remove(reader);
+ }
+
+ @Override
+ public boolean isOpen() {
+ return open;
+ }
+
+ /**
+ * Returns a boolean indicating whether a segment can be removed from the journal prior to the given index.
+ *
+ * @param index the index from which to remove segments
+ * @return indicates whether a segment can be removed from the journal
+ */
+ public boolean isCompactable(long index) {
+ Map.Entry<Long, JournalSegment> segmentEntry = segments.floorEntry(index);
+ return segmentEntry != null && segments.headMap(segmentEntry.getValue().firstIndex()).size() > 0;
+ }
+
+ /**
+ * Returns the index of the last segment in the log.
+ *
+ * @param index the compaction index
+ * @return the starting index of the last segment in the log
+ */
+ public long getCompactableIndex(long index) {
+ Map.Entry<Long, JournalSegment> segmentEntry = segments.floorEntry(index);
+ return segmentEntry != null ? segmentEntry.getValue().firstIndex() : 0;
+ }
+
+ /**
+ * Compacts the journal up to the given index.
+ * <p>
+ * The semantics of compaction are not specified by this interface.
+ *
+ * @param index The index up to which to compact the journal.
+ */
+ public void compact(long index) {
+ final var segmentEntry = segments.floorEntry(index);
+ if (segmentEntry != null) {
+ final var compactSegments = segments.headMap(segmentEntry.getValue().firstIndex());
+ if (!compactSegments.isEmpty()) {
+ LOG.debug("{} - Compacting {} segment(s)", name, compactSegments.size());
+ for (JournalSegment segment : compactSegments.values()) {
+ LOG.trace("Deleting segment: {}", segment);
+ segment.close();
+ segment.delete();
+ }
+ compactSegments.clear();
+ resetHead(segmentEntry.getValue().firstIndex());
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ segments.values().forEach(segment -> {
+ LOG.debug("Closing segment: {}", segment);
+ segment.close();
+ });
+ currentSegment = null;
+ open = false;
+ }
+
+ /**
+ * Returns whether {@code flushOnCommit} is enabled for the log.
+ *
+ * @return Indicates whether {@code flushOnCommit} is enabled for the log.
+ */
+ boolean isFlushOnCommit() {
+ return flushOnCommit;
+ }
+
+ /**
+ * Commits entries up to the given index.
+ *
+ * @param index The index up to which to commit entries.
+ */
+ void setCommitIndex(long index) {
+ this.commitIndex = index;
+ }
+
+ /**
+ * Returns the Raft log commit index.
+ *
+ * @return The Raft log commit index.
+ */
+ long getCommitIndex() {
+ return commitIndex;
+ }
+
+ /**
+ * Raft log builder.
+ */
+ public static final class Builder<E> {
+ private static final boolean DEFAULT_FLUSH_ON_COMMIT = false;
+ private static final String DEFAULT_NAME = "atomix";
+ private static final String DEFAULT_DIRECTORY = System.getProperty("user.dir");
+ private static final int DEFAULT_MAX_SEGMENT_SIZE = 1024 * 1024 * 32;
+ private static final int DEFAULT_MAX_ENTRY_SIZE = 1024 * 1024;
+ private static final int DEFAULT_MAX_ENTRIES_PER_SEGMENT = 1024 * 1024;
+ private static final double DEFAULT_INDEX_DENSITY = .005;
+
+ private String name = DEFAULT_NAME;
+ private StorageLevel storageLevel = StorageLevel.DISK;
+ private File directory = new File(DEFAULT_DIRECTORY);
+ private JournalSerdes namespace;
+ private int maxSegmentSize = DEFAULT_MAX_SEGMENT_SIZE;
+ private int maxEntrySize = DEFAULT_MAX_ENTRY_SIZE;
+ private int maxEntriesPerSegment = DEFAULT_MAX_ENTRIES_PER_SEGMENT;
+ private double indexDensity = DEFAULT_INDEX_DENSITY;
+ private boolean flushOnCommit = DEFAULT_FLUSH_ON_COMMIT;
+
+ protected Builder() {
+ }
+
+ /**
+ * Sets the storage name.
+ *
+ * @param name The storage name.
+ * @return The storage builder.
+ */
+ public Builder<E> withName(String name) {
+ this.name = requireNonNull(name, "name cannot be null");
+ return this;
+ }
+
+ /**
+ * Sets the log storage level, returning the builder for method chaining.
+ * <p>
+ * The storage level indicates how individual entries should be persisted in the journal.
+ *
+ * @param storageLevel The log storage level.
+ * @return The storage builder.
+ */
+ public Builder<E> withStorageLevel(StorageLevel storageLevel) {
+ this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+ return this;
+ }
+
+ /**
+ * Sets the log directory, returning the builder for method chaining.
+ * <p>
+ * The log will write segment files into the provided directory.
+ *
+ * @param directory The log directory.
+ * @return The storage builder.
+ * @throws NullPointerException If the {@code directory} is {@code null}
+ */
+ public Builder<E> withDirectory(String directory) {
+ return withDirectory(new File(requireNonNull(directory, "directory cannot be null")));
+ }
+
+ /**
+ * Sets the log directory, returning the builder for method chaining.
+ * <p>
+ * The log will write segment files into the provided directory.
+ *
+ * @param directory The log directory.
+ * @return The storage builder.
+ * @throws NullPointerException If the {@code directory} is {@code null}
+ */
+ public Builder<E> withDirectory(File directory) {
+ this.directory = requireNonNull(directory, "directory cannot be null");
+ return this;
+ }
+
+ /**
+ * Sets the journal namespace, returning the builder for method chaining.
+ *
+ * @param namespace The journal serializer.
+ * @return The journal builder.
+ */
+ public Builder<E> withNamespace(JournalSerdes namespace) {
+ this.namespace = requireNonNull(namespace, "namespace cannot be null");
+ return this;
+ }
+
+ /**
+ * Sets the maximum segment size in bytes, returning the builder for method chaining.
+ * <p>
+ * The maximum segment size dictates when logs should roll over to new segments. As entries are written to a segment
+ * of the log, once the size of the segment surpasses the configured maximum segment size, the log will create a new
+ * segment and append new entries to that segment.
+ * <p>
+ * By default, the maximum segment size is {@code 1024 * 1024 * 32}.
+ *
+ * @param maxSegmentSize The maximum segment size in bytes.
+ * @return The storage builder.
+ * @throws IllegalArgumentException If the {@code maxSegmentSize} is not positive
+ */
+ public Builder<E> withMaxSegmentSize(int maxSegmentSize) {
+ checkArgument(maxSegmentSize > JournalSegmentDescriptor.BYTES, "maxSegmentSize must be greater than " + JournalSegmentDescriptor.BYTES);
+ this.maxSegmentSize = maxSegmentSize;
+ return this;
+ }
+
+ /**
+ * Sets the maximum entry size in bytes, returning the builder for method chaining.
+ *
+ * @param maxEntrySize the maximum entry size in bytes
+ * @return the storage builder
+ * @throws IllegalArgumentException if the {@code maxEntrySize} is not positive
+ */
+ public Builder<E> withMaxEntrySize(int maxEntrySize) {
+ checkArgument(maxEntrySize > 0, "maxEntrySize must be positive");
+ this.maxEntrySize = maxEntrySize;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of allows entries per segment, returning the builder for method chaining.
+ * <p>
+ * The maximum entry count dictates when logs should roll over to new segments. As entries are written to a segment
+ * of the log, if the entry count in that segment meets the configured maximum entry count, the log will create a
+ * new segment and append new entries to that segment.
+ * <p>
+ * By default, the maximum entries per segment is {@code 1024 * 1024}.
+ *
+ * @param maxEntriesPerSegment The maximum number of entries allowed per segment.
+ * @return The storage builder.
+ * @throws IllegalArgumentException If the {@code maxEntriesPerSegment} not greater than the default max entries
+ * per segment
+ * @deprecated since 3.0.2
+ */
+ @Deprecated
+ public Builder<E> withMaxEntriesPerSegment(int maxEntriesPerSegment) {
+ checkArgument(maxEntriesPerSegment > 0, "max entries per segment must be positive");
+ checkArgument(maxEntriesPerSegment <= DEFAULT_MAX_ENTRIES_PER_SEGMENT,
+ "max entries per segment cannot be greater than " + DEFAULT_MAX_ENTRIES_PER_SEGMENT);
+ this.maxEntriesPerSegment = maxEntriesPerSegment;
+ return this;
+ }
+
+ /**
+ * Sets the journal index density.
+ * <p>
+ * The index density is the frequency at which the position of entries written to the journal will be recorded in an
+ * in-memory index for faster seeking.
+ *
+ * @param indexDensity the index density
+ * @return the journal builder
+ * @throws IllegalArgumentException if the density is not between 0 and 1
+ */
+ public Builder<E> withIndexDensity(double indexDensity) {
+ checkArgument(indexDensity > 0 && indexDensity < 1, "index density must be between 0 and 1");
+ this.indexDensity = indexDensity;
+ return this;
+ }
+
+ /**
+ * Enables flushing buffers to disk when entries are committed to a segment, returning the builder for method
+ * chaining.
+ * <p>
+ * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+ * committed in a given segment.
+ *
+ * @return The storage builder.
+ */
+ public Builder<E> withFlushOnCommit() {
+ return withFlushOnCommit(true);
+ }
+
+ /**
+ * Sets whether to flush buffers to disk when entries are committed to a segment, returning the builder for method
+ * chaining.
+ * <p>
+ * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+ * committed in a given segment.
+ *
+ * @param flushOnCommit Whether to flush buffers to disk when entries are committed to a segment.
+ * @return The storage builder.
+ */
+ public Builder<E> withFlushOnCommit(boolean flushOnCommit) {
+ this.flushOnCommit = flushOnCommit;
+ return this;
+ }
+
+ /**
+ * Build the {@link SegmentedJournal}.
+ *
+ * @return A new {@link SegmentedJournal}.
+ */
+ public SegmentedJournal<E> build() {
+ return new SegmentedJournal<>(
+ name,
+ storageLevel,
+ directory,
+ namespace,
+ maxSegmentSize,
+ maxEntrySize,
+ maxEntriesPerSegment,
+ indexDensity,
+ flushOnCommit);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * A {@link JournalReader} traversing all entries.
+ */
+sealed class SegmentedJournalReader<E> implements JournalReader<E> permits CommitsSegmentJournalReader {
+ // Marker non-null object for tryAdvance()
+ private static final @NonNull Object ADVANCED = new Object();
+
+ final SegmentedJournal<E> journal;
+
+ private JournalSegment currentSegment;
+ private JournalSegmentReader currentReader;
+ private long nextIndex;
+
+ SegmentedJournalReader(final SegmentedJournal<E> journal, final JournalSegment segment) {
+ this.journal = requireNonNull(journal);
+ currentSegment = requireNonNull(segment);
+ currentReader = segment.createReader();
+ nextIndex = currentSegment.firstIndex();
+ }
+
+ @Override
+ public final long getFirstIndex() {
+ return journal.getFirstSegment().firstIndex();
+ }
+
+ @Override
+ public final long getNextIndex() {
+ return nextIndex;
+ }
+
+ @Override
+ public final void reset() {
+ currentReader.close();
+
+ currentSegment = journal.getFirstSegment();
+ currentReader = currentSegment.createReader();
+ nextIndex = currentSegment.firstIndex();
+ }
+
+ @Override
+ public final void reset(final long index) {
+ // If the current segment is not open, it has been replaced. Reset the segments.
+ if (!currentSegment.isOpen()) {
+ reset();
+ }
+
+ if (index < nextIndex) {
+ rewind(index);
+ } else if (index > nextIndex) {
+ while (index > nextIndex && tryAdvance()) {
+ // Nothing else
+ }
+ } else {
+ resetCurrentReader(index);
+ }
+ }
+
+ private void resetCurrentReader(final long index) {
+ final var position = currentSegment.lookup(index - 1);
+ if (position != null) {
+ nextIndex = position.index();
+ currentReader.setPosition(position.position());
+ } else {
+ nextIndex = currentSegment.firstIndex();
+ currentReader.setPosition(JournalSegmentDescriptor.BYTES);
+ }
+ while (nextIndex < index && tryAdvance()) {
+ // Nothing else
+ }
+ }
+
+ /**
+ * Rewinds the journal to the given index.
+ */
+ private void rewind(final long index) {
+ if (currentSegment.firstIndex() >= index) {
+ JournalSegment segment = journal.getSegment(index - 1);
+ if (segment != null) {
+ currentReader.close();
+
+ currentSegment = segment;
+ currentReader = currentSegment.createReader();
+ }
+ }
+
+ resetCurrentReader(index);
+ }
+
+ @Override
+ public <T> T tryNext(final EntryMapper<E, T> mapper) {
+ final var index = nextIndex;
+ var buf = currentReader.readBytes(index);
+ if (buf == null) {
+ final var nextSegment = journal.getNextSegment(currentSegment.firstIndex());
+ if (nextSegment == null || nextSegment.firstIndex() != index) {
+ return null;
+ }
+
+ currentReader.close();
+
+ currentSegment = nextSegment;
+ currentReader = currentSegment.createReader();
+ buf = currentReader.readBytes(index);
+ if (buf == null) {
+ return null;
+ }
+ }
+
+ final var entry = journal.serializer().deserialize(buf);
+ final var ret = requireNonNull(mapper.mapEntry(index, entry, buf.readableBytes()));
+ nextIndex = index + 1;
+ return ret;
+ }
+
+ /**
+ * Try to move to the next entry.
+ *
+ * @return {@code true} if there was a next entry and this reader has moved to it
+ */
+ final boolean tryAdvance() {
+ return tryNext((index, entry, size) -> ADVANCED) != null;
+ }
+
+ @Override
+ public final void close() {
+ currentReader.close();
+ journal.closeReader(this);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+/**
+ * Raft log writer.
+ */
+final class SegmentedJournalWriter<E> implements JournalWriter<E> {
+ private final SegmentedJournal<E> journal;
+ private JournalSegment currentSegment;
+ private JournalSegmentWriter currentWriter;
+
+ SegmentedJournalWriter(SegmentedJournal<E> journal) {
+ this.journal = journal;
+ this.currentSegment = journal.getLastSegment();
+ this.currentWriter = currentSegment.acquireWriter();
+ }
+
+ @Override
+ public long getLastIndex() {
+ return currentWriter.getLastIndex();
+ }
+
+ @Override
+ public long getNextIndex() {
+ return currentWriter.getNextIndex();
+ }
+
+ @Override
+ public void reset(long index) {
+ if (index > currentSegment.firstIndex()) {
+ currentSegment.releaseWriter();
+ currentSegment = journal.resetSegments(index);
+ currentWriter = currentSegment.acquireWriter();
+ } else {
+ truncate(index - 1);
+ }
+ journal.resetHead(index);
+ }
+
+ @Override
+ public void commit(long index) {
+ if (index > journal.getCommitIndex()) {
+ journal.setCommitIndex(index);
+ if (journal.isFlushOnCommit()) {
+ flush();
+ }
+ }
+ }
+
+ @Override
+ public <T extends E> Indexed<T> append(T entry) {
+ final var bytes = journal.serializer().serialize(entry);
+ var index = currentWriter.append(bytes);
+ if (index != null) {
+ return new Indexed<>(index, entry, bytes.readableBytes());
+ }
+
+ // Slow path: we do not have enough capacity
+ currentWriter.flush();
+ currentSegment.releaseWriter();
+ currentSegment = journal.getNextSegment();
+ currentWriter = currentSegment.acquireWriter();
+ final var newIndex = verifyNotNull(currentWriter.append(bytes));
+ return new Indexed<>(newIndex, entry, bytes.readableBytes());
+ }
+
+ @Override
+ public void truncate(long index) {
+ if (index < journal.getCommitIndex()) {
+ throw new IndexOutOfBoundsException("Cannot truncate committed index: " + index);
+ }
+
+ // Delete all segments with first indexes greater than the given index.
+ while (index < currentSegment.firstIndex() && currentSegment != journal.getFirstSegment()) {
+ currentSegment.releaseWriter();
+ journal.removeSegment(currentSegment);
+ currentSegment = journal.getLastSegment();
+ currentWriter = currentSegment.acquireWriter();
+ }
+
+ // Truncate the current index.
+ currentWriter.truncate(index);
+
+ // Reset segment readers.
+ journal.resetTail(index + 1);
+ }
+
+ @Override
+ public void flush() {
+ currentWriter.flush();
+ }
+}
--- /dev/null
+/*
+ * Copyright 2015-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Log exception.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class StorageException extends RuntimeException {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ public StorageException() {
+ }
+
+ public StorageException(final String message) {
+ super(message);
+ }
+
+ public StorageException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+
+ public StorageException(final Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * Exception thrown when an entry being stored is too large.
+ */
+ public static class TooLarge extends StorageException {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ public TooLarge(final String message) {
+ super(message);
+ }
+
+ public TooLarge(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+ }
+
+ /**
+ * Exception thrown when storage runs out of disk space.
+ */
+ public static class OutOfDiskSpace extends StorageException {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ public OutOfDiskSpace(final String message) {
+ super(message);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Storage level configuration values which control how logs are stored on disk or in memory.
+ */
+public enum StorageLevel {
+ /**
+ * Stores data in a memory-mapped file.
+ */
+ MAPPED,
+ /**
+ * Stores data on disk.
+ */
+ DISK
+}
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Index of a particular JournalSegment.
+ */
+public interface JournalIndex {
+ /**
+ * Adds an entry for the given index at the given position.
+ *
+ * @param index the index for which to add the entry
+ * @param position the position of the given index
+ */
+ void index(long index, int position);
+
+ /**
+ * Looks up the position of the given index.
+ *
+ * @param index the index to lookup
+ * @return the position of the given index or a lesser index, or {@code null}
+ */
+ @Nullable Position lookup(long index);
+
+ /**
+ * Truncates the index to the given index and returns its position, if available.
+ *
+ * @param index the index to which to truncate the index, or {@code null}
+ * @return the position of the given index or a lesser index, or {@code null}
+ */
+ @Nullable Position truncate(long index);
+}
--- /dev/null
+/*
+ * Copyright 2018-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.Map.Entry;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Journal index position.
+ */
+public record Position(long index, int position) {
+ public Position(final Entry<Long, Integer> entry) {
+ this(entry.getKey(), entry.getValue());
+ }
+
+ public static @Nullable Position ofNullable(final Entry<Long, Integer> entry) {
+ return entry == null ? null : new Position(entry);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.TreeMap;
+
+/**
+ * A {@link JournalIndex} maintaining target density.
+ */
+public final class SparseJournalIndex implements JournalIndex {
+ private static final int MIN_DENSITY = 1000;
+
+ private final int density;
+ private final TreeMap<Long, Integer> positions = new TreeMap<>();
+
+ public SparseJournalIndex() {
+ density = MIN_DENSITY;
+ }
+
+ public SparseJournalIndex(final double density) {
+ this.density = (int) Math.ceil(MIN_DENSITY / (density * MIN_DENSITY));
+ }
+
+ @Override
+ public void index(final long index, final int position) {
+ if (index % density == 0) {
+ positions.put(index, position);
+ }
+ }
+
+ @Override
+ public Position lookup(final long index) {
+ return Position.ofNullable(positions.floorEntry(index));
+ }
+
+ @Override
+ public Position truncate(final long index) {
+ positions.tailMap(index, false).clear();
+ return Position.ofNullable(positions.lastEntry());
+ }
+}
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for efficiently managing journal indexes.
+ */
+package io.atomix.storage.journal.index;
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides a low-level journal abstraction for appending to logs and managing segmented logs.
+ */
+package io.atomix.storage.journal;
--- /dev/null
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.io.ByteArrayOutputStream;
+
+/**
+ * Exposes protected byte array length in {@link ByteArrayOutputStream}.
+ */
+final class BufferAwareByteArrayOutputStream extends ByteArrayOutputStream {
+
+ BufferAwareByteArrayOutputStream(int size) {
+ super(size);
+ }
+
+ int getBufferSize() {
+ return buf.length;
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Convenience class to avoid extra object allocation and casting.
+ */
+final class ByteArrayOutput extends Output {
+
+ private final BufferAwareByteArrayOutputStream stream;
+
+ ByteArrayOutput(final int bufferSize, final int maxBufferSize, final BufferAwareByteArrayOutputStream stream) {
+ super(bufferSize, maxBufferSize);
+ super.setOutputStream(stream);
+ this.stream = stream;
+ }
+
+ BufferAwareByteArrayOutputStream getByteArrayOutputStream() {
+ return stream;
+ }
+}
--- /dev/null
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class EntrySerializer<T> extends Serializer<T> {
+ // Note: uses identity to create things in Kryo, hence we want an instance for every serdes we wrap
+ private final JavaSerializer javaSerializer = new JavaSerializer();
+ private final EntrySerdes<T> serdes;
+
+ EntrySerializer(final EntrySerdes<T> serdes) {
+ this.serdes = requireNonNull(serdes);
+ }
+
+ @Override
+ public T read(final Kryo kryo, final Input input, final Class<T> type) {
+ try {
+ return serdes.read(new KryoEntryInput(kryo, input, javaSerializer));
+ } catch (IOException e) {
+ throw new KryoException(e);
+ }
+ }
+
+ @Override
+ public void write(final Kryo kryo, final Output output, final T object) {
+ try {
+ serdes.write(new KryoEntryOutput(kryo, output, javaSerializer), object);
+ } catch (IOException e) {
+ throw new KryoException(e);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).addValue(serdes).toString();
+ }
+}
--- /dev/null
+/* Copyright (c) 2008, Nathan Sweet
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided with the distribution.
+ * - Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import java.nio.ByteBuffer;
+
+/**
+ * A Kryo-4.0.3 ByteBufferInput adapted to deal with
+ * <a href="https://github.com/EsotericSoftware/kryo/issues/505">issue 505</a>.
+ *
+ * @author Roman Levenstein <romixlev@gmail.com>
+ * @author Robert Varga
+ */
+public final class Kryo505ByteBufferInput extends ByteBufferInput {
+ Kryo505ByteBufferInput (ByteBuffer buffer) {
+ super(buffer);
+ }
+
+ @Override
+ public String readString () {
+ niobuffer.position(position);
+ int available = require(1);
+ position++;
+ int b = niobuffer.get();
+ if ((b & 0x80) == 0) return readAscii(); // ASCII.
+ // Null, empty, or UTF8.
+ int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+ switch (charCount) {
+ case 0:
+ return null;
+ case 1:
+ return "";
+ }
+ charCount--;
+ if (chars.length < charCount) chars = new char[charCount];
+ readUtf8(charCount);
+ return new String(chars, 0, charCount);
+ }
+
+ private int readUtf8Length (int b) {
+ int result = b & 0x3F; // Mask all but first 6 bits.
+ if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 6;
+ if ((b & 0x80) != 0) {
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 13;
+ if ((b & 0x80) != 0) {
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 20;
+ if ((b & 0x80) != 0) {
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 27;
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ private int readUtf8Length_slow (int b) {
+ int result = b & 0x3F; // Mask all but first 6 bits.
+ if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+ require(1);
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 6;
+ if ((b & 0x80) != 0) {
+ require(1);
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 13;
+ if ((b & 0x80) != 0) {
+ require(1);
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 20;
+ if ((b & 0x80) != 0) {
+ require(1);
+ position++;
+ b = niobuffer.get();
+ result |= (b & 0x7F) << 27;
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ private void readUtf8 (int charCount) {
+ char[] chars = this.chars;
+ // Try to read 7 bit ASCII chars.
+ int charIndex = 0;
+ int count = Math.min(require(1), charCount);
+ int position = this.position;
+ int b;
+ while (charIndex < count) {
+ position++;
+ b = niobuffer.get();
+ if (b < 0) {
+ position--;
+ break;
+ }
+ chars[charIndex++] = (char)b;
+ }
+ this.position = position;
+ // If buffer didn't hold all chars or any were not ASCII, use slow path for remainder.
+ if (charIndex < charCount) {
+ niobuffer.position(position);
+ readUtf8_slow(charCount, charIndex);
+ }
+ }
+
+ private void readUtf8_slow (int charCount, int charIndex) {
+ char[] chars = this.chars;
+ while (charIndex < charCount) {
+ if (position == limit) require(1);
+ position++;
+ int b = niobuffer.get() & 0xFF;
+ switch (b >> 4) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ chars[charIndex] = (char)b;
+ break;
+ case 12:
+ case 13:
+ if (position == limit) require(1);
+ position++;
+ chars[charIndex] = (char)((b & 0x1F) << 6 | niobuffer.get() & 0x3F);
+ break;
+ case 14:
+ require(2);
+ position += 2;
+ int b2 = niobuffer.get();
+ int b3 = niobuffer.get();
+ chars[charIndex] = (char)((b & 0x0F) << 12 | (b2 & 0x3F) << 6 | b3 & 0x3F);
+ break;
+ }
+ charIndex++;
+ }
+ }
+
+ private String readAscii () {
+ int end = position;
+ int start = end - 1;
+ int limit = this.limit;
+ int b;
+ do {
+ if (end == limit) return readAscii_slow();
+ end++;
+ b = niobuffer.get();
+ } while ((b & 0x80) == 0);
+ int count = end - start;
+ byte[] tmp = new byte[count];
+ niobuffer.position(start);
+ niobuffer.get(tmp);
+ tmp[count - 1] &= 0x7F; // Mask end of ascii bit.
+ String value = new String(tmp, 0, 0, count);
+ position = end;
+ niobuffer.position(position);
+ return value;
+ }
+
+ private String readAscii_slow () {
+ position--; // Re-read the first byte.
+ // Copy chars currently in buffer.
+ int charCount = limit - position;
+ if (charCount > chars.length) chars = new char[charCount * 2];
+ char[] chars = this.chars;
+ for (int i = position, ii = 0, n = limit; i < n; i++, ii++)
+ chars[ii] = (char)niobuffer.get(i);
+ position = limit;
+ // Copy additional chars one by one.
+ while (true) {
+ require(1);
+ position++;
+ int b = niobuffer.get();
+ if (charCount == chars.length) {
+ char[] newChars = new char[charCount * 2];
+ System.arraycopy(chars, 0, newChars, 0, charCount);
+ chars = newChars;
+ this.chars = newChars;
+ }
+ if ((b & 0x80) == 0x80) {
+ chars[charCount++] = (char)(b & 0x7F);
+ break;
+ }
+ chars[charCount++] = (char)b;
+ }
+ return new String(chars, 0, charCount);
+ }
+
+ @Override
+ public StringBuilder readStringBuilder () {
+ niobuffer.position(position);
+ int available = require(1);
+ position++;
+ int b = niobuffer.get();
+ if ((b & 0x80) == 0) return new StringBuilder(readAscii()); // ASCII.
+ // Null, empty, or UTF8.
+ int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+ switch (charCount) {
+ case 0:
+ return null;
+ case 1:
+ return new StringBuilder("");
+ }
+ charCount--;
+ if (chars.length < charCount) chars = new char[charCount];
+ readUtf8(charCount);
+ StringBuilder builder = new StringBuilder(charCount);
+ builder.append(chars, 0, charCount);
+ return builder;
+ }
+}
--- /dev/null
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import java.io.IOException;
+
+final class KryoEntryInput implements EntryInput {
+ private final Kryo kryo;
+ private final Input input;
+ private final JavaSerializer javaSerializer;
+
+ KryoEntryInput(final Kryo kryo, final Input input, final JavaSerializer javaSerializer) {
+ this.kryo = requireNonNull(kryo);
+ this.input = requireNonNull(input);
+ this.javaSerializer = requireNonNull(javaSerializer);
+ }
+
+ @Override
+ public byte[] readBytes(final int length) throws IOException {
+ try {
+ return input.readBytes(length);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public long readLong() throws IOException {
+ try {
+ return input.readLong(false);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public Object readObject() throws IOException {
+ try {
+ return javaSerializer.read(kryo, input, null);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public String readString() throws IOException {
+ try {
+ return input.readString();
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public int readVarInt() throws IOException {
+ try {
+ return input.readVarInt(true);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import java.io.IOException;
+
+final class KryoEntryOutput implements EntryOutput {
+ private final Kryo kryo;
+ private final Output output;
+ private final JavaSerializer javaSerializer;
+
+ KryoEntryOutput(final Kryo kryo, final Output output, final JavaSerializer javaSerializer) {
+ this.kryo = requireNonNull(kryo);
+ this.output = requireNonNull(output);
+ this.javaSerializer = requireNonNull(javaSerializer);
+ }
+
+ @Override
+ public void writeBytes(final byte[] bytes) throws IOException {
+ try {
+ output.writeBytes(bytes);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void writeLong(final long value) throws IOException {
+ try {
+ output.writeLong(value, false);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void writeObject(final Object value) throws IOException {
+ try {
+ javaSerializer.write(kryo, output, value);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void writeString(final String value) throws IOException {
+ try {
+ output.writeString(value);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void writeVarInt(final int value) throws IOException {
+ try {
+ output.writeVarInt(value, true);
+ } catch (KryoException e) {
+ throw new IOException(e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.lang.ref.SoftReference;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Function;
+
+abstract class KryoIOPool<T> {
+
+ private final ConcurrentLinkedQueue<SoftReference<T>> queue = new ConcurrentLinkedQueue<>();
+
+ private T borrow(final int bufferSize) {
+ T element;
+ SoftReference<T> reference;
+ while ((reference = queue.poll()) != null) {
+ if ((element = reference.get()) != null) {
+ return element;
+ }
+ }
+ return create(bufferSize);
+ }
+
+ protected abstract T create(final int bufferSize);
+
+ protected abstract boolean recycle(final T element);
+
+ <R> R run(final Function<T, R> function, final int bufferSize) {
+ final T element = borrow(bufferSize);
+ try {
+ return function.apply(element);
+ } finally {
+ if (recycle(element)) {
+ queue.offer(new SoftReference<>(element));
+ }
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+
+class KryoInputPool extends KryoIOPool<Input> {
+
+ static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+ @Override
+ protected Input create(int bufferSize) {
+ return new Input(bufferSize);
+ }
+
+ @Override
+ protected boolean recycle(Input input) {
+ if (input.getBuffer().length < MAX_POOLED_BUFFER_SIZE) {
+ input.setInputStream(null);
+ return true;
+ }
+ return false; // discard
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Registration;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import com.esotericsoftware.kryo.io.ByteBufferOutput;
+import com.esotericsoftware.kryo.pool.KryoCallback;
+import com.esotericsoftware.kryo.pool.KryoFactory;
+import com.esotericsoftware.kryo.pool.KryoPool;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import org.objenesis.strategy.StdInstantiatorStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Pool of Kryo instances, with classes pre-registered.
+ */
+final class KryoJournalSerdes implements JournalSerdes, KryoFactory, KryoPool {
+ /**
+ * Default buffer size used for serialization.
+ *
+ * @see #serialize(Object)
+ */
+ private static final int DEFAULT_BUFFER_SIZE = 4096;
+
+ /**
+ * Smallest ID free to use for user defined registrations.
+ */
+ private static final int INITIAL_ID = 16;
+
+ static final String NO_NAME = "(no name)";
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(KryoJournalSerdes.class);
+
+ private final KryoPool kryoPool = new KryoPool.Builder(this).softReferences().build();
+
+ private final KryoOutputPool kryoOutputPool = new KryoOutputPool();
+ private final KryoInputPool kryoInputPool = new KryoInputPool();
+
+ private final List<RegisteredType> registeredTypes;
+ private final ClassLoader classLoader;
+ private final String friendlyName;
+
+ /**
+ * Creates a Kryo instance pool.
+ *
+ * @param registeredTypes types to register
+ * @param registrationRequired whether registration is required
+ * @param friendlyName friendly name for the namespace
+ */
+ KryoJournalSerdes(
+ final List<RegisteredType> registeredTypes,
+ final ClassLoader classLoader,
+ final String friendlyName) {
+ this.registeredTypes = List.copyOf(registeredTypes);
+ this.classLoader = requireNonNull(classLoader);
+ this.friendlyName = requireNonNull(friendlyName);
+
+ // Pre-populate with a single instance
+ release(create());
+ }
+
+ @Override
+ public byte[] serialize(final Object obj) {
+ return serialize(obj, DEFAULT_BUFFER_SIZE);
+ }
+
+ @Override
+ public byte[] serialize(final Object obj, final int bufferSize) {
+ return kryoOutputPool.run(output -> kryoPool.run(kryo -> {
+ kryo.writeClassAndObject(output, obj);
+ output.flush();
+ return output.getByteArrayOutputStream().toByteArray();
+ }), bufferSize);
+ }
+
+ @Override
+ public void serialize(final Object obj, final ByteBuffer buffer) {
+ ByteBufferOutput out = new ByteBufferOutput(buffer);
+ Kryo kryo = borrow();
+ try {
+ kryo.writeClassAndObject(out, obj);
+ out.flush();
+ } finally {
+ release(kryo);
+ }
+ }
+
+ @Override
+ public void serialize(final Object obj, final OutputStream stream) {
+ serialize(obj, stream, DEFAULT_BUFFER_SIZE);
+ }
+
+ @Override
+ public void serialize(final Object obj, final OutputStream stream, final int bufferSize) {
+ ByteBufferOutput out = new ByteBufferOutput(stream, bufferSize);
+ Kryo kryo = borrow();
+ try {
+ kryo.writeClassAndObject(out, obj);
+ out.flush();
+ } finally {
+ release(kryo);
+ }
+ }
+
+ @Override
+ public <T> T deserialize(final byte[] bytes) {
+ return kryoInputPool.run(input -> {
+ input.setInputStream(new ByteArrayInputStream(bytes));
+ return kryoPool.run(kryo -> {
+ @SuppressWarnings("unchecked")
+ T obj = (T) kryo.readClassAndObject(input);
+ return obj;
+ });
+ }, DEFAULT_BUFFER_SIZE);
+ }
+
+ @Override
+ public <T> T deserialize(final ByteBuffer buffer) {
+ Kryo kryo = borrow();
+ try {
+ @SuppressWarnings("unchecked")
+ T obj = (T) kryo.readClassAndObject(new Kryo505ByteBufferInput(buffer));
+ return obj;
+ } finally {
+ release(kryo);
+ }
+ }
+
+ @Override
+ public <T> T deserialize(final InputStream stream) {
+ return deserialize(stream, DEFAULT_BUFFER_SIZE);
+ }
+
+ @Override
+ public <T> T deserialize(final InputStream stream, final int bufferSize) {
+ Kryo kryo = borrow();
+ try {
+ @SuppressWarnings("unchecked")
+ T obj = (T) kryo.readClassAndObject(new ByteBufferInput(stream, bufferSize));
+ return obj;
+ } finally {
+ release(kryo);
+ }
+ }
+
+ /**
+ * Creates a Kryo instance.
+ *
+ * @return Kryo instance
+ */
+ @Override
+ public Kryo create() {
+ LOGGER.trace("Creating Kryo instance for {}", this);
+ Kryo kryo = new Kryo();
+ kryo.setClassLoader(classLoader);
+ kryo.setRegistrationRequired(true);
+
+ // TODO rethink whether we want to use StdInstantiatorStrategy
+ kryo.setInstantiatorStrategy(
+ new Kryo.DefaultInstantiatorStrategy(new StdInstantiatorStrategy()));
+
+ int id = INITIAL_ID;
+ for (RegisteredType registeredType : registeredTypes) {
+ register(kryo, registeredType.types(), registeredType.serializer(), id++);
+ }
+ return kryo;
+ }
+
+ /**
+ * Register {@code type} and {@code serializer} to {@code kryo} instance.
+ *
+ * @param kryo Kryo instance
+ * @param types types to register
+ * @param serializer Specific serializer to register or null to use default.
+ * @param id type registration id to use
+ */
+ private void register(final Kryo kryo, final Class<?>[] types, final Serializer<?> serializer, final int id) {
+ Registration existing = kryo.getRegistration(id);
+ if (existing != null) {
+ boolean matches = false;
+ for (Class<?> type : types) {
+ if (existing.getType() == type) {
+ matches = true;
+ break;
+ }
+ }
+
+ if (!matches) {
+ LOGGER.error("{}: Failed to register {} as {}, {} was already registered.",
+ friendlyName, types, id, existing.getType());
+
+ throw new IllegalStateException(String.format(
+ "Failed to register %s as %s, %s was already registered.",
+ Arrays.toString(types), id, existing.getType()));
+ }
+ // falling through to register call for now.
+ // Consider skipping, if there's reasonable
+ // way to compare serializer equivalence.
+ }
+
+ for (Class<?> type : types) {
+ Registration r = null;
+ if (serializer == null) {
+ r = kryo.register(type, id);
+ } else if (type.isInterface()) {
+ kryo.addDefaultSerializer(type, serializer);
+ } else {
+ r = kryo.register(type, serializer, id);
+ }
+ if (r != null) {
+ if (r.getId() != id) {
+ LOGGER.debug("{}: {} already registered as {}. Skipping {}.",
+ friendlyName, r.getType(), r.getId(), id);
+ }
+ LOGGER.trace("{} registered as {}", r.getType(), r.getId());
+ }
+ }
+ }
+
+ @Override
+ public Kryo borrow() {
+ return kryoPool.borrow();
+ }
+
+ @Override
+ public void release(final Kryo kryo) {
+ kryoPool.release(kryo);
+ }
+
+ @Override
+ public <T> T run(final KryoCallback<T> callback) {
+ return kryoPool.run(callback);
+ }
+
+ @Override
+ public String toString() {
+ if (!NO_NAME.equals(friendlyName)) {
+ return MoreObjects.toStringHelper(getClass())
+ .omitNullValues()
+ .add("friendlyName", friendlyName)
+ // omit lengthy detail, when there's a name
+ .toString();
+ }
+ return MoreObjects.toStringHelper(getClass()).add("registeredTypes", registeredTypes).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalSerdes.Builder;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class KryoJournalSerdesBuilder implements Builder {
+ private final List<RegisteredType> types = new ArrayList<>();
+ private ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
+ @Override
+ public KryoJournalSerdesBuilder register(final EntrySerdes<?> serdes, final Class<?>... classes) {
+ types.add(new RegisteredType(new EntrySerializer<>(serdes), classes));
+ return this;
+ }
+
+ @Override
+ public KryoJournalSerdesBuilder setClassLoader(final ClassLoader classLoader) {
+ this.classLoader = requireNonNull(classLoader);
+ return this;
+ }
+
+ @Override
+ public JournalSerdes build() {
+ return build(KryoJournalSerdes.NO_NAME);
+ }
+
+ @Override
+ public JournalSerdes build(final String friendlyName) {
+ checkState(!types.isEmpty(), "No serializers registered");
+ return new KryoJournalSerdes(types, classLoader, friendlyName);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+class KryoOutputPool extends KryoIOPool<ByteArrayOutput> {
+
+ private static final int MAX_BUFFER_SIZE = 768 * 1024;
+ static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+ @Override
+ protected ByteArrayOutput create(int bufferSize) {
+ return new ByteArrayOutput(bufferSize, MAX_BUFFER_SIZE, new BufferAwareByteArrayOutputStream(bufferSize));
+ }
+
+ @Override
+ protected boolean recycle(ByteArrayOutput output) {
+ if (output.getByteArrayOutputStream().getBufferSize() < MAX_POOLED_BUFFER_SIZE) {
+ output.getByteArrayOutputStream().reset();
+ output.clear();
+ return true;
+ }
+ return false; // discard
+ }
+}
--- /dev/null
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+record RegisteredType(EntrySerializer<?> serializer, Class<?>[] types) {
+ RegisteredType {
+ requireNonNull(serializer);
+ requireNonNull(types);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for binary serialization.
+ */
+package io.atomix.utils.serializer;
--- /dev/null
+/*
+ * Copyright 2017-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Base journal test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+@RunWith(Parameterized.class)
+public abstract class AbstractJournalTest {
+ private static final JournalSerdes NAMESPACE = JournalSerdes.builder()
+ .register(new TestEntrySerdes(), TestEntry.class)
+ .register(new ByteArraySerdes(), byte[].class)
+ .build();
+
+ protected static final TestEntry ENTRY = new TestEntry(32);
+ private static final Path PATH = Paths.get("target/test-logs/");
+
+ private final StorageLevel storageLevel;
+ private final int maxSegmentSize;
+ protected final int entriesPerSegment;
+
+ protected AbstractJournalTest(final StorageLevel storageLevel, final int maxSegmentSize) {
+ this.storageLevel = storageLevel;
+ this.maxSegmentSize = maxSegmentSize;
+ int entryLength = NAMESPACE.serialize(ENTRY).length + 8;
+ entriesPerSegment = (maxSegmentSize - 64) / entryLength;
+ }
+
+ @Parameterized.Parameters
+ public static List<Object[]> primeNumbers() {
+ var runs = new ArrayList<Object[]>();
+ for (int i = 1; i <= 10; i++) {
+ for (int j = 1; j <= 10; j++) {
+ runs.add(new Object[] { 64 + i * (NAMESPACE.serialize(ENTRY).length + 8) + j });
+ }
+ }
+ return runs;
+ }
+
+ protected SegmentedJournal<TestEntry> createJournal() {
+ return SegmentedJournal.<TestEntry>builder()
+ .withName("test")
+ .withDirectory(PATH.toFile())
+ .withNamespace(NAMESPACE)
+ .withStorageLevel(storageLevel)
+ .withMaxSegmentSize(maxSegmentSize)
+ .withIndexDensity(.2)
+ .build();
+ }
+
+ @Test
+ public void testCloseMultipleTimes() {
+ // given
+ final Journal<TestEntry> journal = createJournal();
+
+ // when
+ journal.close();
+
+ // then
+ journal.close();
+ }
+
+ @Test
+ public void testWriteRead() throws Exception {
+ try (Journal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> reader = journal.openReader(1);
+
+ // Append a couple entries.
+ assertEquals(1, writer.getNextIndex());
+ var indexed = writer.append(ENTRY);
+ assertEquals(1, indexed.index());
+
+ assertEquals(2, writer.getNextIndex());
+ writer.append(ENTRY);
+ reader.reset(2);
+ indexed = assertNext(reader);
+ assertEquals(2, indexed.index());
+ assertNoNext(reader);
+
+ // Test reading an entry
+ reader.reset();
+ var entry1 = assertNext(reader);
+ assertEquals(1, entry1.index());
+
+ // Test reading a second entry
+ assertEquals(2, reader.getNextIndex());
+ var entry2 = assertNext(reader);
+ assertEquals(2, entry2.index());
+ assertEquals(3, reader.getNextIndex());
+ assertNoNext(reader);
+
+ // Test opening a new reader and reading from the journal.
+ reader = journal.openReader(1);
+ entry1 = assertNext(reader);
+ assertEquals(1, entry1.index());
+
+ assertEquals(2, reader.getNextIndex());
+ entry2 = assertNext(reader);
+ assertEquals(2, entry2.index());
+ assertNoNext(reader);
+
+ // Reset the reader.
+ reader.reset();
+
+ // Test opening a new reader and reading from the journal.
+ reader = journal.openReader(1);
+ entry1 = assertNext(reader);
+ assertEquals(1, entry1.index());
+
+ assertEquals(2, reader.getNextIndex());
+ entry2 = assertNext(reader);
+ assertEquals(2, entry2.index());
+ assertNoNext(reader);
+
+ // Truncate the journal and write a different entry.
+ writer.truncate(1);
+ assertEquals(2, writer.getNextIndex());
+ writer.append(ENTRY);
+ reader.reset(2);
+ indexed = assertNext(reader);
+ assertEquals(2, indexed.index());
+
+ // Reset the reader to a specific index and read the last entry again.
+ reader.reset(2);
+
+ assertEquals(2, reader.getNextIndex());
+ entry2 = assertNext(reader);
+ assertEquals(2, entry2.index());
+ assertNoNext(reader);
+ }
+ }
+
+ @Test
+ public void testResetTruncateZero() throws Exception {
+ try (SegmentedJournal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> reader = journal.openReader(1);
+
+ assertEquals(0, writer.getLastIndex());
+ writer.append(ENTRY);
+ writer.append(ENTRY);
+ writer.reset(1);
+ assertEquals(0, writer.getLastIndex());
+ writer.append(ENTRY);
+
+ var indexed = assertNext(reader);
+ assertEquals(1, indexed.index());
+ writer.reset(1);
+ assertEquals(0, writer.getLastIndex());
+ indexed = writer.append(ENTRY);
+ assertEquals(1, writer.getLastIndex());
+ assertEquals(1, indexed.index());
+
+ indexed = assertNext(reader);
+ assertEquals(1, indexed.index());
+
+ writer.truncate(0);
+ assertEquals(0, writer.getLastIndex());
+ indexed = writer.append(ENTRY);
+ assertEquals(1, writer.getLastIndex());
+ assertEquals(1, indexed.index());
+
+ indexed = assertNext(reader);
+ assertEquals(1, indexed.index());
+ }
+ }
+
+ @Test
+ public void testTruncateRead() throws Exception {
+ int i = 10;
+ try (Journal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> reader = journal.openReader(1);
+
+ for (int j = 1; j <= i; j++) {
+ assertEquals(j, writer.append(new TestEntry(32)).index());
+ }
+
+ for (int j = 1; j <= i - 2; j++) {
+ assertEquals(j, assertNext(reader).index());
+ }
+
+ writer.truncate(i - 2);
+
+ assertNoNext(reader);
+ assertEquals(i - 1, writer.append(new TestEntry(32)).index());
+ assertEquals(i, writer.append(new TestEntry(32)).index());
+
+ var entry = assertNext(reader);
+ assertEquals(i - 1, entry.index());
+ entry = assertNext(reader);
+ assertNotNull(entry);
+ assertEquals(i, entry.index());
+ }
+ }
+
+ @Test
+ public void testWriteReadEntries() throws Exception {
+ try (Journal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> reader = journal.openReader(1);
+
+ for (int i = 1; i <= entriesPerSegment * 5; i++) {
+ writer.append(ENTRY);
+ var entry = assertNext(reader);
+ assertEquals(i, entry.index());
+ assertArrayEquals(ENTRY.bytes(), entry.entry().bytes());
+ reader.reset(i);
+ entry = assertNext(reader);
+ assertEquals(i, entry.index());
+ assertArrayEquals(ENTRY.bytes(), entry.entry().bytes());
+
+ if (i > 6) {
+ reader.reset(i - 5);
+ assertEquals(i - 5, reader.getNextIndex());
+ assertNext(reader);
+ reader.reset(i + 1);
+ }
+
+ writer.truncate(i - 1);
+ writer.append(ENTRY);
+
+ assertNext(reader);
+ reader.reset(i);
+ entry = assertNext(reader);
+ assertEquals(i, entry.index());
+ assertArrayEquals(ENTRY.bytes(), entry.entry().bytes());
+ }
+ }
+ }
+
+ @Test
+ public void testWriteReadCommittedEntries() throws Exception {
+ try (Journal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+ for (int i = 1; i <= entriesPerSegment * 5; i++) {
+ writer.append(ENTRY);
+ assertNoNext(reader);
+ writer.commit(i);
+ var entry = assertNext(reader);
+ assertEquals(i, entry.index());
+ assertArrayEquals(ENTRY.bytes(), entry.entry().bytes());
+ reader.reset(i);
+ entry = assertNext(reader);
+ assertEquals(i, entry.index());
+ assertArrayEquals(ENTRY.bytes(), entry.entry().bytes());
+ }
+ }
+ }
+
+ @Test
+ public void testReadAfterCompact() throws Exception {
+ try (SegmentedJournal<TestEntry> journal = createJournal()) {
+ JournalWriter<TestEntry> writer = journal.writer();
+ JournalReader<TestEntry> uncommittedReader = journal.openReader(1, JournalReader.Mode.ALL);
+ JournalReader<TestEntry> committedReader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+ for (int i = 1; i <= entriesPerSegment * 10; i++) {
+ assertEquals(i, writer.append(ENTRY).index());
+ }
+
+ assertEquals(1, uncommittedReader.getNextIndex());
+ assertEquals(1, committedReader.getNextIndex());
+
+ // This creates asymmetry, as uncommitted reader will move one step ahead...
+ assertNext(uncommittedReader);
+ assertEquals(2, uncommittedReader.getNextIndex());
+ assertNoNext(committedReader);
+ assertEquals(1, committedReader.getNextIndex());
+
+ writer.commit(entriesPerSegment * 9);
+
+ // ... so here we catch up ...
+ assertNext(committedReader);
+ assertEquals(2, committedReader.getNextIndex());
+
+ // ... and continue from the second entry
+ for (int i = 2; i <= entriesPerSegment * 2.5; i++) {
+ var entry = assertNext(uncommittedReader);
+ assertEquals(i, entry.index());
+
+ entry = assertNext(committedReader);
+ assertEquals(i, entry.index());
+ }
+
+ journal.compact(entriesPerSegment * 5 + 1);
+
+ assertEquals(entriesPerSegment * 5 + 1, uncommittedReader.getNextIndex());
+ var entry = assertNext(uncommittedReader);
+ assertEquals(entriesPerSegment * 5 + 1, entry.index());
+
+ assertEquals(entriesPerSegment * 5 + 1, committedReader.getNextIndex());
+ entry = assertNext(committedReader);
+ assertEquals(entriesPerSegment * 5 + 1, entry.index());
+ }
+ }
+
+ /**
+ * Tests reading from a compacted journal.
+ */
+ @Test
+ public void testCompactAndRecover() throws Exception {
+ try (var journal = createJournal()) {
+ // Write three segments to the journal.
+ final var writer = journal.writer();
+ for (int i = 0; i < entriesPerSegment * 3; i++) {
+ writer.append(ENTRY);
+ }
+
+ // Commit the entries and compact the first segment.
+ writer.commit(entriesPerSegment * 3);
+ journal.compact(entriesPerSegment + 1);
+ }
+
+ // Reopen the journal and create a reader.
+ try (var journal = createJournal()) {
+ final var writer = journal.writer();
+ final var reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+ writer.append(ENTRY);
+ writer.append(ENTRY);
+ writer.commit(entriesPerSegment * 3);
+
+ // Ensure the reader starts at the first physical index in the journal.
+ assertEquals(entriesPerSegment + 1, reader.getNextIndex());
+ assertEquals(reader.getFirstIndex(), reader.getNextIndex());
+ assertEquals(entriesPerSegment + 1, assertNext(reader).index());
+ assertEquals(entriesPerSegment + 2, reader.getNextIndex());
+ }
+ }
+
+ @Before
+ @After
+ public void cleanupStorage() throws IOException {
+ if (Files.exists(PATH)) {
+ Files.walkFileTree(PATH, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
+ Files.delete(file);
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
+ Files.delete(dir);
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+ }
+
+ private static @NonNull Indexed<TestEntry> assertNext(final JournalReader<TestEntry> reader) {
+ final var ret = tryNext(reader);
+ assertNotNull(ret);
+ return ret;
+ }
+
+ private static void assertNoNext(final JournalReader<TestEntry> reader) {
+ assertNull(tryNext(reader));
+ }
+
+ private static @Nullable Indexed<TestEntry> tryNext(final JournalReader<TestEntry> reader) {
+ return reader.tryNext(Indexed::new);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class ByteArraySerdes implements EntrySerdes<byte[]> {
+ @Override
+ public byte[] read(final EntryInput input) throws IOException {
+ int length = input.readVarInt();
+ return length == 0 ? null : input.readBytes(length - 1);
+ }
+
+ @Override
+ public void write(final EntryOutput output, final byte[] entry) throws IOException {
+ if (entry != null) {
+ output.writeVarInt(entry.length + 1);
+ output.writeBytes(entry);
+ } else {
+ output.writeVarInt(0);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Disk journal test.
+ */
+public class DiskJournalTest extends AbstractJournalTest {
+ public DiskJournalTest(final int maxSegmentSize) {
+ super(StorageLevel.DISK, maxSegmentSize);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Segment descriptor test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class JournalSegmentDescriptorTest {
+
+ /**
+ * Tests the segment descriptor builder.
+ */
+ @Test
+ public void testDescriptorBuilder() {
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES))
+ .withId(2)
+ .withIndex(1025)
+ .withMaxSegmentSize(1024 * 1024)
+ .withMaxEntries(2048)
+ .build();
+
+ assertEquals(2, descriptor.id());
+ assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+ assertEquals(1025, descriptor.index());
+ assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+ assertEquals(2048, descriptor.maxEntries());
+
+ assertEquals(0, descriptor.updated());
+ long time = System.currentTimeMillis();
+ descriptor.update(time);
+ assertEquals(time, descriptor.updated());
+ }
+
+ /**
+ * Tests copying the segment descriptor.
+ */
+ @Test
+ public void testDescriptorCopy() {
+ JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ .withId(2)
+ .withIndex(1025)
+ .withMaxSegmentSize(1024 * 1024)
+ .withMaxEntries(2048)
+ .build();
+
+ long time = System.currentTimeMillis();
+ descriptor.update(time);
+
+ descriptor = descriptor.copyTo(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES));
+
+ assertEquals(2, descriptor.id());
+ assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+ assertEquals(1025, descriptor.index());
+ assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+ assertEquals(2048, descriptor.maxEntries());
+ assertEquals(time, descriptor.updated());
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Journal segment file test.
+ */
+public class JournalSegmentFileTest {
+
+ @Test
+ public void testIsSegmentFile() throws Exception {
+ assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1.log"));
+ assertFalse(JournalSegmentFile.isSegmentFile("foo", "bar-1.log"));
+ assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1-1.log"));
+ }
+
+ @Test
+ public void testCreateSegmentFile() throws Exception {
+ File file = JournalSegmentFile.createSegmentFile("foo", new File(System.getProperty("user.dir")), 1);
+ assertTrue(JournalSegmentFile.isSegmentFile("foo", file));
+ }
+
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Memory mapped journal test.
+ */
+public class MappedJournalTest extends AbstractJournalTest {
+ public MappedJournalTest(final int maxSegmentSize) {
+ super(StorageLevel.MAPPED, maxSegmentSize);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.util.Arrays;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Test entry.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class TestEntry {
+ private final byte[] bytes;
+
+ public TestEntry(int size) {
+ this(new byte[size]);
+ }
+
+ public TestEntry(byte[] bytes) {
+ this.bytes = bytes;
+ }
+
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public String toString() {
+ return toStringHelper(this)
+ .add("length", bytes.length)
+ .add("hash", Arrays.hashCode(bytes))
+ .toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class TestEntrySerdes implements EntrySerdes<TestEntry> {
+ private static final ByteArraySerdes BA_SERIALIZER = new ByteArraySerdes();
+
+ @Override
+ public TestEntry read(final EntryInput input) throws IOException {
+ return new TestEntry(BA_SERIALIZER.read(input));
+ }
+
+ @Override
+ public void write(final EntryOutput output, final TestEntry entry) throws IOException {
+ BA_SERIALIZER.write(output, entry.bytes());
+ }
+}
--- /dev/null
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Sparse journal index test.
+ */
+public class SparseJournalIndexTest {
+ @Test
+ public void testSparseJournalIndex() throws Exception {
+ JournalIndex index = new SparseJournalIndex(.2);
+ assertNull(index.lookup(1));
+ index.index(1, 2);
+ assertNull(index.lookup(1));
+ index.index(2, 4);
+ index.index(3, 6);
+ index.index(4, 8);
+ index.index(5, 10);
+ assertEquals(new Position(5, 10), index.lookup(5));
+ index.index(6, 12);
+ index.index(7, 14);
+ index.index(8, 16);
+ assertEquals(new Position(5, 10), index.lookup(8));
+ index.index(9, 18);
+ index.index(10, 20);
+ assertEquals(new Position(10, 20), index.lookup(10));
+ index.truncate(8);
+ assertEquals(new Position(5, 10), index.lookup(8));
+ assertEquals(new Position(5, 10), index.lookup(10));
+ index.truncate(4);
+ assertNull(index.lookup(4));
+ assertNull(index.lookup(8));
+
+ index = new SparseJournalIndex(.2);
+ assertNull(index.lookup(100));
+ index.index(101, 2);
+ assertNull(index.lookup(1));
+ index.index(102, 4);
+ index.index(103, 6);
+ index.index(104, 8);
+ index.index(105, 10);
+ assertEquals(new Position(105, 10), index.lookup(105));
+ index.index(106, 12);
+ index.index(107, 14);
+ index.index(108, 16);
+ assertEquals(new Position(105, 10), index.lookup(108));
+ index.index(109, 18);
+ index.index(110, 20);
+ assertEquals(new Position(110, 20), index.lookup(110));
+ index.truncate(108);
+ assertEquals(new Position(105, 10), index.lookup(108));
+ assertEquals(new Position(105, 10), index.lookup(110));
+ index.truncate(104);
+ assertNull(index.lookup(104));
+ assertNull(index.lookup(108));
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class BufferAwareByteArrayOutputStreamTest {
+
+ @Test
+ public void testBufferSize() throws Exception {
+ BufferAwareByteArrayOutputStream outputStream = new BufferAwareByteArrayOutputStream(8);
+ assertEquals(8, outputStream.getBufferSize());
+ outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+ assertEquals(8, outputStream.getBufferSize());
+ outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+ assertEquals(16, outputStream.getBufferSize());
+ outputStream.reset();
+ assertEquals(16, outputStream.getBufferSize());
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class KryoInputPoolTest {
+
+ private KryoInputPool kryoInputPool;
+
+ @Before
+ public void setUp() throws Exception {
+ kryoInputPool = new KryoInputPool();
+ }
+
+ @Test
+ public void discardOutput() {
+ final Input[] result = new Input[2];
+ kryoInputPool.run(input -> {
+ result[0] = input;
+ return null;
+ }, KryoInputPool.MAX_POOLED_BUFFER_SIZE + 1);
+ kryoInputPool.run(input -> {
+ result[1] = input;
+ return null;
+ }, 0);
+ assertTrue(result[0] != result[1]);
+ }
+
+ @Test
+ public void recycleOutput() {
+ final Input[] result = new Input[2];
+ kryoInputPool.run(input -> {
+ assertEquals(0, input.position());
+ byte[] payload = new byte[]{1, 2, 3, 4};
+ input.setBuffer(payload);
+ assertArrayEquals(payload, input.readBytes(4));
+ result[0] = input;
+ return null;
+ }, 0);
+ assertNull(result[0].getInputStream());
+ assertEquals(0, result[0].position());
+ kryoInputPool.run(input -> {
+ result[1] = input;
+ return null;
+ }, 0);
+ assertTrue(result[0] == result[1]);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class KryoOutputPoolTest {
+
+ private KryoOutputPool kryoOutputPool;
+
+ @Before
+ public void setUp() throws Exception {
+ kryoOutputPool = new KryoOutputPool();
+ }
+
+ @Test
+ public void discardOutput() {
+ final Output[] result = new Output[2];
+ kryoOutputPool.run(output -> {
+ result[0] = output;
+ return null;
+ }, KryoOutputPool.MAX_POOLED_BUFFER_SIZE + 1);
+ kryoOutputPool.run(output -> {
+ result[1] = output;
+ return null;
+ }, 0);
+ assertTrue(result[0] != result[1]);
+ }
+
+ @Test
+ public void recycleOutput() {
+ final ByteArrayOutput[] result = new ByteArrayOutput[2];
+ kryoOutputPool.run(output -> {
+ output.writeInt(1);
+ assertEquals(Integer.BYTES, output.position());
+ result[0] = output;
+ return null;
+ }, 0);
+ assertEquals(0, result[0].position());
+ assertEquals(0, result[0].getByteArrayOutputStream().size());
+ kryoOutputPool.run(output -> {
+ assertEquals(0, output.position());
+ result[1] = output;
+ return null;
+ }, 0);
+ assertTrue(result[0] == result[1]);
+ }
+}
--- /dev/null
+<!--
+ ~ Copyright 2017-present Open Networking Laboratory
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+<configuration>
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
+ </pattern>
+ </encoder>
+ </appender>
+
+ <logger name="io.atomix.storage" level="INFO" />
+
+ <root level="${root.logging.level:-INFO}">
+ <appender-ref ref="STDOUT" />
+ </root>
+</configuration>
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal/parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal/parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>benchmark-api</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
+ </dependency>
</dependencies>
</project>
List<OuterList> outerList = new ArrayList<>(outerElements);
for (int j = 0; j < outerElements; j++) {
outerList.add(new OuterListBuilder()
- .setId(j)
- .setInnerList(buildInnerList(j, innerElements))
- .withKey(new OuterListKey(j))
- .build());
+ .setId(j)
+ .setInnerList(buildInnerList(j, innerElements))
+ .withKey(new OuterListKey(j))
+ .build());
}
return outerList;
}
private static Map<InnerListKey, InnerList> buildInnerList(final int index, final int elements) {
Builder<InnerListKey, InnerList> innerList = ImmutableMap.builderWithExpectedSize(elements);
- final String itemStr = "Item-" + String.valueOf(index) + "-";
+ final String itemStr = "Item-" + index + "-";
for (int i = 0; i < elements; i++) {
final InnerListKey key = new InnerListKey(i);
innerList.put(key, new InnerListBuilder()
- .withKey(key)
- .setName(i)
- .setValue(itemStr + String.valueOf(i))
- .build());
+ .withKey(key)
+ .setName(i)
+ .setValue(itemStr + i)
+ .build());
}
return innerList.build();
}
*/
package org.opendaylight.dsbenchmark;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Random;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
protected int txOk = 0;
protected int txError = 0;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "'this' passed to logging")
public DatastoreAbstractWriter(final StartTestInput.Operation oper,
final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
this.outerListElem = outerListElem;
}
protected LogicalDatastoreType getDataStoreType() {
- final LogicalDatastoreType dsType;
- if (dataStore == DataStore.CONFIG) {
- dsType = LogicalDatastoreType.CONFIGURATION;
- } else if (dataStore == DataStore.OPERATIONAL) {
- dsType = LogicalDatastoreType.OPERATIONAL;
- } else {
- if (rn.nextBoolean() == true) {
- dsType = LogicalDatastoreType.OPERATIONAL;
- } else {
- dsType = LogicalDatastoreType.CONFIGURATION;
- }
- }
- return dsType;
+ return dataStore == DataStore.CONFIG || dataStore != DataStore.OPERATIONAL && !rn.nextBoolean()
+ ? LogicalDatastoreType.CONFIGURATION : LogicalDatastoreType.OPERATIONAL;
}
}
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.outer.list.InnerList;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public final class DomListBuilder {
// Inner List Qname identifiers for yang model's 'name' and 'value'
}
public static List<MapEntryNode> buildOuterList(final int outerElements, final int innerElements) {
- List<MapEntryNode> outerList = new ArrayList<>(outerElements);
+ final var outerList = new ArrayList<MapEntryNode>(outerElements);
for (int j = 0; j < outerElements; j++) {
- outerList.add(ImmutableNodes.mapEntryBuilder()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j))
- .withChild(ImmutableNodes.leafNode(OL_ID, j))
- .withChild(buildInnerList(j, innerElements))
- .build());
+ outerList.add(ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j))
+ .withChild(ImmutableNodes.leafNode(OL_ID, j))
+ .withChild(buildInnerList(j, innerElements))
+ .build());
}
return outerList;
}
private static MapNode buildInnerList(final int index, final int elements) {
- CollectionNodeBuilder<MapEntryNode, MapNode> innerList = ImmutableNodes.mapNodeBuilder(InnerList.QNAME);
+ final var innerList = ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(InnerList.QNAME));
- final String itemStr = "Item-" + String.valueOf(index) + "-";
+ final String itemStr = "Item-" + index + "-";
for (int i = 0; i < elements; i++) {
- innerList.addChild(ImmutableNodes.mapEntryBuilder()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i))
- .withChild(ImmutableNodes.leafNode(IL_NAME, i))
- .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
- .build());
+ innerList.addChild(ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i))
+ .withChild(ImmutableNodes.leafNode(IL_NAME, i))
+ .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
+ .build());
}
return innerList.build();
}
*/
package org.opendaylight.dsbenchmark;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.dsbenchmark.listener.DsbenchmarkListenerProvider;
import org.opendaylight.dsbenchmark.simpletx.SimpletxBaDelete;
import org.opendaylight.dsbenchmark.simpletx.SimpletxBaRead;
import org.opendaylight.dsbenchmark.txchain.TxchainDomRead;
import org.opendaylight.dsbenchmark.txchain.TxchainDomWrite;
import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.DsbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatus.ExecStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatusBuilder;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
-
+@Singleton
+@Component(service = { })
+@RequireServiceComponentRuntime
+public final class DsbenchmarkProvider implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkProvider.class);
- private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
- InstanceIdentifier.builder(TestExec.class).build();
- private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID =
- InstanceIdentifier.builder(TestStatus.class).build();
+ private static final InstanceIdentifier<TestExec> TEST_EXEC_IID = InstanceIdentifier.create(TestExec.class);
+ private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID = InstanceIdentifier.create(TestStatus.class);
private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
- private final DsbenchmarkListenerProvider listenerProvider = new DsbenchmarkListenerProvider();
- private final DOMDataBroker domDataBroker; // Async DOM Broker for use with all DOM operations
- private final DataBroker dataBroker; // Async Binding-Aware Broker for use in tx chains
+ private final DsbenchmarkListenerProvider listenerProvider;
+ // Async DOM Broker for use with all DOM operations
+ private final DOMDataBroker domDataBroker;
+ // Async Binding-Aware Broker for use in tx chains;
+ private final DataBroker dataBroker;
+ private final Registration rpcReg;
private long testsCompleted = 0;
- public DsbenchmarkProvider(final DOMDataBroker domDataBroker, final DataBroker dataBroker) {
- this.domDataBroker = domDataBroker;
- this.dataBroker = dataBroker;
- }
-
+ @Inject
+ @Activate
@SuppressWarnings("checkstyle:illegalCatch")
- public void init() {
- listenerProvider.setDataBroker(dataBroker);
+ public DsbenchmarkProvider(@Reference final DOMDataBroker domDataBroker, @Reference final DataBroker dataBroker,
+ @Reference final RpcProviderService rpcService) {
+ this.domDataBroker = requireNonNull(domDataBroker);
+ this.dataBroker = requireNonNull(dataBroker);
+ listenerProvider = new DsbenchmarkListenerProvider(dataBroker);
try {
// We want to set the initial operation status so users can detect we are ready to start test.
- setTestOperData(this.execStatus.get(), testsCompleted);
+ setTestOperData(execStatus.get(), testsCompleted);
} catch (final Exception e) {
// TODO: Use a singleton service to make sure the initial write is performed only once.
LOG.warn("Working around Bugs 8829 and 6793 by ignoring exception from setTestOperData", e);
}
+ rpcReg = rpcService.registerRpcImplementations((StartTest) this::startTest, (CleanupStore) this::cleanupStore);
LOG.info("DsbenchmarkProvider initiated");
}
@Override
+ @PreDestroy
+ @Deactivate
public void close() {
+ rpcReg.close();
LOG.info("DsbenchmarkProvider closed");
}
- @Override
- public ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
+ private ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
cleanupTestStore();
LOG.debug("Data Store cleaned up");
return Futures.immediateFuture(RpcResultBuilder.success(new CleanupStoreOutputBuilder().build()).build());
}
- @Override
@SuppressWarnings("checkstyle:illegalCatch")
- public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+ private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
LOG.info("Starting the data store benchmark test, input: {}", input);
// Check if there is a test in progress
- if (execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing) == false) {
+ if (!execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing)) {
LOG.info("Test in progress");
return RpcResultBuilder.success(new StartTestOutputBuilder()
- .setStatus(StartTestOutput.Status.TESTINPROGRESS)
- .build()).buildFuture();
+ .setStatus(StartTestOutput.Status.TESTINPROGRESS)
+ .build()).buildFuture();
}
// Cleanup data that may be left over from a previous test run
endTime = System.nanoTime();
execTime = (endTime - startTime) / 1000;
- this.testsCompleted++;
+ testsCompleted++;
} catch (final Exception e) {
- LOG.error("Test error: {}", e.toString());
+ LOG.error("Test error", e);
execStatus.set(ExecStatus.Idle);
return RpcResultBuilder.success(new StartTestOutputBuilder()
- .setStatus(StartTestOutput.Status.FAILED)
- .build()).buildFuture();
+ .setStatus(StartTestOutput.Status.FAILED)
+ .build()).buildFuture();
}
LOG.info("Test finished");
if (txType == StartTestInput.TransactionType.SIMPLETX) {
if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
if (StartTestInput.Operation.DELETE == oper) {
- retVal = new SimpletxBaDelete(this.dataBroker, outerListElem,
+ retVal = new SimpletxBaDelete(dataBroker, outerListElem,
innerListElem,writesPerTx, dataStore);
} else if (StartTestInput.Operation.READ == oper) {
- retVal = new SimpletxBaRead(this.dataBroker, outerListElem,
+ retVal = new SimpletxBaRead(dataBroker, outerListElem,
innerListElem, writesPerTx, dataStore);
} else {
- retVal = new SimpletxBaWrite(this.dataBroker, oper, outerListElem,
+ retVal = new SimpletxBaWrite(dataBroker, oper, outerListElem,
innerListElem, writesPerTx, dataStore);
}
+ } else if (StartTestInput.Operation.DELETE == oper) {
+ retVal = new SimpletxDomDelete(domDataBroker, outerListElem,
+ innerListElem, writesPerTx, dataStore);
+ } else if (StartTestInput.Operation.READ == oper) {
+ retVal = new SimpletxDomRead(domDataBroker, outerListElem,
+ innerListElem, writesPerTx, dataStore);
} else {
- if (StartTestInput.Operation.DELETE == oper) {
- retVal = new SimpletxDomDelete(this.domDataBroker, outerListElem,
- innerListElem, writesPerTx, dataStore);
- } else if (StartTestInput.Operation.READ == oper) {
- retVal = new SimpletxDomRead(this.domDataBroker, outerListElem,
- innerListElem, writesPerTx, dataStore);
- } else {
- retVal = new SimpletxDomWrite(this.domDataBroker, oper, outerListElem,
- innerListElem, writesPerTx, dataStore);
- }
+ retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem,
+ innerListElem, writesPerTx, dataStore);
}
- } else {
- if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
- if (StartTestInput.Operation.DELETE == oper) {
- retVal = new TxchainBaDelete(this.dataBroker, outerListElem,
- innerListElem, writesPerTx, dataStore);
- } else if (StartTestInput.Operation.READ == oper) {
- retVal = new TxchainBaRead(this.dataBroker, outerListElem,
- innerListElem,writesPerTx, dataStore);
- } else {
- retVal = new TxchainBaWrite(this.dataBroker, oper, outerListElem,
- innerListElem, writesPerTx, dataStore);
- }
+ } else if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
+ if (StartTestInput.Operation.DELETE == oper) {
+ retVal = new TxchainBaDelete(dataBroker, outerListElem,
+ innerListElem, writesPerTx, dataStore);
+ } else if (StartTestInput.Operation.READ == oper) {
+ retVal = new TxchainBaRead(dataBroker, outerListElem,
+ innerListElem,writesPerTx, dataStore);
} else {
- if (StartTestInput.Operation.DELETE == oper) {
- retVal = new TxchainDomDelete(this.domDataBroker, outerListElem,
- innerListElem, writesPerTx, dataStore);
- } else if (StartTestInput.Operation.READ == oper) {
- retVal = new TxchainDomRead(this.domDataBroker, outerListElem,
- innerListElem, writesPerTx, dataStore);
-
- } else {
- retVal = new TxchainDomWrite(this.domDataBroker, oper, outerListElem,
- innerListElem,writesPerTx, dataStore);
- }
+ retVal = new TxchainBaWrite(dataBroker, oper, outerListElem,
+ innerListElem, writesPerTx, dataStore);
}
+ } else if (StartTestInput.Operation.DELETE == oper) {
+ retVal = new TxchainDomDelete(domDataBroker, outerListElem,
+ innerListElem, writesPerTx, dataStore);
+ } else if (StartTestInput.Operation.READ == oper) {
+ retVal = new TxchainDomRead(domDataBroker, outerListElem,
+ innerListElem, writesPerTx, dataStore);
+
+ } else {
+ retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem,
+ innerListElem,writesPerTx, dataStore);
}
} finally {
execStatus.set(ExecStatus.Idle);
*/
package org.opendaylight.dsbenchmark.listener;
-import java.util.Collection;
+import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataTreeModification;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final AtomicInteger numDataChanges = new AtomicInteger(0);
@Override
- public void onDataTreeChanged(
- final Collection<DataTreeModification<TestExec>> changes) {
+ public void onDataTreeChanged(final List<DataTreeModification<TestExec>> changes) {
// Since we're registering the same DsbenchmarkListener object for both
// OPERATIONAL and CONFIG, the onDataTreeChanged() method can be called
// from different threads, and we need to use atomic counters.
}
private static synchronized void logDataTreeChangeEvent(final int eventNum,
- final Collection<DataTreeModification<TestExec>> changes) {
+ final List<DataTreeModification<TestExec>> changes) {
LOG.debug("DsbenchmarkListener-onDataTreeChanged: Event {}", eventNum);
- for (DataTreeModification<TestExec> change : changes) {
- final DataObjectModification<TestExec> rootNode = change.getRootNode();
- final ModificationType modType = rootNode.getModificationType();
- final PathArgument changeId = rootNode.getIdentifier();
- final Collection<? extends DataObjectModification<? extends DataObject>> modifications =
- rootNode.getModifiedChildren();
+ for (var change : changes) {
+ final var rootNode = change.getRootNode();
+ final var modType = rootNode.modificationType();
+ final var changeId = rootNode.step();
+ final var modifications = rootNode.modifiedChildren();
LOG.debug(" changeId {}, modType {}, mods: {}", changeId, modType, modifications.size());
- for (DataObjectModification<? extends DataObject> mod : modifications) {
- LOG.debug(" mod-getDataAfter: {}", mod.getDataAfter());
+ for (var mod : modifications) {
+ LOG.debug(" mod-getDataAfter: {}", mod.dataAfter());
}
}
}
*/
package org.opendaylight.dsbenchmark.listener;
+import static java.util.Objects.requireNonNull;
+
import java.util.ArrayList;
import java.util.List;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkListenerProvider.class);
private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
InstanceIdentifier.builder(TestExec.class).build();
- private final List<ListenerRegistration<DsbenchmarkListener>> listeners =
- new ArrayList<>();
- private DataBroker dataBroker;
+ private final List<DsbenchmarkListener> listeners = new ArrayList<>();
+ private final List<Registration> registrations = new ArrayList<>();
+ private final DataBroker dataBroker;
- public void setDataBroker(final DataBroker dataBroker) {
- this.dataBroker = dataBroker;
+ public DsbenchmarkListenerProvider(final DataBroker dataBroker) {
+ this.dataBroker = requireNonNull(dataBroker);
LOG.debug("DsbenchmarkListenerProvider created");
}
public void createAndRegisterListeners(final int numListeners) {
for (int i = 0; i < numListeners; i++) {
- DsbenchmarkListener listener = new DsbenchmarkListener();
- listeners.add(dataBroker.registerDataTreeChangeListener(
- DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
- listeners.add(dataBroker.registerDataTreeChangeListener(
- DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
+ var listener = new DsbenchmarkListener();
+ listeners.add(listener);
+ registrations.add(dataBroker.registerTreeChangeListener(
+ DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
+ registrations.add(dataBroker.registerTreeChangeListener(
+ DataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
}
LOG.debug("DsbenchmarkListenerProvider created {} listeneres", numListeners);
public long getDataChangeCount() {
long dataChanges = 0;
- for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
- dataChanges += listenerRegistration.getInstance().getNumDataChanges();
+ for (var listener : listeners) {
+ dataChanges += listener.getNumDataChanges();
}
LOG.debug("DsbenchmarkListenerProvider , total data changes {}", dataChanges);
return dataChanges;
public long getEventCountAndDestroyListeners() {
long totalEvents = 0;
- for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
- totalEvents += listenerRegistration.getInstance().getNumEvents();
- listenerRegistration.close();
+ registrations.forEach(Registration::close);
+ registrations.clear();
+
+ for (var listener : listeners) {
+ totalEvents += listener.getNumEvents();
}
listeners.clear();
+
LOG.debug("DsbenchmarkListenerProvider destroyed listeneres, total events {}", totalEvents);
return totalEvents;
}
try {
optionalDataObject = submitFuture.get();
if (optionalDataObject != null && optionalDataObject.isPresent()) {
- OuterList outerList = optionalDataObject.get();
+ OuterList outerList = optionalDataObject.orElseThrow();
String[] objectsArray = new String[outerList.getInnerList().size()];
}
for (int i = 0; i < outerList.getInnerList().size(); i++) {
String itemStr = objectsArray[i];
- if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+ if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
LOG.error("innerList: name: {}, value: {}", i, itemStr);
break;
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.dsbenchmark.simpletx;
+import static java.util.Objects.requireNonNull;
+
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.opendaylight.dsbenchmark.BaListBuilder;
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
public class SimpletxBaWrite extends DatastoreAbstractWriter {
private static final Logger LOG = LoggerFactory.getLogger(SimpletxBaWrite.class);
+
private final DataBroker dataBroker;
- private List<OuterList> list;
+ private List<OuterList> list = null;
public SimpletxBaWrite(final DataBroker dataBroker, final StartTestInput.Operation oper,
final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
- this.dataBroker = dataBroker;
+ this.dataBroker = requireNonNull(dataBroker);
LOG.debug("Created SimpletxBaWrite");
}
@Override
public void createList() {
- list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+ list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
}
@Override
public void executeList() {
- final LogicalDatastoreType dsType = getDataStoreType();
+ final var dsType = getDataStoreType();
- WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+ var tx = dataBroker.newWriteOnlyTransaction();
long writeCnt = 0;
- for (OuterList element : this.list) {
- InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
- .child(OuterList.class, element.key());
+ for (var element : list) {
+ final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
if (oper == StartTestInput.Operation.PUT) {
tx.put(dsType, iid, element);
} else {
}
}
}
-
}
try (DOMDataTreeReadTransaction tx = domDataBroker.newReadOnlyTransaction()) {
for (int l = 0; l < outerListElem; l++) {
YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
- FluentFuture<Optional<NormalizedNode<?, ?>>> submitFuture = tx.read(dsType, yid);
+ FluentFuture<Optional<NormalizedNode>> submitFuture = tx.read(dsType, yid);
try {
- Optional<NormalizedNode<?,?>> optionalDataObject = submitFuture.get();
+ Optional<NormalizedNode> optionalDataObject = submitFuture.get();
if (optionalDataObject != null && optionalDataObject.isPresent()) {
- NormalizedNode<?, ?> ret = optionalDataObject.get();
+ NormalizedNode ret = optionalDataObject.orElseThrow();
LOG.trace("optionalDataObject is {}", ret);
txOk++;
} else {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.dsbenchmark.simpletx;
+import static java.util.Objects.requireNonNull;
+
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.dsbenchmark.DomListBuilder;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
public class SimpletxDomWrite extends DatastoreAbstractWriter {
private static final Logger LOG = LoggerFactory.getLogger(SimpletxDomWrite.class);
- private final DOMDataBroker domDataBroker;
- private List<MapEntryNode> list;
- public SimpletxDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+ private final DOMDataBroker dataBroker;
+ private List<MapEntryNode> list = null;
+
+ public SimpletxDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
final int outerListElem, final int innerListElem, final long putsPerTx, final DataStore dataStore) {
super(oper, outerListElem, innerListElem, putsPerTx, dataStore);
- this.domDataBroker = domDataBroker;
+ this.dataBroker = requireNonNull(dataBroker);
LOG.debug("Created SimpletxDomWrite");
}
@Override
public void createList() {
- list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+ list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
}
@Override
public void executeList() {
- final LogicalDatastoreType dsType = getDataStoreType();
- final YangInstanceIdentifier pid =
- YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
+ final var dsType = getDataStoreType();
+ final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
- DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
+ var tx = dataBroker.newWriteOnlyTransaction();
long writeCnt = 0;
- for (MapEntryNode element : this.list) {
- YangInstanceIdentifier yid =
- pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap()));
+ for (var element : list) {
+ final var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
if (oper == StartTestInput.Operation.PUT) {
tx.put(dsType, yid, element);
LOG.error("Transaction failed", e);
txError++;
}
- tx = domDataBroker.newWriteOnlyTransaction();
+ tx = dataBroker.newWriteOnlyTransaction();
writeCnt = 0;
}
}
import java.util.concurrent.ExecutionException;
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainBaDelete extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
private static final Logger LOG = LoggerFactory.getLogger(TxchainBaDelete.class);
private final DataBroker bindingDataBroker;
@Override
public void executeList() {
final LogicalDatastoreType dsType = getDataStoreType();
- final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this);
+ final TransactionChain chain = bindingDataBroker.createMergingTransactionChain();
+ chain.addCallback(this);
WriteTransaction tx = chain.newWriteOnlyTransaction();
int txSubmitted = 0;
}
@Override
- public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause);
+ public void onFailure(final Throwable cause) {
+ LOG.error("Broken chain in TxchainBaDelete", cause);
}
@Override
- public void onTransactionChainSuccessful(final TransactionChain chain) {
- LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
+ public void onSuccess(final Empty chain) {
+ LOG.debug("TxchainBaDelete closed successfully");
}
}
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainBaRead extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaRead extends DatastoreAbstractWriter {
private static final Logger LOG = LoggerFactory.getLogger(TxchainBaRead.class);
private final DataBroker bindingDataBroker;
try {
Optional<OuterList> optionalDataObject = submitFuture.get();
if (optionalDataObject != null && optionalDataObject.isPresent()) {
- OuterList outerList = optionalDataObject.get();
+ OuterList outerList = optionalDataObject.orElseThrow();
- String[] objectsArray = new String[outerList.getInnerList().size()];
- for (InnerList innerList : outerList.getInnerList().values()) {
+ String[] objectsArray = new String[outerList.nonnullInnerList().size()];
+ for (InnerList innerList : outerList.nonnullInnerList().values()) {
if (objectsArray[innerList.getName()] != null) {
LOG.error("innerList: DUPLICATE name: {}, value: {}", innerList.getName(),
innerList.getValue());
}
objectsArray[innerList.getName()] = innerList.getValue();
}
- for (int i = 0; i < outerList.getInnerList().size(); i++) {
+ for (int i = 0; i < outerList.nonnullInnerList().size(); i++) {
String itemStr = objectsArray[i];
- if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+ if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
LOG.error("innerList: name: {}, value: {}", i, itemStr);
break;
}
}
}
}
-
- @Override
- public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause);
- }
-
- @Override
- public void onTransactionChainSuccessful(final TransactionChain chain) {
- LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
- }
}
*/
package org.opendaylight.dsbenchmark.txchain;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import org.opendaylight.dsbenchmark.BaListBuilder;
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.Operation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainBaWrite extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
private static final Logger LOG = LoggerFactory.getLogger(TxchainBaWrite.class);
- private final DataBroker bindingDataBroker;
- private List<OuterList> list;
- public TxchainBaWrite(final DataBroker bindingDataBroker, final Operation oper,
- final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
+ private final DataBroker dataBroker;
+ private List<OuterList> list = null;
+
+ public TxchainBaWrite(final DataBroker dataBroker, final Operation oper, final int outerListElem,
+ final int innerListElem, final long writesPerTx, final DataStore dataStore) {
super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
- this.bindingDataBroker = bindingDataBroker;
+ this.dataBroker = requireNonNull(dataBroker);
LOG.debug("Created TxchainBaWrite");
}
@Override
public void createList() {
- list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+ list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
}
@Override
public void executeList() {
- final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this);
- final LogicalDatastoreType dsType = getDataStoreType();
+ final var chain = dataBroker.createMergingTransactionChain();
+ chain.addCallback(this);
+ final var dsType = getDataStoreType();
- WriteTransaction tx = chain.newWriteOnlyTransaction();
+ var tx = chain.newWriteOnlyTransaction();
int txSubmitted = 0;
int writeCnt = 0;
- for (OuterList element : this.list) {
- InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
- .child(OuterList.class, element.key());
+ for (var element : list) {
+ final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
if (oper == StartTestInput.Operation.PUT) {
tx.put(dsType, iid, element);
}
@Override
- public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in DatastoreBaAbstractWrite, transaction {}", chain, transaction.getIdentifier(),
- cause);
+ public void onFailure(final Throwable cause) {
+ LOG.error("Broken chain in DatastoreBaAbstractWrite", cause);
}
@Override
- public void onTransactionChainSuccessful(final TransactionChain chain) {
- LOG.debug("DatastoreBaAbstractWrite closed successfully, chain {}", chain);
+ public void onSuccess(final Empty result) {
+ LOG.debug("DatastoreBaAbstractWrite closed successfully");
}
}
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
private static final Logger LOG = LoggerFactory.getLogger(TxchainDomDelete.class);
private final DOMDataBroker domDataBroker;
final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
final YangInstanceIdentifier pid =
YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
- final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this);
+ final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain();
+ chain.addCallback(this);
DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
int txSubmitted = 0;
}
@Override
- public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause);
+ public void onFailure(final Throwable cause) {
+ LOG.error("Broken chain in TxchainDomDelete", cause);
}
@Override
- public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
- LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
+ public void onSuccess(final Empty result) {
+ LOG.debug("TxchainDomDelete closed successfully");
}
}
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomRead extends DatastoreAbstractWriter {
private static final Logger LOG = LoggerFactory.getLogger(TxchainDomRead.class);
private final DOMDataBroker domDataBroker;
@Override
public void executeList() {
final LogicalDatastoreType dsType = getDataStoreType();
- final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
+ final QName olId = QName.create(OuterList.QNAME, "id");
final YangInstanceIdentifier pid =
YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
try (DOMDataTreeReadTransaction tx = domDataBroker.newReadOnlyTransaction()) {
for (int l = 0; l < outerListElem; l++) {
YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
- Optional<NormalizedNode<?,?>> optionalDataObject;
- FluentFuture<Optional<NormalizedNode<?, ?>>> submitFuture = tx.read(dsType, yid);
+ Optional<NormalizedNode> optionalDataObject;
+ FluentFuture<Optional<NormalizedNode>> submitFuture = tx.read(dsType, yid);
try {
optionalDataObject = submitFuture.get();
if (optionalDataObject != null && optionalDataObject.isPresent()) {
}
}
}
-
- @Override
- public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause);
- }
-
- @Override
- public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
- LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
- }
}
import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
import org.opendaylight.dsbenchmark.DomListBuilder;
import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
private static final Logger LOG = LoggerFactory.getLogger(TxchainDomWrite.class);
- private final DOMDataBroker domDataBroker;
- private List<MapEntryNode> list;
- public TxchainDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+ private final DOMDataBroker dataBroker;
+ private List<MapEntryNode> list = null;
+
+ public TxchainDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
- this.domDataBroker = domDataBroker;
+ this.dataBroker = dataBroker;
LOG.debug("Created TxchainDomWrite");
}
@Override
public void createList() {
- list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+ list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
}
@Override
public void executeList() {
- final LogicalDatastoreType dsType = getDataStoreType();
- final YangInstanceIdentifier pid =
- YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
- final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this);
+ final var dsType = getDataStoreType();
+ final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
+ final var chain = dataBroker.createMergingTransactionChain();
+ chain.addCallback(this);
- DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
+ var tx = chain.newWriteOnlyTransaction();
int txSubmitted = 0;
int writeCnt = 0;
- for (MapEntryNode element : this.list) {
- YangInstanceIdentifier yid =
- pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap()));
+ for (var element : list) {
+ var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
if (oper == StartTestInput.Operation.PUT) {
tx.put(dsType, yid, element);
}
@Override
- public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
- final Throwable cause) {
- LOG.error("Broken chain {} in TxchainDomWrite, transaction {}", chain, transaction.getIdentifier(), cause);
+ public void onFailure(final Throwable cause) {
+ LOG.error("Broken chain in TxchainDomWrite", cause);
}
@Override
- public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
- LOG.debug("Chain {} closed successfully", chain);
+ public void onSuccess(final Empty result) {
+ LOG.debug("Chain closed successfully");
}
}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <reference id="domDataBroker" interface="org.opendaylight.mdsal.dom.api.DOMDataBroker"/>
- <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
-
- <bean id="provider" class="org.opendaylight.dsbenchmark.DsbenchmarkProvider"
- init-method="init" destroy-method="close">
- <argument ref="domDataBroker"/>
- <argument ref="dataBroker"/>
- </bean>
-
- <odl:rpc-implementation ref="provider"/>
-</blueprint>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal/parent</relativePath>
</parent>
<artifactId>mdsal-binding-api</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-dom-api</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
</dependency>
</dependencies>
</project>
package ntfbenchmark.impl;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer {
+ private static final Logger LOG = LoggerFactory.getLogger(NtfbenchBlockingProducer.class);
public NtfbenchBlockingProducer(final NotificationPublishService publishService, final int iterations,
final int payloadSize) {
int ntfOk = 0;
int ntfError = 0;
- for (int i = 0; i < this.iterations; i++) {
+ for (int i = 0; i < iterations; i++) {
try {
- this.publishService.putNotification(this.ntf);
+ publishService.putNotification(ntf);
ntfOk++;
} catch (final Exception e) {
ntfError++;
+ LOG.debug("Failed to push notification", e);
}
}
int ntfOk = 0;
int ntfError = 0;
ListenableFuture<?> lastOkFuture = null;
- for (int i = 0; i < this.iterations; i++) {
+ for (int i = 0; i < iterations; i++) {
try {
- final ListenableFuture<?> result = this.publishService.offerNotification(this.ntf);
+ final ListenableFuture<?> result = publishService.offerNotification(ntf);
if (NotificationPublishService.REJECTED == result) {
ntfError++;
} else {
try {
lastOkFuture.get();
} catch (InterruptedException | ExecutionException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
}
-
}
import com.google.common.util.concurrent.Futures;
import java.util.concurrent.Future;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.NtfbenchPayloadListener;
-
-public class NtfbenchTestListener implements NtfbenchPayloadListener {
+public class NtfbenchTestListener implements Listener<Ntfbench> {
private final int expectedSize;
private int received = 0;
}
@Override
- public void onNtfbench(final Ntfbench notification) {
+ public void onNotification(final Ntfbench notification) {
if (expectedSize == notification.getPayload().size()) {
received++;
}
}
@Override
- public void onNtfbench(final Ntfbench notification) {
- // TODO Auto-generated method stub
- super.onNtfbench(notification);
+ public void onNotification(final Ntfbench notification) {
+ super.onNotification(notification);
if (expectedCount == getReceived()) {
allDone.set(null);
}
package ntfbenchmark.impl;
import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
-import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.NtfbenchmarkService;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput.ProducerType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService {
+@Singleton
+@Component(service = {})
+@RequireServiceComponentRuntime
+public final class NtfbenchmarkProvider implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(NtfbenchmarkProvider.class);
private static final int TEST_TIMEOUT = 5;
private final NotificationService listenService;
private final NotificationPublishService publishService;
-
- public NtfbenchmarkProvider(final NotificationService listenServiceDependency,
- final NotificationPublishService publishServiceDependency) {
- LOG.debug("NtfbenchmarkProvider Constructor");
- listenService = listenServiceDependency;
- publishService = publishServiceDependency;
- }
-
- public void init() {
- LOG.info("NtfbenchmarkProvider initiated");
+ private final Registration reg;
+
+ @Inject
+ @Activate
+ public NtfbenchmarkProvider(@Reference final NotificationService listenService,
+ @Reference final NotificationPublishService publishService,
+ @Reference final RpcProviderService rpcService) {
+ this.listenService = requireNonNull(listenService);
+ this.publishService = requireNonNull(publishService);
+ reg = rpcService.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
+ LOG.debug("NtfbenchmarkProvider initiated");
}
@Override
+ @PreDestroy
+ @Deactivate
public void close() {
+ reg.close();
LOG.info("NtfbenchmarkProvider closed");
}
- @Override
- public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+ private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
final int producerCount = input.getProducers().intValue();
final int listenerCount = input.getListeners().intValue();
final int iterations = input.getIterations().intValue();
final int payloadSize = input.getIterations().intValue();
- final List<AbstractNtfbenchProducer> producers = new ArrayList<>(producerCount);
- final List<ListenerRegistration<NtfbenchTestListener>> listeners = new ArrayList<>(listenerCount);
+ final var producers = new ArrayList<AbstractNtfbenchProducer>(producerCount);
for (int i = 0; i < producerCount; i++) {
producers.add(new NtfbenchBlockingProducer(publishService, iterations, payloadSize));
}
int expectedCntPerListener = producerCount * iterations;
+ final var listeners = new ArrayList<NtfbenchTestListener>(listenerCount);
+ final var registrations = new ArrayList<Registration>(listenerCount);
for (int i = 0; i < listenerCount; i++) {
final NtfbenchTestListener listener;
if (input.getProducerType() == ProducerType.BLOCKING) {
} else {
listener = new NtfbenchTestListener(payloadSize);
}
- listeners.add(listenService.registerNotificationListener(listener));
+ listeners.add(listener);
+ registrations.add(listenService.registerListener(Ntfbench.class, listener));
}
try {
executor.shutdown();
try {
executor.awaitTermination(TEST_TIMEOUT, TimeUnit.MINUTES);
- for (ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
- listenerRegistration.getInstance().getAllDone().get();
+ for (var listener : listeners) {
+ listener.getAllDone().get();
}
} catch (final InterruptedException | ExecutionException e) {
- LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT);
+ LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT, e);
}
final long producerEndTime = System.nanoTime();
long allProducersOk = 0;
long allProducersError = 0;
- for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
- allListeners += listenerRegistration.getInstance().getReceived();
+ for (var listener : listeners) {
+ allListeners += listener.getReceived();
}
final long listenerElapsedTime = producerEndTime - startTime;
allProducersError += abstractNtfbenchProducer.getNtfError();
}
- final StartTestOutput output =
- new StartTestOutputBuilder()
- .setProducerElapsedTime(Uint32.valueOf(producerElapsedTime / 1000000))
- .setListenerElapsedTime(Uint32.valueOf(listenerElapsedTime / 1000000))
- .setListenerOk(Uint32.valueOf(allListeners))
- .setProducerOk(Uint32.valueOf(allProducersOk))
- .setProducerError(Uint32.valueOf(allProducersError))
- .setProducerRate(Uint32.valueOf((allProducersOk + allProducersError) * 1000000000
- / producerElapsedTime))
- .setListenerRate(Uint32.valueOf(allListeners * 1000000000 / listenerElapsedTime))
- .build();
+ final StartTestOutput output = new StartTestOutputBuilder()
+ .setProducerElapsedTime(Uint32.valueOf(producerElapsedTime / 1000000))
+ .setListenerElapsedTime(Uint32.valueOf(listenerElapsedTime / 1000000))
+ .setListenerOk(Uint32.valueOf(allListeners))
+ .setProducerOk(Uint32.valueOf(allProducersOk))
+ .setProducerError(Uint32.valueOf(allProducersError))
+ .setProducerRate(
+ Uint32.valueOf((allProducersOk + allProducersError) * 1000000000 / producerElapsedTime))
+ .setListenerRate(Uint32.valueOf(allListeners * 1000000000 / listenerElapsedTime))
+ .build();
return RpcResultBuilder.success(output).buildFuture();
} finally {
- for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
- listenerRegistration.close();
- }
+ registrations.forEach(Registration::close);
}
}
- @Override
- public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
- // TODO Auto-generated method stub
- return null;
+ private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+ throw new UnsupportedOperationException("Not implemented");
}
-
}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <reference id="publishService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService"/>
- <reference id="listenerService" interface="org.opendaylight.mdsal.binding.api.NotificationService"/>
-
- <bean id="provider" class="ntfbenchmark.impl.NtfbenchmarkProvider"
- init-method="init" destroy-method="close">
- <argument ref="publishService"/>
- <argument ref="listenerService"/>
- </bean>
-
- <odl:rpc-implementation ref="provider"/>
-</blueprint>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>benchmark-aggregator</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<module>dsbenchmark</module>
<module>ntfbenchmark</module>
<module>rpcbenchmark</module>
+ <module>segjournal-benchmark</module>
</modules>
</project>
and is available at http://www.eclipse.org/legal/epl-v10.html
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../../opendaylight/md-sal/parent</relativePath>
- </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../../opendaylight/md-sal/parent</relativePath>
+ </parent>
- <artifactId>rpcbenchmark</artifactId>
- <packaging>bundle</packaging>
+ <artifactId>rpcbenchmark</artifactId>
+ <packaging>bundle</packaging>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>benchmark-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-binding-api</artifactId>
- </dependency>
- </dependencies>
+ <dependencies>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>benchmark-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>yang-binding</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ </dependencies>
</project>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package rpcbenchmark.impl;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutputBuilder;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+abstract class AbstractRpcbenchPayloadService {
+ private int numRpcs = 0;
+
+ final ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
+ numRpcs++;
+ return RpcResultBuilder.success(new GlobalRpcBenchOutputBuilder(input).build()).buildFuture();
+ }
+
+ final ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
+ numRpcs++;
+ return RpcResultBuilder.success(new RoutedRpcBenchOutputBuilder(input).build()).buildFuture();
+ }
+
+ final int getNumRpcs() {
+ return numRpcs;
+ }
+}
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
-import java.util.Map;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
-import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GlobalBindingRTCClient implements RTCClient {
private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCClient.class);
- private final RpcbenchPayloadService service;
+ private final GlobalRpcBench globalRpcBench;
private final AtomicLong rpcOk = new AtomicLong(0);
private final AtomicLong rpcError = new AtomicLong(0);
private final GlobalRpcBenchInput inVal;
return rpcError.get();
}
- public GlobalBindingRTCClient(final RpcConsumerRegistry registry, final int inSize) {
- if (registry != null) {
- this.service = registry.getRpcService(RpcbenchPayloadService.class);
- } else {
- this.service = null;
- }
+ public GlobalBindingRTCClient(final RpcService rpcService, final int inSize) {
+ globalRpcBench = rpcService.getRpc(GlobalRpcBench.class);
this.inSize = inSize;
Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
int error = 0;
for (int i = 0; i < iterations; i++) {
- Future<RpcResult<GlobalRpcBenchOutput>> output = service.globalRpcBench(inVal);
+ final var output = globalRpcBench.invoke(inVal);
try {
- RpcResult<GlobalRpcBenchOutput> rpcResult = output.get();
+ final var rpcResult = output.get();
if (rpcResult.isSuccessful()) {
- Map<PayloadKey, Payload> retVal = rpcResult.getResult().getPayload();
+ final var retVal = rpcResult.getResult().getPayload();
if (retVal.size() == inSize) {
ok++;
}
// TODO Auto-generated method stub
}
-
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package rpcbenchmark.impl;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class GlobalBindingRTCServer implements RpcbenchPayloadService {
-
+final class GlobalBindingRTCServer extends AbstractRpcbenchPayloadService implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCServer.class);
- private int numRpcs = 0;
- public GlobalBindingRTCServer() {
- LOG.debug("GlobalBindingRTCServer created.");
- }
+ private final Registration reg;
- @Override
- public ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
- GlobalRpcBenchOutput output = new GlobalRpcBenchOutputBuilder(input).build();
- RpcResult<GlobalRpcBenchOutput> result = RpcResultBuilder.success(output).build();
- numRpcs++;
- return Futures.immediateFuture(result);
+ GlobalBindingRTCServer(@Reference final RpcProviderService rpcProvider) {
+ reg = rpcProvider.registerRpcImplementations(
+ (GlobalRpcBench) this::globalRpcBench,
+ (RoutedRpcBench) this::routedRpcBench);
+ LOG.debug("GlobalBindingRTCServer started");
}
@Override
- public ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
- RoutedRpcBenchOutput output = new RoutedRpcBenchOutputBuilder(input).build();
- RpcResult<RoutedRpcBenchOutput> result = RpcResultBuilder.success(output).build();
- numRpcs++;
- return Futures.immediateFuture(result);
- }
-
- public int getNumRpcs() {
- return numRpcs;
+ public void close() {
+ reg.close();
+ LOG.debug("GlobalBindingRTCServer stopped");
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package rpcbenchmark.impl;
+
+import java.util.List;
+import java.util.Set;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+final class RoutedBindingRTCServer extends AbstractRpcbenchPayloadService implements AutoCloseable {
+ private final Registration reg;
+
+ RoutedBindingRTCServer(final RpcProviderService rpcProvider, final Set<InstanceIdentifier<?>> paths) {
+ reg = rpcProvider.registerRpcImplementations(List.of(
+ (GlobalRpcBench) this::globalRpcBench,
+ (RoutedRpcBench) this::routedRpcBench), paths);
+ }
+
+ @Override
+ public void close() {
+ reg.close();
+ }
+}
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInputBuilder;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
public class RoutedBindingRTClient implements RTCClient {
private static final Logger LOG = LoggerFactory.getLogger(RoutedBindingRTClient.class);
- private final RpcbenchPayloadService service;
+ private final RoutedRpcBench routedRpcBench;
private final AtomicLong rpcOk = new AtomicLong(0);
private final AtomicLong rpcError = new AtomicLong(0);
- private final List<RoutedRpcBenchInput> inVal;
+ private final List<RoutedRpcBenchInput> inVal = new ArrayList<>();
private final int inSize;
- public RoutedBindingRTClient(final RpcConsumerRegistry registry, final int inSize,
+ public RoutedBindingRTClient(final RpcService rpcService, final int inSize,
final List<InstanceIdentifier<?>> routeIid) {
- if (registry != null) {
- this.service = registry.getRpcService(RpcbenchPayloadService.class);
- } else {
- this.service = null;
- }
+ routedRpcBench = rpcService.getRpc(RoutedRpcBench.class);
this.inSize = inSize;
- this.inVal = new ArrayList<>();
Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
for (int i = 0; i < inSize; i++) {
int rpcServerCnt = inVal.size();
for (int i = 0; i < iterations; i++) {
RoutedRpcBenchInput input = inVal.get(ThreadLocalRandom.current().nextInt(rpcServerCnt));
- Future<RpcResult<RoutedRpcBenchOutput>> output = service.routedRpcBench(input);
+ Future<RpcResult<RoutedRpcBenchOutput>> output = routedRpcBench.invoke(input);
try {
RpcResult<RoutedRpcBenchOutput> rpcResult = output.get();
package rpcbenchmark.impl;
import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
+import org.opendaylight.mdsal.binding.api.RpcService;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchRpcRoutes;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRoute;
import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRouteKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.RpcbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput.ExecStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutputBuilder;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService {
-
+@Singleton
+@Component(service = {})
+@RequireServiceComponentRuntime
+public final class RpcbenchmarkProvider implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(RpcbenchmarkProvider.class);
private static final int TEST_TIMEOUT = 5;
- private final GlobalBindingRTCServer globalServer;
private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
private final RpcProviderService providerRegistry;
- private final RpcConsumerRegistry consumerRegistry;
-
- public RpcbenchmarkProvider(final RpcProviderService providerRegistry, final RpcConsumerRegistry consumerRegistry,
- final GlobalBindingRTCServer globalServer) {
- this.providerRegistry = providerRegistry;
- this.consumerRegistry = consumerRegistry;
- this.globalServer = globalServer;
- }
-
- public void init() {
+ private final RpcService consumerRegistry;
+ private final GlobalBindingRTCServer globalServer;
+ private final Registration reg;
+
+ @Inject
+ @Activate
+ public RpcbenchmarkProvider(@Reference final RpcProviderService providerRegistry,
+ @Reference final RpcService consumerRegistry) {
+ this.providerRegistry = requireNonNull(providerRegistry);
+ this.consumerRegistry = requireNonNull(consumerRegistry);
+ globalServer = new GlobalBindingRTCServer(providerRegistry);
+ reg = providerRegistry.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
LOG.info("RpcbenchmarkProvider initiated");
}
@Override
+ @Deactivate
+ @PreDestroy
public void close() {
+ globalServer.close();
+ reg.close();
LOG.info("RpcbenchmarkProvider closed");
}
- @Override
- public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+ private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
LOG.debug("startTest {}", input);
final RTCClient client;
- final List<ObjectRegistration<?>> rpcRegs = new ArrayList<>();
+ RoutedBindingRTCServer routed = null;
switch (input.getOperation()) {
case ROUTEDRTC:
List<InstanceIdentifier<?>> routeIid = new ArrayList<>();
for (int i = 0; i < input.getNumServers().intValue(); i++) {
- GlobalBindingRTCServer server = new GlobalBindingRTCServer();
- KeyedInstanceIdentifier<RpcRoute, RpcRouteKey> iid =
- InstanceIdentifier.create(RpcbenchRpcRoutes.class)
- .child(RpcRoute.class, new RpcRouteKey(Integer.toString(i)));
- routeIid.add(iid);
-
- ObjectRegistration<?> routedReg = providerRegistry.registerRpcImplementation(
- RpcbenchPayloadService.class, server, Set.of(iid));
-
- rpcRegs.add(routedReg);
+ routeIid.add(InstanceIdentifier.create(RpcbenchRpcRoutes.class)
+ .child(RpcRoute.class, new RpcRouteKey(Integer.toString(i))));
}
+ routed = new RoutedBindingRTCServer(providerRegistry, Set.copyOf(routeIid));
client = new RoutedBindingRTClient(consumerRegistry, input.getPayloadSize().intValue(), routeIid);
break;
.build();
return RpcResultBuilder.success(output).buildFuture();
} finally {
- rpcRegs.forEach(ObjectRegistration::close);
+ if (routed != null) {
+ routed.close();
+ }
}
}
- @Override
- public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+ private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
LOG.info("testStatus");
TestStatusOutput output = new TestStatusOutputBuilder()
.setGlobalServerCnt(Uint32.valueOf(globalServer.getNumRpcs()))
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <bean id="globalServer" class="rpcbenchmark.impl.GlobalBindingRTCServer"/>
-
- <reference id="rpcProviderService" interface="org.opendaylight.mdsal.binding.api.RpcProviderService"/>
- <reference id="rpcConsumerRegistry" interface="org.opendaylight.mdsal.binding.api.RpcConsumerRegistry"/>
-
- <bean id="provider" class="rpcbenchmark.impl.RpcbenchmarkProvider"
- init-method="init" destroy-method="close">
- <argument ref="rpcProviderService"/>
- <argument ref="rpcConsumerRegistry"/>
- <argument ref="globalServer"/>
- </bean>
-
- <odl:rpc-implementation ref="globalServer"/>
- <odl:rpc-implementation ref="provider"/>
-</blueprint>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../../opendaylight/md-sal/parent</relativePath>
+ </parent>
+
+ <artifactId>segjournal-benchmark</artifactId>
+ <packaging>jar</packaging>
+
+ <properties>
+ <maven.javadoc.skip>true</maven.javadoc.skip>
+ <maven.deploy.skip>true</maven.deploy.skip>
+ <maven.install.skip>true</maven.install.skip>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>net.sourceforge.argparse4j</groupId>
+ <artifactId>argparse4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>atomix-storage</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-segmented-journal</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-config</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <configuration>
+ <createDependencyReducedPom>false</createDependencyReducedPom>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <shadedArtifactAttached>true</shadedArtifactAttached>
+ <shadedClassifierName>executable</shadedClassifierName>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <!-- causing SecurityException -->
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ <transformers>
+ <transformer implementation= "org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+ <mainClass>org.opendaylight.controller.akka.segjournal.BenchmarkMain</mainClass>
+ </transformer>
+ </transformers>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <profiles>
+ <profile>
+ <id>benchmarks</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>3.1.1</version>
+ <executions>
+ <execution>
+ <id>execute-segmented-journal-benchmark</id>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <executable>java</executable>
+ <useMavenLogger>true</useMavenLogger>
+ <arguments>
+ <argument>-classpath</argument>
+ <!-- includes all dependencies to class path -->
+ <classpath/>
+ <argument>org.opendaylight.controller.akka.segjournal.BenchmarkMain</argument>
+ <!-- configuration taken from factory-akka.conf of sal-clustering-config -->
+ <argument>--current</argument>
+ <!-- 100_000 messages to write -->
+ <argument>-n100000</argument>
+ <!-- message payload is 100K -->
+ <argument>-p100K</argument>
+ </arguments>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.buildConfig;
+import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.formatBytes;
+import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.formatNanos;
+import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.toMetricId;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.persistence.AtomicWrite;
+import akka.persistence.PersistentRepr;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.io.Serializable;
+import java.util.Optional;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.io.FileUtils;
+import org.opendaylight.controller.akka.segjournal.BenchmarkUtils.BenchmarkConfig;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+public final class BenchmarkMain {
+ private static final String BENCHMARK = "benchmark";
+ private static final Logger LOG = LoggerFactory.getLogger("benchmark");
+
+ public static void main(String[] args) {
+ final var config = buildConfig(args);
+ final var benchmark = new BenchmarkMain(config);
+ Runtime.getRuntime().addShutdownHook(new Thread(benchmark::shutdown));
+ benchmark.execute();
+ System.exit(0);
+ }
+
+ private final BenchmarkConfig config;
+ private final ActorSystem system;
+ private final ScheduledExecutorService executor;
+ private ActorRef actor;
+
+ private BenchmarkMain(BenchmarkConfig config) {
+ this.config = config;
+ system = ActorSystem.create(BENCHMARK);
+ executor = Executors.newSingleThreadScheduledExecutor(
+ new ThreadFactoryBuilder().setNameFormat("progress-check-%d").build());
+ }
+
+ void execute() {
+ LOG.info("Starting with settings");
+ LOG.info("\tstorage : {}", config.storage());
+ LOG.info("\tworking dir : {}", config.workingDir().getAbsolutePath());
+ LOG.info("\tmaxEntrySize : {}", formatBytes(config.maxEntrySize()));
+ LOG.info("\tmaxSegmentSize : {}", formatBytes(config.maxSegmentSize()));
+ LOG.info("\tmaxUnflushedBytes : {}", formatBytes(config.maxUnflushedBytes()));
+
+ final var minLoadSize = Math.round(config.payloadSize() * 0.8f);
+ final var maxLoadSize = Math.min(Math.round(config.payloadSize() * 1.2f), config.maxEntrySize());
+ LOG.info("Preparing load");
+ LOG.info("\tnumber of messages : {}", config.messagesNum());
+ LOG.info("\tpayload size : {} .. {}", formatBytes(minLoadSize), formatBytes(maxLoadSize));
+
+ // reset metrics
+ final var metricsRegistry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
+ final var keys = metricsRegistry.getMetrics().keySet();
+ keys.forEach(metricsRegistry::remove);
+
+ // get actor
+ actor = system.actorOf(
+ SegmentedJournalActor.props("perf", config.workingDir(), config.storage(),
+ config.maxEntrySize(), config.maxSegmentSize(), config.maxUnflushedBytes()));
+
+ // randomize payloads
+ final var random = ThreadLocalRandom.current();
+ final var payloads = new Payload[1_000];
+ for (int i = 0; i < payloads.length; ++i) {
+ final var bytes = new byte[random.nextInt(minLoadSize, maxLoadSize)];
+ random.nextBytes(bytes);
+ payloads[i] = new Payload(bytes);
+ }
+
+ // enable periodic check for completed writes
+ final var results = new ConcurrentLinkedQueue<Future<Optional<Exception>>>();
+ final var progressReporter =
+ new ProgressReporter(executor, results, config.messagesNum(), 10, TimeUnit.SECONDS);
+
+ // start async message writing
+ final var sw = Stopwatch.createStarted();
+ for (int i = 0; i < config.messagesNum(); ++i) {
+ results.add(writeMessage(i, payloads[random.nextInt(payloads.length)]));
+ }
+ LOG.info("{} Messages sent to akka in {}", config.messagesNum(), sw);
+
+ // await completion
+ try {
+ progressReporter.awaitCompletion();
+ } catch (InterruptedException e) {
+ LOG.error("Interrupted", e);
+ }
+ LOG.info("Messages written in {}", sw.stop());
+
+ // report
+ LOG.info("Following metrics collected");
+ // meters
+ metricsRegistry.getMeters().forEach((key, meter) -> {
+ LOG.info("Meter '{}'", toMetricId(key));
+ LOG.info("\tCount = {}", meter.getCount());
+ LOG.info("\tMean Rate = {}", meter.getMeanRate());
+ LOG.info("\t1 Min Rate = {}", meter.getOneMinuteRate());
+ LOG.info("\t5 Min Rate = {}", meter.getFiveMinuteRate());
+ LOG.info("\t15 Min Rate = {}", meter.getFifteenMinuteRate());
+ });
+ // timers
+ metricsRegistry.getTimers().forEach((key, timer) -> {
+ LOG.info("Timer '{}'", toMetricId(key));
+ final var snap = timer.getSnapshot();
+ LOG.info("\tMin = {}", formatNanos(snap.getMin()));
+ LOG.info("\tMax = {}", formatNanos(snap.getMax()));
+ LOG.info("\tMean = {}", formatNanos(snap.getMean()));
+ LOG.info("\tStdDev = {}", formatNanos(snap.getStdDev()));
+ LOG.info("\tMedian = {}", formatNanos(snap.getMedian()));
+ LOG.info("\t75th = {}", formatNanos(snap.get75thPercentile()));
+ LOG.info("\t95th = {}", formatNanos(snap.get95thPercentile()));
+ LOG.info("\t98th = {}", formatNanos(snap.get98thPercentile()));
+ LOG.info("\t99th = {}", formatNanos(snap.get99thPercentile()));
+ LOG.info("\t99.9th = {}", formatNanos(snap.get999thPercentile()));
+ });
+ // histograms
+ metricsRegistry.getHistograms().forEach((key, histogram) -> {
+ LOG.info("Histogram '{}'", toMetricId(key));
+ final var snap = histogram.getSnapshot();
+ LOG.info("\tMin = {}", snap.getMin());
+ LOG.info("\tMax = {}", snap.getMax());
+ LOG.info("\tMean = {}", snap.getMean());
+ LOG.info("\tStdDev = {}", snap.getStdDev());
+ LOG.info("\tMedian = {}", snap.getMedian());
+ LOG.info("\t75th = {}", snap.get75thPercentile());
+ LOG.info("\t95th = {}", snap.get95thPercentile());
+ LOG.info("\t98th = {}", snap.get98thPercentile());
+ LOG.info("\t99th = {}", snap.get99thPercentile());
+ LOG.info("\t99.9th = {}", snap.get999thPercentile());
+ });
+ }
+
+ Future<Optional<Exception>> writeMessage(final long seqNum, final Payload payload) {
+ final var writeMessage = new WriteMessages();
+ final var result = writeMessage.add(AtomicWrite.apply(
+ PersistentRepr.apply(payload, seqNum, BENCHMARK, null, false, ActorRef.noSender(), "uuid")));
+ actor.tell(writeMessage, ActorRef.noSender());
+ return result;
+ }
+
+ void shutdown() {
+ LOG.info("shutting down ...");
+ executor.shutdown();
+ if (actor != null) {
+ system.stop(actor);
+ }
+ if (config.workingDir().exists()) {
+ FileUtils.deleteQuietly(config.workingDir());
+ }
+ system.terminate();
+ LOG.info("Done.");
+ }
+
+ private static final class Payload implements Serializable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+ final byte[] bytes;
+
+ Payload(final byte[] bytes) {
+ this.bytes = bytes;
+ }
+ }
+
+ private static final class ProgressReporter implements Runnable {
+ final ScheduledExecutorService executor;
+ final CountDownLatch latch = new CountDownLatch(1);
+ final Queue<Future<Optional<Exception>>> queue;
+ final long total;
+ final int checkInterval;
+ final TimeUnit timeUnit;
+ long completed;
+ long errors;
+
+ ProgressReporter(final ScheduledExecutorService executor, final Queue<Future<Optional<Exception>>> queue,
+ final long total, final int checkInterval, final TimeUnit timeUnit) {
+ this.executor = executor;
+ this.queue = queue;
+ this.total = total;
+ this.checkInterval = checkInterval;
+ this.timeUnit = timeUnit;
+ scheduleNextCheck();
+ }
+
+ @Override
+ public void run() {
+ // release completed from the beginning of the queue
+ while (!queue.isEmpty() && queue.peek().isCompleted()) {
+ final var future = queue.poll();
+ completed++;
+ if (!future.value().get().get().isEmpty()) {
+ errors++;
+ }
+ }
+ LOG.info("{} of {} = {}% messages written, {} in queue",
+ completed, total, completed * 100 / total, queue.size());
+ if (total == completed) {
+ LOG.info("Check completed, errors found : {}", errors);
+ latch.countDown();
+ return;
+ }
+ scheduleNextCheck();
+ }
+
+ void scheduleNextCheck() {
+ executor.schedule(this, checkInterval, timeUnit);
+ }
+
+ void awaitCompletion() throws InterruptedException {
+ latch.await();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_ENTRY_SIZE;
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_ENTRY_SIZE_DEFAULT;
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_SEGMENT_SIZE;
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_SEGMENT_SIZE_DEFAULT;
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_UNFLUSHED_BYTES;
+import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MEMORY_MAPPED;
+
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Ticker;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import io.atomix.storage.journal.StorageLevel;
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.HashMap;
+import java.util.Map;
+import net.sourceforge.argparse4j.ArgumentParsers;
+import net.sourceforge.argparse4j.impl.Arguments;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+import net.sourceforge.argparse4j.inf.ArgumentParserException;
+
+@SuppressWarnings("RegexpSinglelineJava")
+final class BenchmarkUtils {
+
+ static final String PROG_NAME = "segjourlan-benchmark";
+
+ static final String BENCHMARK_USE_CURRENT = "current";
+ static final String BENCHMARK_NUMBER_OF_MESSAGES = "messages-num";
+ static final String BENCHMARK_PAYLOAD_SIZE = "payload-size";
+ static final String BENCHMARK_PAYLOAD_SIZE_DEFAULT = "10K";
+
+ static final String CURRENT_CONFIG_RESOURCE = "/initial/factory-akka.conf";
+ static final String CURRENT_CONFIG_PATH = "odl-cluster-data.akka.persistence.journal.segmented-file";
+
+ private static final String[] BYTE_SFX = {"G", "M", "K"};
+ private static final int[] BYTE_THRESH = {1024 * 1024 * 1024, 1024 * 1024, 1024};
+
+ record BenchmarkConfig(StorageLevel storage, File workingDir, int maxEntrySize, int maxSegmentSize,
+ int maxUnflushedBytes, int payloadSize, int messagesNum) {
+ }
+
+ private BenchmarkUtils() {
+ // utility class
+ }
+
+ static BenchmarkConfig buildConfig(final String[] args) {
+ final var parser = getArgumentParser();
+ final var paramsMap = new HashMap<String, Object>();
+ try {
+ parser.parseArgs(args, paramsMap);
+ } catch (ArgumentParserException e) {
+ parser.handleError(e);
+ System.exit(1);
+ return null;
+ }
+ return toConfig(paramsMap);
+ }
+
+ private static ArgumentParser getArgumentParser() {
+ final var parser = ArgumentParsers.newArgumentParser(PROG_NAME).defaultHelp(true);
+
+ parser.description("Performs asynchronous write to segmented journal, collects and prints variety of metrics");
+
+ parser.addArgument("--current")
+ .type(Boolean.class).setDefault(Boolean.FALSE)
+ .action(Arguments.storeConst()).setConst(Boolean.TRUE)
+ .dest(BENCHMARK_USE_CURRENT)
+ .help("indicates base configuration to be taken from current cluster configuration, "
+ + "all other arguments excepting 'requests' and 'payload size' will be ignored");
+
+ parser.addArgument("--memory-mapped")
+ .type(Boolean.class).setDefault(Boolean.FALSE)
+ .action(Arguments.storeConst()).setConst(Boolean.TRUE)
+ .dest(STORAGE_MEMORY_MAPPED)
+ .help("indicates mapping journal segments to memory, otherwise file system is used");
+
+ parser.addArgument("-e", "--max-entry-size")
+ .type(String.class).setDefault(formatBytes(STORAGE_MAX_ENTRY_SIZE_DEFAULT))
+ .dest(STORAGE_MAX_ENTRY_SIZE)
+ .help("max entry size, bytes format");
+
+ parser.addArgument("-s", "--max-segment-size")
+ .type(String.class).setDefault(formatBytes(STORAGE_MAX_SEGMENT_SIZE_DEFAULT))
+ .dest(STORAGE_MAX_SEGMENT_SIZE)
+ .help("max segment size, bytes ");
+
+ parser.addArgument("-u", "--max-unflushed-bytes")
+ .type(String.class)
+ .dest(STORAGE_MAX_UNFLUSHED_BYTES)
+ .help("max unflushed bytes, bytes format, "
+ + "if not defined the value is taken from 'max-entry-size'");
+
+ parser.addArgument("-n", "--messages-num")
+ .type(Integer.class).required(true)
+ .dest(BENCHMARK_NUMBER_OF_MESSAGES)
+ .setDefault(10_000)
+ .help("number of messages to write");
+
+ parser.addArgument("-p", "--payload-size")
+ .type(String.class).setDefault(BENCHMARK_PAYLOAD_SIZE_DEFAULT)
+ .dest(BENCHMARK_PAYLOAD_SIZE)
+ .help("median for request payload size, bytes format supported, "
+ + "actual size is variable 80% to 120% from defined median value");
+
+ return parser;
+ }
+
+ static BenchmarkConfig toConfig(final Map<String, Object> paramsMap) {
+ final var inputConfig = ConfigFactory.parseMap(paramsMap);
+ final var finalConfig = (Boolean) paramsMap.get(BENCHMARK_USE_CURRENT)
+ ? currentConfig().withFallback(inputConfig) : inputConfig;
+
+ final var benchmarkConfig = new BenchmarkConfig(
+ finalConfig.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK,
+ createTempDirectory(),
+ bytes(finalConfig, STORAGE_MAX_ENTRY_SIZE),
+ bytes(finalConfig, STORAGE_MAX_SEGMENT_SIZE),
+ finalConfig.hasPath(STORAGE_MAX_UNFLUSHED_BYTES)
+ ? bytes(finalConfig, STORAGE_MAX_UNFLUSHED_BYTES) : bytes(finalConfig, STORAGE_MAX_ENTRY_SIZE),
+ bytes(finalConfig, BENCHMARK_PAYLOAD_SIZE),
+ finalConfig.getInt(BENCHMARK_NUMBER_OF_MESSAGES)
+ );
+ // validate
+ if (benchmarkConfig.payloadSize > benchmarkConfig.maxEntrySize) {
+ printAndExit("payloadSize should be less than maxEntrySize");
+ }
+ return benchmarkConfig;
+ }
+
+ private static int bytes(final Config config, final String key) {
+ final var bytesLong = config.getBytes(key);
+ if (bytesLong <= 0 || bytesLong > Integer.MAX_VALUE) {
+ printAndExit(
+ key + " value (" + bytesLong + ") is invalid, expected in range 1 .. " + Integer.MAX_VALUE);
+ }
+ return bytesLong.intValue();
+ }
+
+ static Config currentConfig() {
+ try (var in = BenchmarkUtils.class.getResourceAsStream(CURRENT_CONFIG_RESOURCE)) {
+ final var content = new String(in.readAllBytes(), StandardCharsets.UTF_8);
+ final var globalConfig = ConfigFactory.parseString(content);
+ final var currentConfig = globalConfig.getConfig(CURRENT_CONFIG_PATH);
+ System.out.println("Current configuration loaded from " + CURRENT_CONFIG_RESOURCE);
+ return currentConfig;
+
+ } catch (IOException e) {
+ printAndExit("Error loading current configuration from resource " + CURRENT_CONFIG_RESOURCE, e);
+ return null;
+ }
+ }
+
+ private static File createTempDirectory() {
+ try {
+ return Files.createTempDirectory(PROG_NAME).toFile();
+ } catch (IOException e) {
+ printAndExit("Cannot create temp directory", e);
+ }
+ return null;
+ }
+
+ private static void printAndExit(final String message) {
+ printAndExit(message, null);
+ }
+
+ private static void printAndExit(final String message, final Exception exception) {
+ System.err.println(message);
+ if (exception != null) {
+ exception.printStackTrace(System.err);
+ }
+ System.exit(1);
+ }
+
+ static String formatBytes(int bytes) {
+ for (int i = 0; i < 3; i++) {
+ if (bytes > BYTE_THRESH[i]) {
+ return bytes / BYTE_THRESH[i] + BYTE_SFX[i];
+ }
+ }
+ return String.valueOf(bytes);
+ }
+
+ static String formatNanos(final double nanos) {
+ return formatNanos(Math.round(nanos));
+ }
+
+ static String formatNanos(final long nanos) {
+ return Stopwatch.createStarted(new Ticker() {
+ boolean started;
+
+ @Override
+ public long read() {
+ if (started) {
+ return nanos;
+ }
+ started = true;
+ return 0;
+ }
+ }).toString();
+ }
+
+ static String toMetricId(final String metricKey) {
+ return metricKey.substring(metricKey.lastIndexOf('.') + 1);
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<configuration>
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
+ </pattern>
+ </encoder>
+ </appender>
+
+ <root level="INFO">
+ <appender-ref ref="STDOUT" />
+ </root>
+</configuration>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>bundle-parent</artifactId>
+ <version>13.0.1</version>
+ <relativePath/>
+ </parent>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>bundle-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-artifacts</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+
+ <!-- Scala and its modules -->
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ <version>2.13.13</version>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-reflect</artifactId>
+ <version>2.13.13</version>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-java8-compat_2.13</artifactId>
+ <version>1.0.2</version>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-parser-combinators_2.13</artifactId>
+ <version>1.1.2</version>
+ </dependency>
+ <dependency>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <version>3.2.13.0</version>
+ <scope>test</scope>
+ </dependency>
+
+ <!-- Configuration library -->
+ <!-- This needs to be kept in sync with the version used by akka -->
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
+ <version>1.4.2</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>ssl-config-core_2.13</artifactId>
+ <version>0.4.3</version>
+ </dependency>
+
+ <!-- Akka testkit -->
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
+ <version>2.6.21</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor_2.13</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor-testkit-typed_2.13</artifactId>
+ <version>2.6.21</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor-typed_2.13</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_2.13</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence-tck_2.13</artifactId>
+ <version>2.6.21</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence_2.13</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <!-- Reactive Streams, used by Akka -->
+ <dependency>
+ <groupId>org.reactivestreams</groupId>
+ <artifactId>reactive-streams</artifactId>
+ <version>1.0.4</version>
+ </dependency>
+
+ <!-- Aeron, required by Akka -->
+ <dependency>
+ <groupId>org.agrona</groupId>
+ <artifactId>agrona</artifactId>
+ <version>1.15.2</version>
+ </dependency>
+ <dependency>
+ <groupId>io.aeron</groupId>
+ <artifactId>aeron-client</artifactId>
+ <version>1.38.1</version>
+ </dependency>
+ <dependency>
+ <groupId>io.aeron</groupId>
+ <artifactId>aeron-driver</artifactId>
+ <version>1.38.1</version>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+</project>
from docs_conf.conf import *
data = ET.parse('pom.xml')
-project_version = data.getroot().find('*//{http://maven.apache.org/POM/4.0.0}version').text
+project_version = data.getroot().find('./{http://maven.apache.org/POM/4.0.0}version').text
version = project_version
release = project_version
The OpenDaylight Controller provides following model-driven subsystems
as a foundation for Java applications:
-- :ref:`config_subsystem` - an activation,
- dependency-injection and configuration framework, which allows
- two-phase commits of configuration and dependency-injection, and
- allows for run-time rewiring.
-
- :ref:`MD-SAL <mdsal_dev_guide>` - messaging and data storage
functionality for data, notifications and RPCs modeled by application
developers. MD-SAL uses YANG as the modeling for both interface and
.. note::
- | Each request must start with the URI /restconf.
+ | Each request must start with the URI /rests.
| RESTCONF listens on port 8080 for HTTP requests.
RESTCONF supports **OPTIONS**, **GET**, **PUT**, **POST**, and
**DELETE** operations. Request and response data can either be in the
XML or JSON format. XML structures according to yang are defined at:
-`XML-YANG <http://tools.ietf.org/html/rfc6020>`__. JSON structures are
+`XML-YANG <https://www.rfc-editor.org/rfc/rfc6020>`__. JSON structures are
defined at:
-`JSON-YANG <http://tools.ietf.org/html/draft-lhotka-netmod-yang-json-02>`__.
+`JSON-YANG <https://datatracker.ietf.org/doc/html/draft-lhotka-netmod-yang-json-02>`__.
Data in the request must have a correctly set **Content-Type** field in
the http header with the allowed value of the media type. The media type
of the requested data has to be set in the **Accept** field. Get the
media types for each resource by calling the OPTIONS operation. Most of
the paths of the pathsRestconf endpoints use `Instance
-Identifier <https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
+Identifier <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
``<identifier>`` is used in the explanation of the operations.
| **<identifier>**
- <nodeName> can represent a data node which is a list or container
yang built-in type. If the data node is a list, there must be defined
keys of the list behind the data node name for example,
- <nodeName>/<valueOfKey1>/<valueOfKey2>.
+ <nodeName>=<valueOfKey1>,<valueOfKey2>.
- | The format <moduleName>:<nodeName> has to be used in this case as
well:
| Module A has node A1. Module B augments node A1 by adding node X.
Module C augments node A1 by adding node X. For clarity, it has to
be known which node is X (for example: C:X). For more details about
- encoding, see: `RESTCONF 02 - Encoding YANG Instance Identifiers in
+ encoding, see: `RESTCONF RFC 8040 - Encoding YANG Instance Identifiers in
the Request
- URI. <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02#section-5.3.1>`__
+ URI. <https://datatracker.ietf.org/doc/html/rfc8040#section-3.5.3>`__
Mount point
~~~~~~~~~~~
point itself by using <identifier>/**yang-ext:mount**.
| More information on how to actually use mountpoints is available at:
`OpenDaylight
- Controller:Config:Examples:Netconf <https://wiki.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
+ Controller:Config:Examples:Netconf <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
HTTP methods
~~~~~~~~~~~~
-OPTIONS /restconf
-^^^^^^^^^^^^^^^^^
+OPTIONS /rests
+^^^^^^^^^^^^^^
- Returns the XML description of the resources with the required
request and response media types in Web Application Description
Language (WADL)
-GET /restconf/config/<identifier>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+GET /rests/data/<identifier>?content=config
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Returns a data node from the Config datastore.
- <identifier> points to a data node which must be retrieved.
-GET /restconf/operational/<identifier>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+GET /rests/data/<identifier>?content=nonconfig
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- Returns the value of the data node from the Operational datastore.
+- Returns the value of the data node from the non-configuration datastore.
- <identifier> points to a data node which must be retrieved.
-PUT /restconf/config/<identifier>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+PUT /rests/data/<identifier>
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Updates or creates data in the Config datastore and returns the state
about success.
::
- PUT http://<controllerIP>:8080/restconf/config/module1:foo/bar
+ PUT http://<controllerIP>:8080/rests/data/module1:foo/bar
Content-Type: applicaton/xml
<bar>
…
::
- PUT http://<controllerIP>:8080/restconf/config/module1:foo1/foo2/yang-ext:mount/module2:foo/bar
+ PUT http://<controllerIP>:8080/rests/data/module1:foo1/foo2/yang-ext:mount/module2:foo/bar
Content-Type: applicaton/xml
<bar>
…
</bar>
-POST /restconf/config
-^^^^^^^^^^^^^^^^^^^^^
+POST /rests/data
+^^^^^^^^^^^^^^^^
- Creates the data if it does not exist
::
- POST URL: http://localhost:8080/restconf/config/
+ POST URL: http://localhost:8080/rests/data/
content-type: application/yang.data+json
JSON payload:
}
}
-POST /restconf/config/<identifier>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+POST /rests/data/<identifier>
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Creates the data if it does not exist in the Config datastore, and
returns the state about success.
::
- POST http://<controllerIP>:8080/restconf/config/module1:foo
+ POST http://<controllerIP>:8080/rests/data/module1:foo
Content-Type: applicaton/xml/
<bar xmlns=“module1namespace”>
…
::
- http://<controllerIP>:8080/restconf/config/module1:foo1/foo2/yang-ext:mount/module2:foo
+ http://<controllerIP>:8080/rests/data/module1:foo1/foo2/yang-ext:mount/module2:foo
Content-Type: applicaton/xml
<bar xmlns=“module2namespace”>
…
</bar>
-POST /restconf/operations/<moduleName>:<rpcName>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+POST /rests/operations/<moduleName>:<rpcName>
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Invokes RPC.
::
- POST http://<controllerIP>:8080/restconf/operations/module1:fooRpc
+ POST http://<controllerIP>:8080/rests/operations/module1:fooRpc
Content-Type: applicaton/xml
Accept: applicaton/xml
<input>
::
- POST http://localhost:8080/restconf/operations/toaster:make-toast
+ POST http://localhost:8080/rests/operations/toaster:make-toast
Content-Type: application/yang.data+json
{
"input" :
Even though this is a default for the toasterToastType value in the
yang, you still need to define it.
-DELETE /restconf/config/<identifier>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+DELETE /rests/data/<identifier>
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Removes the data node in the Config datastore and returns the state
about success.
- <identifier> points to a data node which must be removed.
More information is available in the `RESTCONF
-RFC <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02>`__.
+RFC <https://datatracker.ietf.org/doc/html/rfc8040>`__.
How RESTCONF works
~~~~~~~~~~~~~~~~~~
GET in action
~~~~~~~~~~~~~
-Figure 1 shows the GET operation with URI restconf/config/M:N where M is
-the module name, and N is the node name.
+Figure 1 shows the GET operation with URI rests/data/M:N?content=config
+where M is the module name, and N is the node name.
.. figure:: ./images/Get.png
:alt: Get
PUT in action
~~~~~~~~~~~~~
-Figure 2 shows the PUT operation with the URI restconf/config/M:N where
+Figure 2 shows the PUT operation with the URI rests/data/M:N where
M is the module name, and N is the node name. Data is sent in the
request either in the XML or JSON format.
::
Operation: POST
- URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2
+ URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2
Content-Type: application/xml
::
::
Operation: PUT
- URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111
+ URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111
Content-Type: application/xml
::
::
Operation: GET
- URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111
+ URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111?content=config
Accept: application/xml
| **HTTP response**
::
Operation: DELETE
- URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111
+ URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111
| **HTTP response**
Status: 200 OK
-Websocket change event notification subscription tutorial
----------------------------------------------------------
-
-Subscribing to data change notifications makes it possible to obtain
-notifications about data manipulation (insert, change, delete) which are
-done on any specified **path** of any specified **datastore** with
-specific **scope**. In following examples *{odlAddress}* is address of
-server where ODL is running and *{odlPort}* is port on which
-OpenDaylight is running.
-
-Websocket notifications subscription process
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this section we will learn what steps need to be taken in order to
-successfully subscribe to data change event notifications.
-
-Create stream
-^^^^^^^^^^^^^
-
-In order to use event notifications you first need to call RPC that
-creates notification stream that you can later listen to. You need to
-provide three parameters to this RPC:
-
-- **path**: data store path that you plan to listen to. You can
- register listener on containers, lists and leaves.
-
-- **datastore**: data store type. *OPERATIONAL* or *CONFIGURATION*.
-
-- **scope**: Represents scope of data change. Possible options are:
-
- - BASE: only changes directly to the data tree node specified in the
- path will be reported
-
- - ONE: changes to the node and to direct child nodes will be
- reported
-
- - SUBTREE: changes anywhere in the subtree starting at the node will
- be reported
-
-The RPC to create the stream can be invoked via RESTCONF like this:
-
-- URI:
- http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription
-
-- HEADER: Content-Type=application/json
-
-- OPERATION: POST
-
-- DATA:
-
- .. code:: json
-
- {
- "input": {
- "path": "/toaster:toaster/toaster:toasterStatus",
- "sal-remote-augment:datastore": "OPERATIONAL",
- "sal-remote-augment:scope": "ONE"
- }
- }
-
-The response should look something like this:
-
-.. code:: json
-
- {
- "output": {
- "stream-name": "data-change-event-subscription/toaster:toaster/toaster:toasterStatus/datastore=CONFIGURATION/scope=SUBTREE"
- }
- }
-
-**stream-name** is important because you will need to use it when you
-subscribe to the stream in the next step.
-
-.. note::
-
- Internally, this will create a new listener for *stream-name* if it
- did not already exist.
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-In order to subscribe to stream and obtain WebSocket location you need
-to call *GET* on your stream path. The URI should generally be
-http://{odlAddress}:{odlPort}/restconf/streams/stream/{streamName},
-where *{streamName}* is the *stream-name* parameter contained in
-response from *create-data-change-event-subscription* RPC from the
-previous step.
-
-- URI:
- http://{odlAddress}:{odlPort}/restconf/streams/stream/data-change-event-subscription/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE
-
-- OPERATION: GET
-
-The subscription call may be modified with the following query parameters defined in the RESTCONF RFC:
-
-- `filter <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.6>`__
-
-- `start-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.7>`__
-
-- `end-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.8>`__
-
-In addition, the following ODL extension query parameter is supported:
-
-:odl-leaf-nodes-only:
- If this parameter is set to "true", create and update notifications will only
- contain the leaf nodes modified instead of the entire subscription subtree.
- This can help in reducing the size of the notifications.
-
-The expected response status is 200 OK and response body should be
-empty. You will get your WebSocket location from **Location** header of
-response. For example in our particular toaster example location header
-would have this value:
-*ws://{odlAddress}:8185/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE*
-
-.. note::
-
- During this phase there is an internal check for to see if a
- listener for the *stream-name* from the URI exists. If not, new a
- new listener is registered with the DOM data broker.
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-You should now have a data change notification stream created and have
-location of a WebSocket. You can use this WebSocket to listen to data
-change notifications. To listen to notifications you can use a
-JavaScript client or if you are using chrome browser you can use the
-`Simple WebSocket
-Client <https://chrome.google.com/webstore/detail/simple-websocket-client/pfdhoblngboilpfeibdedpjgfnlcodoo>`__.
-
-Also, for testing purposes, there is simple Java application named
-WebSocketClient. The application is placed in the
-*-sal-rest-connector-classes.class* project. It accepts a WebSocket URI
-as and input parameter. After starting the utility (WebSocketClient
-class directly in Eclipse/InteliJ Idea) received notifications should be
-displayed in console.
-
-Notifications are always in XML format and look like this:
-
-.. code:: xml
-
- <notification xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">
- <eventTime>2014-09-11T09:58:23+02:00</eventTime>
- <data-changed-notification xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote">
- <data-change-event>
- <path xmlns:meae="http://netconfcentral.org/ns/toaster">/meae:toaster</path>
- <operation>updated</operation>
- <data>
- <!-- updated data -->
- </data>
- </data-change-event>
- </data-changed-notification>
- </notification>
-
-Example use case
-~~~~~~~~~~~~~~~~
-
-The typical use case is listening to data change events to update web
-page data in real-time. In this tutorial we will be using toaster as the
-base.
-
-When you call *make-toast* RPC, it sets *toasterStatus* to "down" to
-reflect that the toaster is busy making toast. When it finishes,
-*toasterStatus* is set to "up" again. We will listen to this toaster
-status changes in data store and will reflect it on our web page in
-real-time thanks to WebSocket data change notification.
-
-Simple javascript client implementation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We will create simple JavaScript web application that will listen
-updates on *toasterStatus* leaf and update some element of our web page
-according to new toaster status state.
-
-Create stream
-^^^^^^^^^^^^^
-
-First you need to create stream that you are planing to subscribe to.
-This can be achieved by invoking "create-data-change-event-subscription"
-RPC on RESTCONF via AJAX request. You need to provide data store
-**path** that you plan to listen on, **data store type** and **scope**.
-If the request is successful you can extract the **stream-name** from
-the response and use that to subscribe to the newly created stream. The
-*{username}* and *{password}* fields represent your credentials that you
-use to connect to OpenDaylight via RESTCONF:
-
-.. note::
-
- The default user name and password are "admin".
-
-.. code:: javascript
-
- function createStream() {
- $.ajax(
- {
- url: 'http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription',
- type: 'POST',
- headers: {
- 'Authorization': 'Basic ' + btoa('{username}:{password}'),
- 'Content-Type': 'application/json'
- },
- data: JSON.stringify(
- {
- 'input': {
- 'path': '/toaster:toaster/toaster:toasterStatus',
- 'sal-remote-augment:datastore': 'OPERATIONAL',
- 'sal-remote-augment:scope': 'ONE'
- }
- }
- )
- }).done(function (data) {
- // this function will be called when ajax call is executed successfully
- subscribeToStream(data.output['stream-name']);
- }).fail(function (data) {
- // this function will be called when ajax call fails
- console.log("Create stream call unsuccessful");
- })
- }
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-The Next step is to subscribe to the stream. To subscribe to the stream
-you need to call *GET* on
-*http://{odlAddress}:{odlPort}/restconf/streams/stream/{stream-name}*.
-If the call is successful, you get WebSocket address for this stream in
-**Location** parameter inside response header. You can get response
-header by calling *getResponseHeader(\ *Location*)* on HttpRequest
-object inside *done()* function call:
-
-.. code:: javascript
-
- function subscribeToStream(streamName) {
- $.ajax(
- {
- url: 'http://{odlAddress}:{odlPort}/restconf/streams/stream/' + streamName;
- type: 'GET',
- headers: {
- 'Authorization': 'Basic ' + btoa('{username}:{password}'),
- }
- }
- ).done(function (data, textStatus, httpReq) {
- // we need function that has http request object parameter in order to access response headers.
- listenToNotifications(httpReq.getResponseHeader('Location'));
- }).fail(function (data) {
- console.log("Subscribe to stream call unsuccessful");
- });
- }
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-Once you got WebSocket server location you can now connect to it and
-start receiving data change events. You need to define functions that
-will handle events on WebSocket. In order to process incoming events
-from OpenDaylight you need to provide a function that will handle
-*onmessage* events. The function must have one parameter that represents
-the received event object. The event data will be stored in
-*event.data*. The data will be in an XML format that you can then easily
-parse using jQuery.
-
-.. code:: javascript
-
- function listenToNotifications(socketLocation) {
- try {
- var notificatinSocket = new WebSocket(socketLocation);
-
- notificatinSocket.onmessage = function (event) {
- // we process our received event here
- console.log('Received toaster data change event.');
- $($.parseXML(event.data)).find('data-change-event').each(
- function (index) {
- var operation = $(this).find('operation').text();
- if (operation == 'updated') {
- // toaster status was updated so we call function that gets the value of toasterStatus leaf
- updateToasterStatus();
- return false;
- }
- }
- );
- }
- notificatinSocket.onerror = function (error) {
- console.log("Socket error: " + error);
- }
- notificatinSocket.onopen = function (event) {
- console.log("Socket connection opened.");
- }
- notificatinSocket.onclose = function (event) {
- console.log("Socket connection closed.");
- }
- // if there is a problem on socket creation we get exception (i.e. when socket address is incorrect)
- } catch(e) {
- alert("Error when creating WebSocket" + e );
- }
- }
-
-The *updateToasterStatus()* function represents function that calls
-*GET* on the path that was modified and sets toaster status in some web
-page element according to received data. After the WebSocket connection
-has been established you can test events by calling make-toast RPC via
-RESTCONF.
-
-.. note::
-
- for more information about WebSockets in JavaScript visit `Writing
- WebSocket client
- applications <https://developer.mozilla.org/en-US/docs/WebSockets/Writing_WebSocket_client_applications>`__
-
-.. _config_subsystem:
-
-Config Subsystem
-----------------
-
-Overview
-~~~~~~~~
-
-The Controller configuration operation has three stages:
-
-- First, a Proposed configuration is created. Its target is to replace
- the old configuration.
-
-- Second, the Proposed configuration is validated, and then committed.
- If it passes validation successfully, the Proposed configuration
- state will be changed to Validated.
-
-- Finally, a Validated configuration can be Committed, and the affected
- modules can be reconfigured.
-
-In fact, each configuration operation is wrapped in a transaction. Once
-a transaction is created, it can be configured, that is to say, a user
-can abort the transaction during this stage. After the transaction
-configuration is done, it is committed to the validation stage. In this
-stage, the validation procedures are invoked. If one or more validations
-fail, the transaction can be reconfigured. Upon success, the second
-phase commit is invoked. If this commit is successful, the transaction
-enters the last stage, committed. After that, the desired modules are
-reconfigured. If the second phase commit fails, it means that the
-transaction is unhealthy - basically, a new configuration instance
-creation failed, and the application can be in an inconsistent state.
-
-.. figure:: ./images/configuration.jpg
- :alt: Configuration states
-
- Configuration states
-
-.. figure:: ./images/Transaction.jpg
- :alt: Transaction states
-
- Transaction states
-
-Validation
-~~~~~~~~~~
-
-To secure the consistency and safety of the new configuration and to
-avoid conflicts, the configuration validation process is necessary.
-Usually, validation checks the input parameters of a new configuration,
-and mostly verifies module-specific relationships. The validation
-procedure results in a decision on whether the proposed configuration is
-healthy.
-
-Dependency resolver
-~~~~~~~~~~~~~~~~~~~
-
-Since there can be dependencies between modules, a change in a module
-configuration can affect the state of other modules. Therefore, we need
-to verify whether dependencies on other modules can be resolved. The
-Dependency Resolver acts in a manner similar to dependency injectors.
-Basically, a dependency tree is built.
-
-APIs and SPIs
-~~~~~~~~~~~~~
-
-This section describes configuration system APIs and SPIs.
-
-SPIs
-^^^^
-
-**Module** org.opendaylight.controller.config.spi. Module is the common
-interface for all modules: every module must implement it. The module is
-designated to hold configuration attributes, validate them, and create
-instances of service based on the attributes. This instance must
-implement the AutoCloseable interface, owing to resources clean up. If
-the module was created from an already running instance, it contains an
-old instance of the module. A module can implement multiple services. If
-the module depends on other modules, setters need to be annotated with
-@RequireInterface.
-
-**Module creation**
-
-1. The module needs to be configured, set with all required attributes.
-
-2. The module is then moved to the commit stage for validation. If the
- validation fails, the module attributes can be reconfigured.
- Otherwise, a new instance is either created, or an old instance is
- reconfigured. A module instance is identified by ModuleIdentifier,
- consisting of the factory name and instance name.
-
-| **ModuleFactory** org.opendaylight.controller.config.spi. The
- ModuleFactory interface must be implemented by each module factory.
-| A module factory can create a new module instance in two ways:
-
-- From an existing module instance
-
-- | An entirely new instance
- | ModuleFactory can also return default modules, useful for
- populating registry with already existing configurations. A module
- factory implementation must have a globally unique name.
-
-APIs
-^^^^
-
-+--------------------------------------+--------------------------------------+
-| ConfigRegistry | Represents functionality provided by |
-| | a configuration transaction (create, |
-| | destroy module, validate, or abort |
-| | transaction). |
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionController | Represents functionality for |
-| | manipulating with configuration |
-| | transactions (begin, commit config). |
-+--------------------------------------+--------------------------------------+
-| RuntimeBeanRegistratorAwareConfiBean | The module implementing this |
-| | interface will receive |
-| | RuntimeBeanRegistrator before |
-| | getInstance is invoked. |
-+--------------------------------------+--------------------------------------+
-
-Runtime APIs
-^^^^^^^^^^^^
-
-+--------------------------------------+--------------------------------------+
-| RuntimeBean | Common interface for all runtime |
-| | beans |
-+--------------------------------------+--------------------------------------+
-| RootRuntimeBeanRegistrator | Represents functionality for root |
-| | runtime bean registration, which |
-| | subsequently allows hierarchical |
-| | registrations |
-+--------------------------------------+--------------------------------------+
-| HierarchicalRuntimeBeanRegistration | Represents functionality for runtime |
-| | bean registration and |
-| | unreregistration from hierarchy |
-+--------------------------------------+--------------------------------------+
-
-JMX APIs
-^^^^^^^^
-
-| JMX API is purposed as a transition between the Client API and the JMX
- platform.
-
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionControllerMXBean | Extends ConfigTransactionController, |
-| | executed by Jolokia clients on |
-| | configuration transaction. |
-+--------------------------------------+--------------------------------------+
-| ConfigRegistryMXBean | Represents entry point of |
-| | configuration management for |
-| | MXBeans. |
-+--------------------------------------+--------------------------------------+
-| Object names | Object Name is the pattern used in |
-| | JMX to locate JMX beans. It consists |
-| | of domain and key properties (at |
-| | least one key-value pair). Domain is |
-| | defined as |
-| | "org.opendaylight.controller". The |
-| | only mandatory property is "type". |
-+--------------------------------------+--------------------------------------+
-
-Use case scenarios
-^^^^^^^^^^^^^^^^^^
-
-| A few samples of successful and unsuccessful transaction scenarios
- follow:
-
-**Successful commit scenario**
-
-1. The user creates a transaction calling creteTransaction() method on
- ConfigRegistry.
-
-2. ConfigRegisty creates a transaction controller, and registers the
- transaction as a new bean.
-
-3. Runtime configurations are copied to the transaction. The user can
- create modules and set their attributes.
-
-4. The configuration transaction is to be committed.
-
-5. The validation process is performed.
-
-6. After successful validation, the second phase commit begins.
-
-7. Modules proposed to be destroyed are destroyed, and their service
- instances are closed.
-
-8. Runtime beans are set to registrator.
-
-9. The transaction controller invokes the method getInstance on each
- module.
-
-10. The transaction is committed, and resources are either closed or
- released.
-
-| **Validation failure scenario**
-| The transaction is the same as the previous case until the validation
- process.
-
-1. If validation fails, (that is to day, illegal input attributes values
- or dependency resolver failure), the validationException is thrown
- and exposed to the user.
-
-2. The user can decide to reconfigure the transaction and commit again,
- or abort the current transaction.
-
-3. On aborted transactions, TransactionController and JMXRegistrator are
- properly closed.
-
-4. Unregistration event is sent to ConfigRegistry.
-
-Default module instances
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The configuration subsystem provides a way for modules to create default
-instances. A default instance is an instance of a module, that is
-created at the module bundle start-up (module becomes visible for
-configuration subsystem, for example, its bundle is activated in the
-OSGi environment). By default, no default instances are produced.
-
-The default instance does not differ from instances created later in the
-module life-cycle. The only difference is that the configuration for the
-default instance cannot be provided by the configuration subsystem. The
-module has to acquire the configuration for these instances on its own.
-It can be acquired from, for example, environment variables. After the
-creation of a default instance, it acts as a regular instance and fully
-participates in the configuration subsystem (It can be reconfigured or
-deleted in following transactions.).
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>controller-docs</artifactId>
<packaging>jar</packaging>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<name>${project.artifactId}</name>
<description>Controller documentation</description>
</dependencyManagement>
<dependencies>
- <!-- Config Subsystem remnants -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-timer-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- </dependency>
-
- <!-- Base model augmentations -->
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
- </dependency>
-
<!-- Clustered implementation -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>cds-dom-api</artifactId>
</dependency>
- <!-- MessageBus -->
+ <!-- Third-party dependencies -->
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-spi</artifactId>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-impl</artifactId>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-util</artifactId>
+ <groupId>org.kohsuke.metainf-services</groupId>
+ <artifactId>metainf-services</artifactId>
</dependency>
-
- <!-- Third-party dependencies -->
<dependency>
- <groupId>javax.inject</groupId>
- <artifactId>javax.inject</artifactId>
- <scope>provided</scope>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.framework</artifactId>
</dependency>
<dependency>
- <groupId>org.kohsuke.metainf-services</groupId>
- <artifactId>metainf-services</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.metatype.annotations</artifactId>
</dependency>
</dependencies>
<configuration combine.children="append">
<links>
<link>https://junit.org/junit4/javadoc/4.13/</link>
- <link>http://hamcrest.org/JavaHamcrest/javadoc/2.2/</link>
- <link>http://google.github.io/truth/api/1.0.1/</link>
- <link>http://www.slf4j.org/apidocs/</link>
- <link>https://google.github.io/guava/releases/28.2-jre/api/docs/</link>
- <link>http://doc.akka.io/japi/akka/2.5.31/</link>
- <link>http://netty.io/4.1/api/</link>
- <link>https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/</link>
- <link>https://commons.apache.org/proper/commons-lang/javadocs/api-3.9/</link>
- <link>https://commons.apache.org/proper/commons-codec/apidocs/</link>
+ <link>https://hamcrest.org/JavaHamcrest/javadoc/2.2/</link>
+ <link>https://www.slf4j.org/apidocs/</link>
+ <link>https://guava.dev/releases/32.0.1-jre/api/docs/</link>
+ <link>https://doc.akka.io/japi/akka/2.6/</link>
+ <link>https://netty.io/4.1/api/</link>
+ <link>https://commons.apache.org/proper/commons-lang/javadocs/api-release/</link>
- <link>https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/7.0.5/</link>
- <link>https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/5.0.5/</link>
- <link>https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/6.0.4/</link>
+ <link>https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/13.0.11/</link>
+ <link>https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/13.0.2/</link>
+ <link>https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/13.0.1/</link>
</links>
<groups>
<group>
<title>Akka RAFT implementation</title>
<packages>org.opendaylight.controller.cluster.raft*</packages>
</group>
- <group>
- <title>MD-SAL Message Bus Bridge (experimental)</title>
- <packages>org.opendaylight.controller.messagebus.*:org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</packages>
- </group>
<group>
<title>MD-SAL Tracing Utilities</title>
<packages>org.opendaylight.controller.md.sal.trace.*:org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908*</packages>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>feature-repo-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-controller-experimental</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>feature</packaging>
<name>OpenDaylight :: Controller :: Experimental Features</name>
<description>Controller Experimental Features</description>
</dependencyManagement>
<dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-exp-messagebus</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-exp-netty-config</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>odl-toaster</artifactId>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>feature-repo-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-controller-testing</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>feature</packaging>
<name>OpenDaylight :: Controller :: Features to support CSIT testing</name>
<description>Controller CSIT Features</description>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>feature-repo-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-controller</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>feature</packaging>
<name>OpenDaylight :: Controller :: Features</name>
<description>Controller Production Features</description>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-broker</artifactId>
+ <artifactId>odl-controller-akka</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-broker-local</artifactId>
+ <artifactId>odl-controller-scala</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-clustering-commons</artifactId>
+ <artifactId>odl-mdsal-broker</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-mdsal-common</artifactId>
+ <artifactId>odl-controller-broker-local</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-distributed-datastore</artifactId>
+ <artifactId>odl-mdsal-clustering-commons</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-model-inventory</artifactId>
+ <artifactId>odl-controller-mdsal-common</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-model-topology</artifactId>
+ <artifactId>odl-mdsal-distributed-datastore</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>clustering-it-provider</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-karaf-cli</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-cluster-admin-karaf-cli</artifactId>
+ </dependency>
</dependencies>
</project>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-clustering-test-app-${project.version}">
<feature name="odl-clustering-test-app" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-model-rfc6991</feature>
+ <feature version="[13,14)">odl-mdsal-model-rfc6991</feature>
</feature>
</features>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>single-feature-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../single-feature-parent</relativePath>
+ </parent>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>odl-controller-akka</artifactId>
+ <packaging>feature</packaging>
+
+ <name>Akka Runtime for OpenDaylight</name>
+
+ <properties>
+ <checkDependencyChange>true</checkDependencyChange>
+ <failOnDependencyChange>true</failOnDependencyChange>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>odl-controller-scala</artifactId>
+ <type>xml</type>
+ <classifier>features</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-akka">
+ <feature version="0.0.0">
+ <feature>odl-controller-scala</feature>
+ <bundle>mvn:com.typesafe/config/1.4.2</bundle>
+ <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.4.3</bundle>
+ <bundle>mvn:io.aeron/aeron-client/1.38.1</bundle>
+ <bundle>mvn:io.aeron/aeron-driver/1.38.1</bundle>
+ <bundle>mvn:io.netty/netty/3.10.6.Final</bundle>
+ <bundle>mvn:org.agrona/agrona/1.15.2</bundle>
+ <bundle>mvn:org.opendaylight.controller/repackaged-akka/${project.version}</bundle>
+ <bundle>mvn:org.reactivestreams/reactive-streams/1.0.4</bundle>
+ <feature>wrap</feature>
+ <bundle>wrap:mvn:org.lmdbjava/lmdbjava/0.7.0</bundle>
+ </feature>
+</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-blueprint-${project.version}">
<feature name="odl-controller-blueprint" version="${project.version}">
- <feature version="[5,6)">odl-yangtools-codec</feature>
- <feature version="[6,7)">odl-mdsal-binding-api</feature>
- <feature version="[6,7)">odl-mdsal-binding-runtime</feature>
- <feature version="[6,7)">odl-mdsal-dom-api</feature>
+ <feature version="[13,14)">odl-yangtools-codec</feature>
+ <feature version="[13,14)">odl-mdsal-binding-api</feature>
+ <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
+ <feature version="[13,14)">odl-mdsal-dom-api</feature>
<bundle start-level="40">mvn:org.opendaylight.controller/blueprint/${project.version}</bundle>
</feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
- <artifactId>odl-mdsal-broker-local</artifactId>
+ <artifactId>odl-controller-broker-local</artifactId>
<packaging>feature</packaging>
- <name>OpenDaylight :: MDSAL :: Broker (local)</name>
+ <name>OpenDaylight :: Controller :: Broker (local)</name>
<description>OpenDaylight Controller stack without clustering</description>
- <properties>
- <!-- FIXME: CONTROLLER-1584:
- - mdsal-eos-dom-simple does not activate
- - we need a DOMDataBroker instance activation
- -->
- <skip.karaf.featureTest>true</skip.karaf.featureTest>
- </properties>
-
<dependencies>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
and is available at http://www.eclipse.org/legal/epl-v10.html
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
- <feature name="odl-mdsal-broker-local" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-dom</feature>
- <feature version="[6,7)">odl-mdsal-eos-binding</feature>
- <feature version="[6,7)">odl-mdsal-eos-dom</feature>
- <feature version="[6,7)">odl-mdsal-singleton-dom</feature>
+ <feature name="odl-controller-broker-local" version="${project.version}">
+ <feature version="[13,14)">odl-mdsal-dom</feature>
+ <feature version="[13,14)">odl-mdsal-eos-binding</feature>
+ <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+ <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
</feature>
</features>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../single-feature-parent</relativePath>
- </parent>
-
- <artifactId>odl-controller-exp-messagebus</artifactId>
- <packaging>feature</packaging>
- <name>OpenDaylight :: Controller :: Experimental :: Message Bus</name>
- <description>Experimental Message Bus Collector</description>
-
- <properties>
- <config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
- </properties>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-model-inventory</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01-minimal</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-broker</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-spi</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-impl</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2020 PANTHEON.tech, s.r.o. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-exp-messagebus-${project.version}">
- <feature name="odl-controller-exp-messagebus" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01-minimal</feature>
- </feature>
-</features>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../single-feature-parent</relativePath>
- </parent>
-
- <artifactId>odl-controller-exp-netty-config</artifactId>
- <packaging>feature</packaging>
- <name>OpenDaylight :: Controller :: Experimental :: Netty Configuration</name>
- <description>Common configuration for Netty resources</description>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-netty-4</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-blueprint</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-timer-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!--
- Copyright © 2018 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-exp-netty-config">
- <feature name="odl-controller-exp-netty-config">
- <feature version="[7,8)">odl-netty-4</feature>
- </feature>
-</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-mdsal-common-${project.version}">
<feature name="odl-controller-mdsal-common" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-common</feature>
- <feature version="[6,7)">odl-mdsal-binding-runtime</feature>
+ <feature version="[13,14)">odl-mdsal-common</feature>
+ <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
</feature>
</features>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2018 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../single-feature-parent</relativePath>
- </parent>
-
- <artifactId>odl-controller-model-topology</artifactId>
- <packaging>feature</packaging>
- <name>OpenDaylight :: MD-SAL :: Topology Model</name>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01-minimal</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-mdsal-model-inventory</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2020 PANTHEON.tech, s.r.o. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-model-topology-${project.version}">
- <feature name="odl-controller-model-topology" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01-minimal</feature>
- </feature>
-</features>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2016 Red Hat, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>single-feature-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../single-feature-parent</relativePath>
+ </parent>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>odl-controller-scala</artifactId>
+ <packaging>feature</packaging>
+
+ <name>Scala Runtime for OpenDaylight</name>
+
+ <properties>
+ <checkDependencyChange>true</checkDependencyChange>
+ <failOnDependencyChange>true</failOnDependencyChange>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-reflect</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-java8-compat_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang.modules</groupId>
+ <artifactId>scala-parser-combinators_2.13</artifactId>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-scala">
+ <feature version="0.0.0">
+ <bundle>mvn:org.scala-lang.modules/scala-java8-compat_2.13/1.0.2</bundle>
+ <bundle>mvn:org.scala-lang.modules/scala-parser-combinators_2.13/1.1.2</bundle>
+ <bundle>mvn:org.scala-lang/scala-library/2.13.13</bundle>
+ <bundle>mvn:org.scala-lang/scala-reflect/2.13.13</bundle>
+ </feature>
+</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<dependency>
<groupId>org.jolokia</groupId>
<artifactId>jolokia-osgi</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.jolokia</groupId>
+ <artifactId>jolokia-core</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.jolokia</groupId>
+ <artifactId>jolokia-jsr160</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<!-- finalname="/etc/org.jolokia.osgi.cfg" -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
<feature name="odl-mdsal-broker" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-singleton-dom</feature>
- <feature version="[6,7)">odl-mdsal-eos-binding</feature>
+ <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
+ <feature version="[13,14)">odl-mdsal-eos-binding</feature>
</feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-akka-system-2.5</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>odl-controller-akka</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-akka-persistence-2.5</artifactId>
+ <artifactId>odl-apache-commons-lang3</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-akka-clustering-2.5</artifactId>
+ <artifactId>odl-dropwizard-metrics</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<dependency>
<groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-apache-commons-lang3</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odl-dropwizard-metrics</artifactId>
+ <artifactId>odl-netty-4</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-segmented-journal</artifactId>
</dependency>
- <dependency>
- <groupId>com.esotericsoftware</groupId>
- <artifactId>kryo</artifactId>
- <version>4.0.2</version>
- </dependency>
- <dependency>
- <groupId>com.esotericsoftware</groupId>
- <artifactId>minlog</artifactId>
- <version>1.3.1</version>
- </dependency>
- <dependency>
- <groupId>com.esotericsoftware</groupId>
- <artifactId>reflectasm</artifactId>
- <version>1.11.8</version>
- </dependency>
- <dependency>
- <groupId>org.ow2.asm</groupId>
- <artifactId>asm</artifactId>
- <version>5.2</version>
- </dependency>
</dependencies>
</project>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
<feature name="odl-mdsal-clustering-commons" version="${project.version}">
- <feature version="[7,8)">odl-akka-system-2.5</feature>
- <feature version="[7,8)">odl-akka-persistence-2.5</feature>
- <feature version="[7,8)">odl-akka-clustering-2.5</feature>
- <feature version="[7,8)">odl-apache-commons-lang3</feature>
- <feature version="[7,8)">odl-dropwizard-metrics</feature>
- <feature version="[7,8)">odl-servlet-api</feature>
- <feature version="[5,6)">odl-yangtools-data</feature>
- <feature version="[5,6)">odl-yangtools-codec</feature>
+ <feature version="[13,14)">odl-apache-commons-lang3</feature>
+ <feature version="[13,14)">odl-dropwizard-metrics</feature>
+ <feature version="[13,14)">odl-netty-4</feature>
+ <feature version="[13,14)">odl-servlet-api</feature>
+ <feature version="[13,14)">odl-yangtools-data</feature>
+ <feature version="[13,14)">odl-yangtools-codec</feature>
</feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<type>xml</type>
<classifier>features</classifier>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>odl-controller-blueprint</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>odl-controller-mdsal-common</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-distributed-eos</artifactId>
+ <artifactId>eos-dom-akka</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
<feature name="odl-mdsal-distributed-datastore" version="${project.version}">
- <feature version="[7,8)">odl-apache-commons-text</feature>
- <feature version="[5,6)">odl-yangtools-codec</feature>
- <feature version="[6,7)">odl-mdsal-eos-dom</feature>
- <feature version="[6,7)">odl-mdsal-dom-broker</feature>
- <feature version="[6,7)">odl-mdsal-binding-dom-adapter</feature>
+ <feature version="[13,14)">odl-apache-commons-text</feature>
+ <feature version="[13,14)">odl-yangtools-codec</feature>
+ <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+ <feature version="[13,14)">odl-mdsal-dom-broker</feature>
+ <feature version="[13,14)">odl-mdsal-binding-dom-adapter</feature>
<configfile finalname="configuration/initial/akka.conf">
mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf
</configfile>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../single-feature-parent</relativePath>
- </parent>
-
- <artifactId>odl-mdsal-model-inventory</artifactId>
- <packaging>feature</packaging>
- <name>OpenDaylight :: MD-SAL :: Inventory Model</name>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>odl-mdsal-model-rfc6991</artifactId>
- <type>xml</type>
- <classifier>features</classifier>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2020 PANTHEON.tech, s.r.o. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-model-inventory-${project.version}">
- <feature name="odl-mdsal-model-inventory" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-model-rfc6991</feature>
- </feature>
-</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../single-feature-parent</relativePath>
</parent>
-->
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-toaster-${project.version}">
<feature name="odl-toaster" version="${project.version}">
- <feature version="[6,7)">odl-mdsal-binding-runtime</feature>
+ <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
</feature>
</features>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-aggregator</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<!-- Experimental features -->
<module>features-controller-experimental</module>
- <module>odl-controller-exp-netty-config</module>
- <module>odl-controller-exp-messagebus</module>
+ <module>odl-toaster</module>
<!-- CSIT features -->
<module>features-controller-testing</module>
<module>odl-clustering-test-app</module>
<module>odl-mdsal-benchmark</module>
+ <!-- Scala/Akka features -->
+ <module>odl-controller-scala</module>
+ <module>odl-controller-akka</module>
+
<!-- Single features, to be cleaned up -->
<module>odl-controller-blueprint</module>
+ <module>odl-controller-broker-local</module>
<module>odl-controller-mdsal-common</module>
- <module>odl-controller-model-topology</module>
<module>odl-jolokia</module>
- <module>odl-mdsal-broker-local</module>
<module>odl-mdsal-broker</module>
<module>odl-mdsal-clustering-commons</module>
<module>odl-mdsal-distributed-datastore</module>
- <module>odl-mdsal-model-inventory</module>
<module>odl-mdsal-remoterpc-connector</module>
- <module>odl-toaster</module>
</modules>
</project>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>single-feature-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <artifactId>bundle-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>odl-jolokia-osgi</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>jar</packaging>
<build>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>karaf4-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>controller-test-karaf</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<artifactId>blueprint</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yangtools-artifacts</artifactId>
- <version>5.0.5</version>
+ <version>13.0.2</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-artifacts</artifactId>
- <version>6.0.4</version>
+ <version>13.0.1</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
<dependency>
<groupId>org.apache.aries.blueprint</groupId>
<artifactId>org.apache.aries.blueprint.core</artifactId>
<groupId>org.apache.aries</groupId>
<artifactId>org.apache.aries.util</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-xml</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-common-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-dom-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-dom-spi</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-binding-dom-codec</artifactId>
+ <artifactId>mdsal-binding-dom-codec-api</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-codec-xml</artifactId>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-spec-util</artifactId>
</dependency>
<dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>yang-binding</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
+ <artifactId>org.osgi.framework</artifactId>
</dependency>
<dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.cm</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
<artifactId>org.osgi.service.event</artifactId>
</dependency>
-
<dependency>
- <groupId>com.google.truth</groupId>
- <artifactId>truth</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.util.tracker</artifactId>
</dependency>
+
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-test-model</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-adapter</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-adapter</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-test-utils</artifactId>
*/
package org.opendaylight.controller.blueprint;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
-import java.util.Dictionary;
import java.util.Enumeration;
import java.util.HashSet;
-import java.util.Hashtable;
import java.util.List;
+import java.util.Map;
import org.apache.aries.blueprint.NamespaceHandler;
import org.apache.aries.blueprint.services.BlueprintExtenderService;
import org.apache.aries.quiesce.participant.QuiesceParticipant;
import org.apache.aries.util.AriesFrameworkUtil;
import org.eclipse.jdt.annotation.Nullable;
-import org.gaul.modernizer_maven_annotations.SuppressModernizer;
import org.opendaylight.controller.blueprint.ext.OpendaylightNamespaceHandler;
import org.opendaylight.yangtools.util.xml.UntrustedXML;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleEvent;
+import org.osgi.framework.FrameworkUtil;
import org.osgi.framework.ServiceReference;
import org.osgi.framework.ServiceRegistration;
import org.osgi.framework.SynchronousBundleListener;
quiesceParticipantTracker.open();
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private QuiesceParticipant onQuiesceParticipantAdded(final ServiceReference<QuiesceParticipant> reference) {
quiesceParticipant = reference.getBundle().getBundleContext().getService(reference);
return quiesceParticipant;
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private BlueprintExtenderService onBlueprintExtenderServiceAdded(
final ServiceReference<BlueprintExtenderService> reference) {
blueprintExtenderService = reference.getBundle().getBundleContext().getService(reference);
}
private void registerNamespaceHandler(final BundleContext context) {
- Dictionary<String, Object> props = emptyDict();
- props.put("osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0);
- namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(), props);
+ namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(),
+ FrameworkUtil.asDictionary(Map.of(
+ "osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0)));
}
private void registerBlueprintEventHandler(final BundleContext context) {
eventHandlerReg = context.registerService(BlueprintListener.class, this, null);
}
- @SuppressModernizer
- private static Dictionary<String, Object> emptyDict() {
- return new Hashtable<>();
- }
-
/**
* Implemented from BundleActivator.
*/
return !paths.isEmpty() ? paths : findBlueprintPaths(bundle, ODL_CUSTOM_BLUEPRINT_FILE_PATH);
}
- @SuppressWarnings({ "rawtypes", "unchecked" })
private static List<Object> findBlueprintPaths(final Bundle bundle, final String path) {
Enumeration<?> rntries = bundle.findEntries(path, BLUEPRINT_FLE_PATTERN, false);
if (rntries == null) {
- return Collections.emptyList();
+ return List.of();
} else {
- return Collections.list((Enumeration)rntries);
+ return List.copyOf(Collections.list(rntries));
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import java.util.function.Predicate;
-import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-
-abstract class AbstractInvokableServiceMetadata extends AbstractDependentComponentFactoryMetadata {
- private final String interfaceName;
-
- private ListenerRegistration<DOMRpcAvailabilityListener> rpcListenerReg;
- private RpcConsumerRegistry rpcRegistry;
- private Class<RpcService> rpcInterface;
- private Set<SchemaPath> rpcSchemaPaths;
-
- AbstractInvokableServiceMetadata(final String id, final String interfaceName) {
- super(id);
- this.interfaceName = requireNonNull(interfaceName);
- }
-
- Class<RpcService> rpcInterface() {
- return rpcInterface;
- }
-
- @SuppressWarnings({ "checkstyle:IllegalCatch", "unchecked" })
- @Override
- public final void init(final ExtendedBlueprintContainer container) {
- super.init(container);
-
- final Class<?> interfaceClass;
- try {
- interfaceClass = container().getBundleContext().getBundle().loadClass(interfaceName);
- } catch (final Exception e) {
- throw new ComponentDefinitionException(String.format("%s: Error obtaining interface class %s",
- logName(), interfaceName), e);
- }
-
- if (!RpcService.class.isAssignableFrom(interfaceClass)) {
- throw new ComponentDefinitionException(String.format(
- "%s: The specified interface %s is not an RpcService", logName(), interfaceName));
- }
-
- rpcInterface = (Class<RpcService>)interfaceClass;
- }
-
- @Override
- protected final void startTracking() {
- // Request RpcProviderRegistry first ...
- retrieveService("RpcConsumerRegistry", RpcConsumerRegistry.class, this::onRpcRegistry);
- }
-
- private void onRpcRegistry(final Object service) {
- log.debug("{}: Retrieved RpcProviderRegistry {}", logName(), service);
- rpcRegistry = (RpcConsumerRegistry)service;
-
- // Now acquire SchemaService...
- retrieveService("SchemaService", DOMSchemaService.class, this::onSchemaService);
- }
-
- private void onSchemaService(final Object service) {
- log.debug("{}: Retrieved SchemaService {}", logName(), service);
-
- // Now get the SchemaContext and trigger RPC resolution
- retrievedSchemaContext(((DOMSchemaService)service).getGlobalContext());
- }
-
- private void retrievedSchemaContext(final SchemaContext schemaContext) {
- log.debug("{}: retrievedSchemaContext", logName());
-
- final Collection<SchemaPath> schemaPaths = RpcUtil.decomposeRpcService(rpcInterface, schemaContext,
- rpcFilter());
- if (schemaPaths.isEmpty()) {
- log.debug("{}: interface {} has no acceptable entries, assuming it is satisfied", logName(), rpcInterface);
- setSatisfied();
- return;
- }
-
- rpcSchemaPaths = ImmutableSet.copyOf(schemaPaths);
- log.debug("{}: Got SchemaPaths: {}", logName(), rpcSchemaPaths);
-
- // First get the DOMRpcService OSGi service. This will be used to register a listener to be notified
- // when the underlying DOM RPC service is available.
- retrieveService("DOMRpcService", DOMRpcService.class, this::retrievedDOMRpcService);
- }
-
- private void retrievedDOMRpcService(final Object service) {
- log.debug("{}: retrievedDOMRpcService {}", logName(), service);
- final DOMRpcService domRpcService = (DOMRpcService)service;
-
- setDependencyDesc("Available DOM RPC for binding RPC: " + rpcInterface);
- rpcListenerReg = domRpcService.registerRpcListener(new DOMRpcAvailabilityListener() {
- @Override
- public void onRpcAvailable(final Collection<DOMRpcIdentifier> rpcs) {
- onRpcsAvailable(rpcs);
- }
-
- @Override
- public void onRpcUnavailable(final Collection<DOMRpcIdentifier> rpcs) {
- }
- });
- }
-
- abstract Predicate<RpcRoutingStrategy> rpcFilter();
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- @Override
- public final Object create() throws ComponentDefinitionException {
- log.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
- super.onCreate();
-
- try {
- RpcService rpcService = rpcRegistry.getRpcService(rpcInterface);
-
- log.debug("{}: create returning service {}", logName(), rpcService);
-
- return rpcService;
- } catch (final RuntimeException e) {
- throw new ComponentDefinitionException("Error getting RPC service for " + interfaceName, e);
- }
- }
-
- protected final void onRpcsAvailable(final Collection<DOMRpcIdentifier> rpcs) {
- for (DOMRpcIdentifier identifier : rpcs) {
- if (rpcSchemaPaths.contains(identifier.getType())) {
- log.debug("{}: onRpcsAvailable - found SchemaPath {}", logName(), identifier.getType());
- setSatisfied();
- break;
- }
- }
- }
-
- @Override
- public final void stopTracking() {
- super.stopTracking();
- closeRpcListenerReg();
- }
-
- private void closeRpcListenerReg() {
- if (rpcListenerReg != null) {
- rpcListenerReg.close();
- rpcListenerReg = null;
- }
- }
-
- @Override
- public final void destroy(final Object instance) {
- super.destroy(instance);
- closeRpcListenerReg();
- }
-
- @Override
- public final String toString() {
- return MoreObjects.toStringHelper(this).add("id", getId()).add("interfaceName", interfaceName).toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "action-provider" element that registers the promise to instantiate action
- * instances with RpcProviderRegistry.
- *
- * <p>
- * This bean has two distinct facets:
- * - if a reference bean is provided, it registers it with {@link RpcProviderService}
- * - if a reference bean is not provided, it registers the corresponding no-op implementation with
- * {@link DOMRpcProviderService} for all action (Routed RPC) elements in the provided interface
- *
- * @author Robert Varga
- */
-public class ActionProviderBean {
- static final String ACTION_PROVIDER = "action-provider";
-
- private static final Logger LOG = LoggerFactory.getLogger(ActionProviderBean.class);
-
- private DOMRpcProviderService domRpcProvider;
- private RpcProviderService bindingRpcProvider;
- private DOMSchemaService schemaService;
- private RpcService implementation;
- private String interfaceName;
- private Registration reg;
- private Bundle bundle;
-
- public void setBundle(final Bundle bundle) {
- this.bundle = bundle;
- }
-
- public void setInterfaceName(final String interfaceName) {
- this.interfaceName = interfaceName;
- }
-
- public void setImplementation(final RpcService implementation) {
- this.implementation = implementation;
- }
-
- public void setDomRpcProvider(final DOMRpcProviderService rpcProviderService) {
- this.domRpcProvider = rpcProviderService;
- }
-
- public void setBindingRpcProvider(final RpcProviderService rpcProvider) {
- this.bindingRpcProvider = rpcProvider;
- }
-
- public void setSchemaService(final DOMSchemaService schemaService) {
- this.schemaService = schemaService;
- }
-
- public void init() {
- // First resolve the interface class
- final Class<RpcService> interfaceClass = getRpcClass();
-
- LOG.debug("{}: resolved interface {} to {}", ACTION_PROVIDER, interfaceName, interfaceClass);
-
- if (implementation != null) {
- registerImplementation(interfaceClass);
- } else {
- registerFallback(interfaceClass);
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void destroy() {
- if (reg != null) {
- try {
- reg.close();
- } catch (final Exception e) {
- LOG.warn("{}: error while unregistering", ACTION_PROVIDER, e);
- } finally {
- reg = null;
- }
- }
- }
-
- @SuppressWarnings("unchecked")
- private Class<RpcService> getRpcClass() {
- final Class<?> iface;
-
- try {
- iface = bundle.loadClass(interfaceName);
- } catch (final ClassNotFoundException e) {
- throw new ComponentDefinitionException(String.format(
- "The specified \"interface\" for %s \"%s\" does not refer to an available class", interfaceName,
- ACTION_PROVIDER), e);
- }
- if (!RpcService.class.isAssignableFrom(iface)) {
- throw new ComponentDefinitionException(String.format(
- "The specified \"interface\" %s for \"%s\" is not an RpcService", interfaceName, ACTION_PROVIDER));
- }
-
- return (Class<RpcService>) iface;
- }
-
- private void registerFallback(final Class<RpcService> interfaceClass) {
- final Collection<SchemaPath> paths = RpcUtil.decomposeRpcService(interfaceClass,
- schemaService.getGlobalContext(), RpcRoutingStrategy::isContextBasedRouted);
- if (paths.isEmpty()) {
- LOG.warn("{}: interface {} has no actions defined", ACTION_PROVIDER, interfaceClass);
- return;
- }
-
- final Set<DOMRpcIdentifier> rpcs = ImmutableSet.copyOf(Collections2.transform(paths, DOMRpcIdentifier::create));
- reg = domRpcProvider.registerRpcImplementation(
- (rpc, input) -> FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException(
- "Action %s has no instance matching %s", rpc, input)), rpcs);
- LOG.debug("Registered provider for {}", interfaceName);
- }
-
- private void registerImplementation(final Class<RpcService> interfaceClass) {
- if (!interfaceClass.isInstance(implementation)) {
- throw new ComponentDefinitionException(String.format(
- "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
- interfaceName, ACTION_PROVIDER, implementation.getClass()));
- }
-
- reg = bindingRpcProvider.registerRpcImplementation(interfaceClass, implementation);
- LOG.debug("Registered implementation {} for {}", implementation, interfaceName);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "action-service" element. It waits for a DOM promise of registration
- * to appear in the {@link DOMRpcService} and then acquires a dynamic proxy via RpcProviderRegistry.
- *
- * @author Robert Varga
- */
-final class ActionServiceMetadata extends AbstractInvokableServiceMetadata {
- /*
- * Implementation note:
- *
- * This implementation assumes Binding V1 semantics for actions, which means actions are packaged along with RPCs
- * into a single interface. This has interesting implications on working with RpcServiceMetadata, which only
- * handles the RPC side of the contract.
- *
- * Further interesting interactions stem from the fact that in DOM world each action is a separate entity, so the
- * interface contract can let some actions to be invoked, while failing for others. This is a shortcoming of the
- * Binding Specification and will be addressed in Binding V2 -- where each action is its own interface.
- */
- ActionServiceMetadata(final String id, final String interfaceName) {
- super(id, interfaceName);
- }
-
- @Override
- Predicate<RpcRoutingStrategy> rpcFilter() {
- return RpcRoutingStrategy::isContextBasedRouted;
- }
-}
*/
package org.opendaylight.controller.blueprint.ext;
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
import com.google.common.base.Strings;
+import com.google.common.collect.Iterables;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URISyntaxException;
-import java.util.List;
-import javax.xml.parsers.ParserConfigurationException;
+import java.util.Set;
import javax.xml.stream.XMLStreamException;
import javax.xml.transform.dom.DOMSource;
import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.Identifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Key;
+import org.opendaylight.yangtools.yang.binding.KeyAware;
+import org.opendaylight.yangtools.yang.binding.contract.Naming;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.codec.xml.XmlParserStream;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.NormalizedNodeResult;
+import org.opendaylight.yangtools.yang.data.impl.schema.NormalizationResultHolder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
+import org.opendaylight.yangtools.yang.model.api.stmt.KeyEffectiveStatement;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaTreeEffectiveStatement;
import org.osgi.service.blueprint.container.ComponentDefinitionException;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
* @author Thomas Pantelis (originally; re-factored by Michael Vorburger.ch)
*/
public abstract class BindingContext {
- private static String GET_KEY_METHOD = "key";
-
public static BindingContext create(final String logName, final Class<? extends DataObject> klass,
final String appConfigListKeyValue) {
- if (Identifiable.class.isAssignableFrom(klass)) {
+ if (KeyAware.class.isAssignableFrom(klass)) {
// The binding class corresponds to a yang list.
if (Strings.isNullOrEmpty(appConfigListKeyValue)) {
throw new ComponentDefinitionException(String.format(
}
public final InstanceIdentifier<DataObject> appConfigPath;
- public final Class<DataObject> appConfigBindingClass;
+ public final Class<?> appConfigBindingClass;
public final Class<? extends DataSchemaNode> schemaType;
public final QName bindingQName;
- private BindingContext(final Class<DataObject> appConfigBindingClass,
- final InstanceIdentifier<DataObject> appConfigPath, final Class<? extends DataSchemaNode> schemaType) {
+ private BindingContext(final Class<?> appConfigBindingClass, final InstanceIdentifier<DataObject> appConfigPath,
+ final Class<? extends DataSchemaNode> schemaType) {
this.appConfigBindingClass = appConfigBindingClass;
this.appConfigPath = appConfigPath;
this.schemaType = schemaType;
bindingQName = BindingReflections.findQName(appConfigBindingClass);
}
- public NormalizedNode<?, ?> parseDataElement(final Element element, final DataSchemaNode dataSchema,
- final EffectiveModelContext schemaContext) throws XMLStreamException, IOException,
- ParserConfigurationException, SAXException, URISyntaxException {
- final NormalizedNodeResult resultHolder = new NormalizedNodeResult();
+ public NormalizedNode parseDataElement(final Element element, final SchemaTreeInference dataSchema)
+ throws XMLStreamException, IOException, SAXException, URISyntaxException {
+ final NormalizationResultHolder resultHolder = new NormalizationResultHolder();
final NormalizedNodeStreamWriter writer = ImmutableNormalizedNodeStreamWriter.from(resultHolder);
- final XmlParserStream xmlParser = XmlParserStream.create(writer, schemaContext, dataSchema);
+ final XmlParserStream xmlParser = XmlParserStream.create(writer, dataSchema);
xmlParser.traverse(new DOMSource(element));
- final NormalizedNode<?, ?> result = resultHolder.getResult();
- if (result instanceof MapNode) {
- final MapNode mapNode = (MapNode) result;
- final MapEntryNode mapEntryNode = mapNode.getValue().iterator().next();
- return mapEntryNode;
- }
-
- return result;
+ final NormalizedNode result = resultHolder.getResult().data();
+ return result instanceof MapNode mapNode ? mapNode.body().iterator().next() : result;
}
- public abstract NormalizedNode<?, ?> newDefaultNode(DataSchemaNode dataSchema);
+ public abstract NormalizedNode newDefaultNode(SchemaTreeInference dataSchema);
/**
* BindingContext implementation for a container binding.
private static class ContainerBindingContext extends BindingContext {
@SuppressWarnings("unchecked")
ContainerBindingContext(final Class<? extends DataObject> appConfigBindingClass) {
- super((Class<DataObject>) appConfigBindingClass,
- InstanceIdentifier.create((Class<DataObject>) appConfigBindingClass), ContainerSchemaNode.class);
+ super(appConfigBindingClass, InstanceIdentifier.create((Class) appConfigBindingClass),
+ ContainerSchemaNode.class);
}
@Override
- public NormalizedNode<?, ?> newDefaultNode(final DataSchemaNode dataSchema) {
- return ImmutableNodes.containerNode(bindingQName);
+ public ContainerNode newDefaultNode(final SchemaTreeInference dataSchema) {
+ return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(bindingQName)).build();
}
}
final String listKeyValue) throws InstantiationException, IllegalAccessException,
IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {
// We assume the yang list key type is string.
- Identifier keyInstance = (Identifier) bindingClass.getMethod(GET_KEY_METHOD).getReturnType()
- .getConstructor(String.class).newInstance(listKeyValue);
+ Key keyInstance = (Key) bindingClass.getMethod(Naming.KEY_AWARE_KEY_NAME)
+ .getReturnType().getConstructor(String.class).newInstance(listKeyValue);
InstanceIdentifier appConfigPath = InstanceIdentifier.builder((Class)bindingClass, keyInstance).build();
return new ListBindingContext(bindingClass, appConfigPath, listKeyValue);
}
@Override
- public NormalizedNode<?, ?> newDefaultNode(final DataSchemaNode dataSchema) {
+ public NormalizedNode newDefaultNode(final SchemaTreeInference dataSchema) {
+ final SchemaTreeEffectiveStatement<?> stmt = Iterables.getLast(dataSchema.statementPath());
+
// We assume there's only one key for the list.
- List<QName> keys = ((ListSchemaNode)dataSchema).getKeyDefinition();
- Preconditions.checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass);
- QName listKeyQName = keys.get(0);
- return ImmutableNodes.mapEntryBuilder(bindingQName, listKeyQName, appConfigListKeyValue).build();
+ final Set<QName> keys = stmt.findFirstEffectiveSubstatementArgument(KeyEffectiveStatement.class)
+ .orElseThrow();
+
+ checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass);
+ QName listKeyQName = keys.iterator().next();
+ return ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(NodeIdentifierWithPredicates.of(bindingQName, listKeyQName, appConfigListKeyValue))
+ .withChild(ImmutableNodes.leafNode(listKeyQName, appConfigListKeyValue))
+ .build();
}
}
}
import com.google.common.base.Strings;
import java.util.ArrayList;
import java.util.Dictionary;
-import java.util.Hashtable;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.aries.blueprint.ComponentDefinitionRegistry;
import org.apache.aries.blueprint.mutable.MutableBeanMetadata;
import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata;
import org.apache.aries.util.AriesFrameworkUtil;
-import org.gaul.modernizer_maven_annotations.SuppressModernizer;
import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
import org.osgi.framework.Bundle;
import org.osgi.framework.Constants;
+import org.osgi.framework.FrameworkUtil;
import org.osgi.framework.ServiceRegistration;
-import org.osgi.service.blueprint.reflect.BeanProperty;
-import org.osgi.service.blueprint.reflect.ComponentMetadata;
import org.osgi.service.blueprint.reflect.ValueMetadata;
import org.osgi.service.cm.ManagedService;
import org.slf4j.Logger;
private static final String CM_PERSISTENT_ID_PROPERTY = "persistentId";
private final List<ServiceRegistration<?>> managedServiceRegs = new ArrayList<>();
- private Bundle bundle;
- private BlueprintContainerRestartService blueprintContainerRestartService;
+ private Bundle bundle = null;
+ private BlueprintContainerRestartService blueprintContainerRestartService = null;
private boolean restartDependentsOnUpdates;
private boolean useDefaultForReferenceTypes;
}
public void setBlueprintContainerRestartService(final BlueprintContainerRestartService restartService) {
- this.blueprintContainerRestartService = restartService;
+ blueprintContainerRestartService = restartService;
}
public void setRestartDependentsOnUpdates(final boolean restartDependentsOnUpdates) {
}
public void destroy() {
- for (ServiceRegistration<?> reg: managedServiceRegs) {
- AriesFrameworkUtil.safeUnregisterService(reg);
- }
+ managedServiceRegs.forEach(AriesFrameworkUtil::safeUnregisterService);
}
@Override
public void process(final ComponentDefinitionRegistry registry) {
LOG.debug("{}: In process", logName());
- for (String name : registry.getComponentDefinitionNames()) {
- ComponentMetadata component = registry.getComponentDefinition(name);
- if (component instanceof MutableBeanMetadata) {
- processMutableBeanMetadata((MutableBeanMetadata) component);
- } else if (component instanceof MutableServiceReferenceMetadata) {
- processServiceReferenceMetadata((MutableServiceReferenceMetadata)component);
+ for (var name : registry.getComponentDefinitionNames()) {
+ final var component = registry.getComponentDefinition(name);
+ if (component instanceof MutableBeanMetadata bean) {
+ processMutableBeanMetadata(bean);
+ } else if (component instanceof MutableServiceReferenceMetadata serviceRef) {
+ processServiceReferenceMetadata(serviceRef);
}
}
}
LOG.debug("{}: Found PropertyPlaceholder bean: {}, runtime {}", logName(), bean.getId(),
bean.getRuntimeClass());
- for (BeanProperty prop : bean.getProperties()) {
+ for (var prop : bean.getProperties()) {
if (CM_PERSISTENT_ID_PROPERTY.equals(prop.getName())) {
- if (prop.getValue() instanceof ValueMetadata) {
- ValueMetadata persistentId = (ValueMetadata)prop.getValue();
-
- LOG.debug("{}: Found {} property, value : {}", logName(),
- CM_PERSISTENT_ID_PROPERTY, persistentId.getStringValue());
-
+ if (prop.getValue() instanceof ValueMetadata persistentId) {
+ LOG.debug("{}: Found {} property, value : {}", logName(), CM_PERSISTENT_ID_PROPERTY,
+ persistentId.getStringValue());
registerManagedService(persistentId.getStringValue());
} else {
- LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata",
- logName(), CM_PERSISTENT_ID_PROPERTY, prop.getValue());
+ LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata", logName(),
+ CM_PERSISTENT_ID_PROPERTY, prop.getValue());
}
break;
}
}
- @SuppressModernizer
private void registerManagedService(final String persistentId) {
// Register a ManagedService so we get updates from the ConfigAdmin when the cfg file corresponding
// to the persistentId changes.
- final ManagedService managedService = new ManagedService() {
+ final var managedService = new ManagedService() {
private final AtomicBoolean initialUpdate = new AtomicBoolean(true);
private volatile Dictionary<String, ?> previousProperties;
}
};
- Dictionary<String, Object> props = new Hashtable<>();
- props.put(Constants.SERVICE_PID, persistentId);
- props.put(Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName());
- props.put(Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION));
- managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService, props));
+ managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService,
+ FrameworkUtil.asDictionary(Map.of(
+ Constants.SERVICE_PID, persistentId,
+ Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName(),
+ Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION)))));
}
private String logName() {
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Optional;
-import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLStreamException;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.dom.api.DOMSchemaService;
import org.opendaylight.yangtools.util.xml.UntrustedXML;
import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaTreeEffectiveStatement;
+import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
@FunctionalInterface
public interface FallbackConfigProvider {
- NormalizedNode<?,?> get(EffectiveModelContext schemaContext, DataSchemaNode dataSchema) throws IOException,
- XMLStreamException, ParserConfigurationException, SAXException, URISyntaxException;
+ NormalizedNode get(SchemaTreeInference dataSchema)
+ throws IOException, XMLStreamException, SAXException, URISyntaxException;
}
@FunctionalInterface
return Resources.getResource(testClass, defaultAppConfigFileName);
}
- public T createDefaultInstance() throws ConfigXMLReaderException, ParserConfigurationException, XMLStreamException,
- IOException, SAXException, URISyntaxException {
- return createDefaultInstance((schemaContext, dataSchema) -> {
- throw new IllegalArgumentException("Failed to read XML "
- + "(not creating model from defaults as runtime would, for better clarity in tests)");
+ public T createDefaultInstance() throws ConfigXMLReaderException, XMLStreamException, IOException, SAXException,
+ URISyntaxException {
+ return createDefaultInstance(dataSchema -> {
+ throw new IllegalArgumentException(
+ "Failed to read XML (not creating model from defaults as runtime would, for better clarity in tests)");
});
}
@SuppressWarnings("unchecked")
public T createDefaultInstance(final FallbackConfigProvider fallback) throws ConfigXMLReaderException,
- URISyntaxException, ParserConfigurationException, XMLStreamException, SAXException, IOException {
+ URISyntaxException, XMLStreamException, SAXException, IOException {
YangInstanceIdentifier yangPath = bindingSerializer.toYangInstanceIdentifier(bindingContext.appConfigPath);
LOG.debug("{}: Creating app config instance from path {}, Qname: {}", logName, yangPath,
checkNotNull(module, "%s: Could not obtain the module schema for namespace %s, revision %s",
logName, bindingContext.bindingQName.getNamespace(), bindingContext.bindingQName.getRevision());
- QName qname = bindingContext.bindingQName;
- DataSchemaNode dataSchema = module.findDataChildByName(qname).orElseThrow(
- () -> new ConfigXMLReaderException(logName + ": Could not obtain the schema for " + qname));
-
- checkNotNull(dataSchema, "%s: Could not obtain the schema for %s", logName, bindingContext.bindingQName);
+ final SchemaInferenceStack schemaStack = SchemaInferenceStack.of(schemaContext);
+ final SchemaTreeEffectiveStatement<?> dataSchema;
+ try {
+ dataSchema = schemaStack.enterSchemaTree(bindingContext.bindingQName);
+ } catch (IllegalArgumentException e) {
+ throw new ConfigXMLReaderException(
+ logName + ": Could not obtain the schema for " + bindingContext.bindingQName, e);
+ }
- checkCondition(bindingContext.schemaType.isAssignableFrom(dataSchema.getClass()),
+ checkCondition(bindingContext.schemaType.isInstance(dataSchema),
"%s: Expected schema type %s for %s but actual type is %s", logName,
bindingContext.schemaType, bindingContext.bindingQName, dataSchema.getClass());
- NormalizedNode<?, ?> dataNode = parsePossibleDefaultAppConfigXMLFile(schemaContext, dataSchema);
+ NormalizedNode dataNode = parsePossibleDefaultAppConfigXMLFile(schemaStack);
if (dataNode == null) {
- dataNode = fallback.get(schemaService.getGlobalContext(), dataSchema);
+ dataNode = fallback.get(schemaStack.toSchemaTreeInference());
}
DataObject appConfig = bindingSerializer.fromNormalizedNode(yangPath, dataNode).getValue();
}
}
- private NormalizedNode<?, ?> parsePossibleDefaultAppConfigXMLFile(final EffectiveModelContext schemaContext,
- final DataSchemaNode dataSchema) throws ConfigXMLReaderException {
-
+ private NormalizedNode parsePossibleDefaultAppConfigXMLFile(final SchemaInferenceStack schemaStack)
+ throws ConfigXMLReaderException {
String appConfigFileName = defaultAppConfigFileName;
if (Strings.isNullOrEmpty(appConfigFileName)) {
- String moduleName = findYangModuleName(bindingContext.bindingQName, schemaContext);
+ String moduleName = schemaStack.currentModule().argument().getLocalName();
+
appConfigFileName = moduleName + "_" + bindingContext.bindingQName.getLocalName() + ".xml";
}
if (!optionalURL.isPresent()) {
return null;
}
- URL url = optionalURL.get();
+ URL url = optionalURL.orElseThrow();
try (InputStream is = url.openStream()) {
Document root = UntrustedXML.newDocumentBuilder().parse(is);
- NormalizedNode<?, ?> dataNode = bindingContext.parseDataElement(root.getDocumentElement(), dataSchema,
- schemaContext);
+ NormalizedNode dataNode = bindingContext.parseDataElement(root.getDocumentElement(),
+ schemaStack.toSchemaTreeInference());
LOG.debug("{}: Parsed data node: {}", logName, dataNode);
return dataNode;
- } catch (final IOException | SAXException | XMLStreamException | ParserConfigurationException
- | URISyntaxException e) {
+ } catch (final IOException | SAXException | XMLStreamException | URISyntaxException e) {
String msg = String.format("%s: Could not read/parse app config %s", logName, url);
LOG.error(msg, e);
throw new ConfigXMLReaderException(msg, e);
}
}
-
- private String findYangModuleName(final QName qname, final SchemaContext schemaContext)
- throws ConfigXMLReaderException {
- for (Module m : schemaContext.getModules()) {
- if (qname.getModule().equals(m.getQNameModule())) {
- return m.getName();
- }
- }
- throw new ConfigXMLReaderException(
- String.format("%s: Could not find yang module for QName %s", logName, qname));
- }
-
}
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
-import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLStreamException;
import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader.ConfigURLProvider;
-import org.opendaylight.mdsal.binding.api.ClusteredDataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataObjectModification;
import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
import org.osgi.service.blueprint.container.ComponentDefinitionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final AtomicBoolean readingInitialAppConfig = new AtomicBoolean(true);
private volatile BindingContext bindingContext;
- private volatile ListenerRegistration<?> appConfigChangeListenerReg;
+ private volatile Registration appConfigChangeListenerReg;
private volatile DataObject currentAppConfig;
// Note: the BindingNormalizedNodeSerializer interface is annotated as deprecated because there's an
this.defaultAppConfigFileName = defaultAppConfigFileName;
this.appConfigBindingClassName = appConfigBindingClassName;
this.appConfigListKeyValue = appConfigListKeyValue;
- this.appConfigUpdateStrategy = updateStrategyValue;
+ appConfigUpdateStrategy = updateStrategyValue;
}
@Override
Class<DataObject> appConfigBindingClass;
try {
Class<?> bindingClass = container.getBundleContext().getBundle().loadClass(appConfigBindingClassName);
- if (!DataObject.class.isAssignableFrom(bindingClass)) {
+ if (!ChildOf.class.isAssignableFrom(bindingClass)) {
throw new ComponentDefinitionException(String.format(
"%s: Specified app config binding class %s does not extend %s",
- logName(), appConfigBindingClassName, DataObject.class.getName()));
+ logName(), appConfigBindingClassName, ChildOf.class.getName()));
}
appConfigBindingClass = (Class<DataObject>) bindingClass;
setDependencyDesc("Initial app config " + bindingContext.appConfigBindingClass.getSimpleName());
- // We register a DTCL to get updates and also read the app config data from the data store. If
- // the app config data is present then both the read and initial DTCN update will return it. If the
- // the data isn't present, we won't get an initial DTCN update so the read will indicate the data
- // isn't present.
-
- DataTreeIdentifier<DataObject> dataTreeId = DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION,
- bindingContext.appConfigPath);
- appConfigChangeListenerReg = dataBroker.registerDataTreeChangeListener(dataTreeId,
- (ClusteredDataTreeChangeListener<DataObject>) this::onAppConfigChanged);
+ // We register a DTCL to get updates and also read the app config data from the data store. If the app config
+ // data is present then both the read and initial DTCN update will return it. If the the data isn't present, we
+ // will not get an initial DTCN update so the read will indicate the data is not present.
+ appConfigChangeListenerReg = dataBroker.registerTreeChangeListener(
+ DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, bindingContext.appConfigPath),
+ this::onAppConfigChanged);
readInitialAppConfig(dataBroker);
}
private void onAppConfigChanged(final Collection<DataTreeModification<DataObject>> changes) {
for (DataTreeModification<DataObject> change: changes) {
DataObjectModification<DataObject> changeRoot = change.getRootNode();
- ModificationType type = changeRoot.getModificationType();
+ ModificationType type = changeRoot.modificationType();
LOG.debug("{}: onAppConfigChanged: {}, {}", logName(), type, change.getRootPath());
if (type == ModificationType.SUBTREE_MODIFIED || type == ModificationType.WRITE) {
- DataObject newAppConfig = changeRoot.getDataAfter();
+ DataObject newAppConfig = changeRoot.dataAfter();
LOG.debug("New app config instance: {}, previous: {}", newAppConfig, currentAppConfig);
if (result) {
DataObject localAppConfig;
if (possibleAppConfig.isPresent()) {
- localAppConfig = possibleAppConfig.get();
+ localAppConfig = possibleAppConfig.orElseThrow();
} else {
// No app config data is present so create an empty instance via the bindingSerializer service.
// This will also return default values for leafs that haven't been explicitly set.
DataStoreAppConfigDefaultXMLReader<?> reader = new DataStoreAppConfigDefaultXMLReader<>(logName(),
defaultAppConfigFileName, getOSGiService(DOMSchemaService.class), bindingSerializer, bindingContext,
inputStreamProvider);
- return reader.createDefaultInstance((schemaContext, dataSchema) -> {
+ return reader.createDefaultInstance(dataSchema -> {
// Fallback if file cannot be read, try XML from Config
- NormalizedNode<?, ?> dataNode = parsePossibleDefaultAppConfigElement(schemaContext, dataSchema);
+ NormalizedNode dataNode = parsePossibleDefaultAppConfigElement(dataSchema);
if (dataNode == null) {
// or, as last resort, defaults from the model
return bindingContext.newDefaultNode(dataSchema);
}
});
- } catch (final ConfigXMLReaderException | IOException | SAXException | XMLStreamException
- | ParserConfigurationException | URISyntaxException e) {
+ } catch (ConfigXMLReaderException | IOException | SAXException | XMLStreamException | URISyntaxException e) {
if (e.getCause() == null) {
setFailureMessage(e.getMessage());
} else {
}
}
- private @Nullable NormalizedNode<?, ?> parsePossibleDefaultAppConfigElement(
- final EffectiveModelContext schemaContext, final DataSchemaNode dataSchema) throws URISyntaxException,
- IOException, ParserConfigurationException, SAXException, XMLStreamException {
+ private @Nullable NormalizedNode parsePossibleDefaultAppConfigElement(final SchemaTreeInference dataSchema)
+ throws URISyntaxException, IOException, SAXException, XMLStreamException {
if (defaultAppConfigElement == null) {
return null;
}
LOG.debug("{}: Got app config schema: {}", logName(), dataSchema);
- NormalizedNode<?, ?> dataNode = bindingContext.parseDataElement(defaultAppConfigElement, dataSchema,
- schemaContext);
+ NormalizedNode dataNode = bindingContext.parseDataElement(defaultAppConfigElement, dataSchema);
LOG.debug("{}: Parsed data node: {}", logName(), dataNode);
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.osgi.framework.Bundle;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "notification-listener" element that registers a NotificationListener
- * with the NotificationService.
- *
- * @author Thomas Pantelis
- */
-public class NotificationListenerBean {
- private static final Logger LOG = LoggerFactory.getLogger(NotificationListenerBean.class);
- static final String NOTIFICATION_LISTENER = "notification-listener";
-
- private Bundle bundle;
- private NotificationService notificationService;
- private NotificationListener notificationListener;
- private ListenerRegistration<?> registration;
-
- public void setNotificationService(final NotificationService notificationService) {
- this.notificationService = notificationService;
- }
-
- public void setNotificationListener(final NotificationListener notificationListener) {
- this.notificationListener = notificationListener;
- }
-
- public void setBundle(final Bundle bundle) {
- this.bundle = bundle;
- }
-
- public void init() {
- LOG.debug("{}: init - registering NotificationListener {}", bundle.getSymbolicName(), notificationListener);
-
- registration = notificationService.registerNotificationListener(notificationListener);
- }
-
- public void destroy() {
- if (registration != null) {
- LOG.debug("{}: destroy - closing ListenerRegistration {}", bundle.getSymbolicName(), notificationListener);
- registration.close();
- } else {
- LOG.debug("{}: destroy - listener was not registered", bundle.getSymbolicName());
- }
- }
-}
import java.io.IOException;
import java.io.StringReader;
import java.net.URL;
-import java.util.Collections;
import java.util.Set;
import org.apache.aries.blueprint.ComponentDefinitionRegistry;
import org.apache.aries.blueprint.NamespaceHandler;
import org.apache.aries.blueprint.ParserContext;
-import org.apache.aries.blueprint.ext.ComponentFactoryMetadata;
import org.apache.aries.blueprint.mutable.MutableBeanMetadata;
import org.apache.aries.blueprint.mutable.MutableRefMetadata;
import org.apache.aries.blueprint.mutable.MutableReferenceMetadata;
import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata;
import org.apache.aries.blueprint.mutable.MutableValueMetadata;
import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
import org.opendaylight.yangtools.util.xml.UntrustedXML;
import org.osgi.service.blueprint.container.ComponentDefinitionException;
import org.osgi.service.blueprint.reflect.BeanMetadata;
*/
public final class OpendaylightNamespaceHandler implements NamespaceHandler {
public static final String NAMESPACE_1_0_0 = "http://opendaylight.org/xmlns/blueprint/v1.0.0";
- static final String ROUTED_RPC_REG_CONVERTER_NAME = "org.opendaylight.blueprint.RoutedRpcRegConverter";
- static final String DOM_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.DOMRpcProviderService";
- static final String RPC_REGISTRY_NAME = "org.opendaylight.blueprint.RpcRegistry";
- static final String BINDING_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.RpcProviderService";
- static final String SCHEMA_SERVICE_NAME = "org.opendaylight.blueprint.SchemaService";
- static final String NOTIFICATION_SERVICE_NAME = "org.opendaylight.blueprint.NotificationService";
- static final String TYPE_ATTR = "type";
- static final String UPDATE_STRATEGY_ATTR = "update-strategy";
private static final Logger LOG = LoggerFactory.getLogger(OpendaylightNamespaceHandler.class);
+ private static final String TYPE_ATTR = "type";
+ private static final String UPDATE_STRATEGY_ATTR = "update-strategy";
private static final String COMPONENT_PROCESSOR_NAME = ComponentProcessor.class.getName();
private static final String RESTART_DEPENDENTS_ON_UPDATES = "restart-dependents-on-updates";
private static final String USE_DEFAULT_FOR_REFERENCE_TYPES = "use-default-for-reference-types";
private static final String CLUSTERED_APP_CONFIG = "clustered-app-config";
- private static final String INTERFACE = "interface";
- private static final String REF_ATTR = "ref";
private static final String ID_ATTR = "id";
- private static final String RPC_SERVICE = "rpc-service";
- private static final String ACTION_SERVICE = "action-service";
- private static final String SPECIFIC_SERVICE_REF_LIST = "specific-reference-list";
- private static final String STATIC_REFERENCE = "static-reference";
@SuppressWarnings("rawtypes")
@Override
public Set<Class> getManagedClasses() {
- return Collections.emptySet();
+ return Set.of();
}
@Override
public Metadata parse(final Element element, final ParserContext context) {
LOG.debug("In parse for {}", element);
- if (nodeNameEquals(element, RpcImplementationBean.RPC_IMPLEMENTATION)) {
- return parseRpcImplementation(element, context);
- } else if (nodeNameEquals(element, RPC_SERVICE)) {
- return parseRpcService(element, context);
- } else if (nodeNameEquals(element, NotificationListenerBean.NOTIFICATION_LISTENER)) {
- return parseNotificationListener(element, context);
- } else if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
+ if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
return parseClusteredAppConfig(element, context);
- } else if (nodeNameEquals(element, SPECIFIC_SERVICE_REF_LIST)) {
- return parseSpecificReferenceList(element, context);
- } else if (nodeNameEquals(element, STATIC_REFERENCE)) {
- return parseStaticReference(element, context);
- } else if (nodeNameEquals(element, ACTION_SERVICE)) {
- return parseActionService(element, context);
- } else if (nodeNameEquals(element, ActionProviderBean.ACTION_PROVIDER)) {
- return parseActionProvider(element, context);
}
throw new ComponentDefinitionException("Unsupported standalone element: " + element.getNodeName());
private static ComponentMetadata decorateServiceType(final Attr attr, final ComponentMetadata component,
final ParserContext context) {
- if (!(component instanceof MutableServiceMetadata)) {
+ if (!(component instanceof MutableServiceMetadata service)) {
throw new ComponentDefinitionException("Expected an instanceof MutableServiceMetadata");
}
- MutableServiceMetadata service = (MutableServiceMetadata)component;
-
LOG.debug("decorateServiceType for {} - adding type property {}", service.getId(), attr.getValue());
service.addServiceProperty(createValue(context, TYPE_ATTR), createValue(context, attr.getValue()));
return metadata;
}
- private static Metadata parseActionProvider(final Element element, final ParserContext context) {
- registerDomRpcProviderServiceRefBean(context);
- registerBindingRpcProviderServiceRefBean(context);
- registerSchemaServiceRefBean(context);
-
- MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), ActionProviderBean.class,
- true, true);
- addBlueprintBundleRefProperty(context, metadata);
- metadata.addProperty("domRpcProvider", createRef(context, DOM_RPC_PROVIDER_SERVICE_NAME));
- metadata.addProperty("bindingRpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
- metadata.addProperty("schemaService", createRef(context, SCHEMA_SERVICE_NAME));
- metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
-
- if (element.hasAttribute(REF_ATTR)) {
- metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
- }
-
- LOG.debug("parseActionProvider returning {}", metadata);
- return metadata;
- }
-
-
- private static Metadata parseRpcImplementation(final Element element, final ParserContext context) {
- registerBindingRpcProviderServiceRefBean(context);
-
- MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), RpcImplementationBean.class,
- true, true);
- addBlueprintBundleRefProperty(context, metadata);
- metadata.addProperty("rpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
- metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
-
- if (element.hasAttribute(INTERFACE)) {
- metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
- }
-
- LOG.debug("parseRpcImplementation returning {}", metadata);
- return metadata;
- }
-
- private static Metadata parseActionService(final Element element, final ParserContext context) {
- ComponentFactoryMetadata metadata = new ActionServiceMetadata(getId(context, element),
- element.getAttribute(INTERFACE));
-
- LOG.debug("parseActionService returning {}", metadata);
-
- return metadata;
- }
-
- private static Metadata parseRpcService(final Element element, final ParserContext context) {
- ComponentFactoryMetadata metadata = new RpcServiceMetadata(getId(context, element),
- element.getAttribute(INTERFACE));
-
- LOG.debug("parseRpcService returning {}", metadata);
-
- return metadata;
- }
-
- private static void registerDomRpcProviderServiceRefBean(final ParserContext context) {
- registerRefBean(context, DOM_RPC_PROVIDER_SERVICE_NAME, DOMRpcProviderService.class);
- }
-
- private static void registerBindingRpcProviderServiceRefBean(final ParserContext context) {
- registerRefBean(context, BINDING_RPC_PROVIDER_SERVICE_NAME, RpcProviderService.class);
- }
-
- private static void registerSchemaServiceRefBean(final ParserContext context) {
- registerRefBean(context, SCHEMA_SERVICE_NAME, DOMSchemaService.class);
- }
-
- private static void registerRefBean(final ParserContext context, final String name, final Class<?> clazz) {
- ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
- if (registry.getComponentDefinition(name) == null) {
- MutableReferenceMetadata metadata = createServiceRef(context, clazz, null);
- metadata.setId(name);
- registry.registerComponentDefinition(metadata);
- }
- }
-
- private static Metadata parseNotificationListener(final Element element, final ParserContext context) {
- registerNotificationServiceRefBean(context);
-
- MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), NotificationListenerBean.class,
- true, true);
- addBlueprintBundleRefProperty(context, metadata);
- metadata.addProperty("notificationService", createRef(context, NOTIFICATION_SERVICE_NAME));
- metadata.addProperty("notificationListener", createRef(context, element.getAttribute(REF_ATTR)));
-
- LOG.debug("parseNotificationListener returning {}", metadata);
-
- return metadata;
- }
-
- private static void registerNotificationServiceRefBean(final ParserContext context) {
- ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
- if (registry.getComponentDefinition(NOTIFICATION_SERVICE_NAME) == null) {
- MutableReferenceMetadata metadata = createServiceRef(context, NotificationService.class, null);
- metadata.setId(NOTIFICATION_SERVICE_NAME);
- registry.registerComponentDefinition(metadata);
- }
- }
-
private static Metadata parseClusteredAppConfig(final Element element, final ParserContext context) {
LOG.debug("parseClusteredAppConfig");
}
}
- private static Metadata parseSpecificReferenceList(final Element element, final ParserContext context) {
- ComponentFactoryMetadata metadata = new SpecificReferenceListMetadata(getId(context, element),
- element.getAttribute(INTERFACE));
-
- LOG.debug("parseSpecificReferenceList returning {}", metadata);
-
- return metadata;
- }
-
- private static Metadata parseStaticReference(final Element element, final ParserContext context) {
- ComponentFactoryMetadata metadata = new StaticReferenceMetadata(getId(context, element),
- element.getAttribute(INTERFACE));
-
- LOG.debug("parseStaticReference returning {}", metadata);
-
- return metadata;
- }
-
private static Element parseXML(final String name, final String xml) {
try {
return UntrustedXML.newDocumentBuilder().parse(new InputSource(new StringReader(xml))).getDocumentElement();
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.base.Strings;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "rpc-implementation" element that registers an RPC implementation with
- * the RpcProviderRegistry.
- *
- * @author Thomas Pantelis
- */
-public class RpcImplementationBean {
- private static final Logger LOG = LoggerFactory.getLogger(RpcImplementationBean.class);
- static final String RPC_IMPLEMENTATION = "rpc-implementation";
-
- private RpcProviderService rpcProvider;
- private Bundle bundle;
- private String interfaceName;
- private RpcService implementation;
- private final List<ObjectRegistration<RpcService>> rpcRegistrations = new ArrayList<>();
-
- public void setRpcProvider(final RpcProviderService rpcProvider) {
- this.rpcProvider = rpcProvider;
- }
-
- public void setBundle(final Bundle bundle) {
- this.bundle = bundle;
- }
-
- public void setInterfaceName(final String interfaceName) {
- this.interfaceName = interfaceName;
- }
-
- public void setImplementation(final RpcService implementation) {
- this.implementation = implementation;
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void init() {
- try {
- List<Class<RpcService>> rpcInterfaces = getImplementedRpcServiceInterfaces(interfaceName,
- implementation.getClass(), bundle, RPC_IMPLEMENTATION);
-
- LOG.debug("{}: init - adding implementation {} for RpcService interface(s) {}", bundle.getSymbolicName(),
- implementation, rpcInterfaces);
-
- for (Class<RpcService> rpcInterface : rpcInterfaces) {
- rpcRegistrations.add(rpcProvider.registerRpcImplementation(rpcInterface, implementation));
- }
- } catch (final ComponentDefinitionException e) {
- throw e;
- } catch (final Exception e) {
- throw new ComponentDefinitionException(String.format(
- "Error processing \"%s\" for %s", RPC_IMPLEMENTATION, implementation.getClass()), e);
- }
- }
-
- public void destroy() {
- for (ObjectRegistration<RpcService> reg: rpcRegistrations) {
- reg.close();
- }
- }
-
- @SuppressWarnings("unchecked")
- static List<Class<RpcService>> getImplementedRpcServiceInterfaces(final String interfaceName,
- final Class<?> implementationClass, final Bundle bundle, final String logName)
- throws ClassNotFoundException {
- if (!Strings.isNullOrEmpty(interfaceName)) {
- Class<?> rpcInterface = bundle.loadClass(interfaceName);
-
- if (!rpcInterface.isAssignableFrom(implementationClass)) {
- throw new ComponentDefinitionException(String.format(
- "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
- interfaceName, logName, implementationClass));
- }
-
- return Collections.singletonList((Class<RpcService>)rpcInterface);
- }
-
- List<Class<RpcService>> rpcInterfaces = new ArrayList<>();
- for (Class<?> intface : implementationClass.getInterfaces()) {
- if (RpcService.class.isAssignableFrom(intface)) {
- rpcInterfaces.add((Class<RpcService>) intface);
- }
- }
-
- if (rpcInterfaces.isEmpty()) {
- throw new ComponentDefinitionException(String.format(
- "The \"ref\" instance %s for \"%s\" does not implemented any RpcService interfaces",
- implementationClass, logName));
- }
-
- return rpcInterfaces;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "rpc-service" element that gets an RPC service implementation from
- * the RpcProviderRegistry and provides it to the Blueprint container.
- *
- * @author Thomas Pantelis
- */
-final class RpcServiceMetadata extends AbstractInvokableServiceMetadata {
- RpcServiceMetadata(final String id, final String interfaceName) {
- super(id, interfaceName);
- }
-
- @Override
- Predicate<RpcRoutingStrategy> rpcFilter() {
- return s -> !s.isContextBasedRouted();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utility methods for dealing with various aspects of RPCs and actions.
- *
- * @author Robert Varga
- */
-final class RpcUtil {
- private static final Logger LOG = LoggerFactory.getLogger(RpcUtil.class);
-
- private RpcUtil() {
- throw new UnsupportedOperationException();
- }
-
- static Collection<SchemaPath> decomposeRpcService(final Class<RpcService> service,
- final SchemaContext schemaContext, final Predicate<RpcRoutingStrategy> filter) {
- final QNameModule moduleName = BindingReflections.getQNameModule(service);
- final Module module = schemaContext.findModule(moduleName).orElseThrow(() -> new IllegalArgumentException(
- "Module not found in SchemaContext: " + moduleName + "; service: " + service));
- LOG.debug("Resolved service {} to module {}", service, module);
-
- final Collection<? extends RpcDefinition> rpcs = module.getRpcs();
- final Collection<SchemaPath> ret = new ArrayList<>(rpcs.size());
- for (RpcDefinition rpc : rpcs) {
- final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpc);
- if (filter.test(strategy)) {
- ret.add(rpc.getPath());
- }
- }
-
- return ret;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.io.Resources;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.IOException;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleEvent;
-import org.osgi.framework.ServiceReference;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.osgi.util.tracker.BundleTracker;
-import org.osgi.util.tracker.BundleTrackerCustomizer;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "specific-reference-list" element that obtains a specific list
- * of service instances from the OSGi registry for a given interface. The specific list is learned by first
- * extracting the list of expected service types by inspecting RESOLVED bundles for a resource file under
- * META-INF/services with the same name as the given interface. The type(s) listed in the resource file
- * must match the "type" property of the advertised service(s). In this manner, an app bundle announces the
- * service type(s) that it will advertise so that this class knows which services to expect up front. Once
- * all the expected services are obtained, the container is notified that all dependencies of this component
- * factory are satisfied.
- *
- * @author Thomas Pantelis
- */
-class SpecificReferenceListMetadata extends AbstractDependentComponentFactoryMetadata {
- private static final Logger LOG = LoggerFactory.getLogger(SpecificReferenceListMetadata.class);
-
- private final String interfaceName;
- private final String serviceResourcePath;
- private final Collection<String> expectedServiceTypes = new ConcurrentSkipListSet<>();
- private final Collection<String> retrievedServiceTypes = new ConcurrentSkipListSet<>();
- private final Collection<Object> retrievedServices = Collections.synchronizedList(new ArrayList<>());
- private volatile BundleTracker<Bundle> bundleTracker;
- private volatile ServiceTracker<Object, Object> serviceTracker;
-
- SpecificReferenceListMetadata(final String id, final String interfaceName) {
- super(id);
- this.interfaceName = interfaceName;
- serviceResourcePath = "META-INF/services/" + interfaceName;
- }
-
- @Override
- protected void startTracking() {
- BundleTrackerCustomizer<Bundle> bundleListener = new BundleTrackerCustomizer<>() {
- @Override
- public Bundle addingBundle(final Bundle bundle, final BundleEvent event) {
- bundleAdded(bundle);
- return bundle;
- }
-
- @Override
- public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
- }
-
- @Override
- public void removedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
- }
- };
-
- bundleTracker = new BundleTracker<>(container().getBundleContext(), Bundle.RESOLVED | Bundle.STARTING
- | Bundle.STOPPING | Bundle.ACTIVE, bundleListener);
-
- // This will get the list of all current RESOLVED+ bundles.
- bundleTracker.open();
-
- if (expectedServiceTypes.isEmpty()) {
- setSatisfied();
- return;
- }
-
- ServiceTrackerCustomizer<Object, Object> serviceListener = new ServiceTrackerCustomizer<>() {
- @Override
- public Object addingService(final ServiceReference<Object> reference) {
- return serviceAdded(reference);
- }
-
- @Override
- public void modifiedService(final ServiceReference<Object> reference, final Object service) {
- }
-
- @Override
- public void removedService(final ServiceReference<Object> reference, final Object service) {
- container().getBundleContext().ungetService(reference);
- }
- };
-
- setDependencyDesc(interfaceName + " services with types " + expectedServiceTypes);
-
- serviceTracker = new ServiceTracker<>(container().getBundleContext(), interfaceName, serviceListener);
- serviceTracker.open();
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void bundleAdded(final Bundle bundle) {
- URL resource = bundle.getEntry(serviceResourcePath);
- if (resource == null) {
- return;
- }
-
- LOG.debug("{}: Found {} resource in bundle {}", logName(), resource, bundle.getSymbolicName());
-
- try {
- for (String line : Resources.readLines(resource, StandardCharsets.UTF_8)) {
- int ci = line.indexOf('#');
- if (ci >= 0) {
- line = line.substring(0, ci);
- }
-
- line = line.trim();
- if (line.isEmpty()) {
- continue;
- }
-
- String serviceType = line;
- LOG.debug("{}: Retrieved service type {}", logName(), serviceType);
- expectedServiceTypes.add(serviceType);
- }
- } catch (final IOException e) {
- setFailure(String.format("%s: Error reading resource %s from bundle %s", logName(), resource,
- bundle.getSymbolicName()), e);
- }
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private Object serviceAdded(final ServiceReference<Object> reference) {
- Object service = container().getBundleContext().getService(reference);
- String serviceType = (String) reference.getProperty(OpendaylightNamespaceHandler.TYPE_ATTR);
-
- LOG.debug("{}: Service type {} added from bundle {}", logName(), serviceType,
- reference.getBundle().getSymbolicName());
-
- if (serviceType == null) {
- LOG.error("{}: Missing OSGi service property '{}' for service interface {} in bundle {}", logName(),
- OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName, reference.getBundle().getSymbolicName());
- return service;
- }
-
- if (!expectedServiceTypes.contains(serviceType)) {
- LOG.error("{}: OSGi service property '{}' for service interface {} in bundle {} was not found in the "
- + "expected service types {} obtained via {} bundle resources. Is the bundle resource missing or "
- + "the service type misspelled?", logName(), OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName,
- reference.getBundle().getSymbolicName(), expectedServiceTypes, serviceResourcePath);
- return service;
- }
-
- // If already satisfied, meaning we got all initial services, then a new bundle must've been
- // dynamically installed or a prior service's blueprint container was restarted, in which case we
- // restart our container.
- if (isSatisfied()) {
- restartContainer();
- } else {
- retrievedServiceTypes.add(serviceType);
- retrievedServices.add(service);
-
- if (retrievedServiceTypes.equals(expectedServiceTypes)) {
- LOG.debug("{}: Got all expected service types", logName());
- setSatisfied();
- } else {
- Set<String> remaining = new HashSet<>(expectedServiceTypes);
- remaining.removeAll(retrievedServiceTypes);
- setDependencyDesc(interfaceName + " services with types " + remaining);
- }
- }
-
- return service;
- }
-
- @Override
- public Object create() throws ComponentDefinitionException {
- LOG.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
- super.onCreate();
-
- LOG.debug("{}: create returning service list {}", logName(), retrievedServices);
-
- synchronized (retrievedServices) {
- return ImmutableList.copyOf(retrievedServices);
- }
- }
-
- @Override
- public void destroy(final Object instance) {
- super.destroy(instance);
-
- if (bundleTracker != null) {
- bundleTracker.close();
- bundleTracker = null;
- }
-
- if (serviceTracker != null) {
- serviceTracker.close();
- serviceTracker = null;
- }
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("SpecificReferenceListMetadata [interfaceName=").append(interfaceName)
- .append(", serviceResourcePath=").append(serviceResourcePath).append("]");
- return builder.toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "static-reference" element that obtains an OSGi service and
- * returns the actual instance. This differs from the standard "reference" element that returns a dynamic
- * proxy whose underlying service instance can come and go.
- *
- * @author Thomas Pantelis
- */
-class StaticReferenceMetadata extends AbstractDependentComponentFactoryMetadata {
- private static final Logger LOG = LoggerFactory.getLogger(StaticReferenceMetadata.class);
-
- private final String interfaceName;
- private volatile Object retrievedService;
-
- StaticReferenceMetadata(final String id, final String interfaceName) {
- super(id);
- this.interfaceName = interfaceName;
- }
-
- @Override
- protected void startTracking() {
- retrieveService(interfaceName, interfaceName, service -> {
- retrievedService = service;
- setSatisfied();
- });
- }
-
- @Override
- public Object create() throws ComponentDefinitionException {
- super.onCreate();
-
- LOG.debug("{}: create returning service {}", logName(), retrievedService);
-
- return retrievedService;
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("StaticReferenceMetadata [interfaceName=").append(interfaceName).append("]");
- return builder.toString();
- }
-}
class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
private static final Logger LOG = LoggerFactory.getLogger(StaticServiceReferenceRecipe.class);
- private static final SatisfactionListener NOOP_LISTENER = satisfiable -> {
- // Intentional NOOP
- };
-
private volatile ServiceReference<?> trackedServiceReference;
private volatile Object trackedService;
private Consumer<Object> serviceSatisfiedCallback;
}
void startTracking(final Consumer<Object> newServiceSatisfiedCallback) {
- this.serviceSatisfiedCallback = newServiceSatisfiedCallback;
- super.start(NOOP_LISTENER);
+ serviceSatisfiedCallback = newServiceSatisfiedCallback;
+ super.start(satisfiable -> {
+ // Intentional NOOP
+ });
}
@SuppressWarnings("rawtypes")
*/
package org.opendaylight.controller.blueprint.tests;
-import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
import org.junit.Test;
import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader;
* @author Michael Vorburger.ch
*/
public class DataStoreAppConfigDefaultXMLReaderTest extends AbstractConcurrentDataBrokerTest {
-
@Test
public void testConfigXML() throws Exception {
- Lists lists = new DataStoreAppConfigDefaultXMLReader<>(
- getClass(), "/opendaylight-sal-test-store-config.xml",
- getDataBrokerTestCustomizer().getSchemaService(),
- getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(),
- Lists.class).createDefaultInstance();
+ Lists lists = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/opendaylight-sal-test-store-config.xml",
+ getDataBrokerTestCustomizer().getSchemaService(),
+ getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class)
+ .createDefaultInstance();
- UnorderedList element = lists.getUnorderedContainer().getUnorderedList().values().iterator().next();
- assertThat(element.getName()).isEqualTo("someName");
- assertThat(element.getValue()).isEqualTo("someValue");
+ UnorderedList element = lists.nonnullUnorderedContainer().nonnullUnorderedList().values().iterator().next();
+ assertEquals("someName", element.getName());
+ assertEquals("someValue", element.getValue());
}
- @Test(expected = IllegalArgumentException.class)
+ @Test
public void testBadXMLName() throws Exception {
- new DataStoreAppConfigDefaultXMLReader<>(
- getClass(), "/badname.xml",
- getDataBrokerTestCustomizer().getSchemaService(),
- getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(),
- Lists.class).createDefaultInstance();
+ final var reader = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/badname.xml",
+ getDataBrokerTestCustomizer().getSchemaService(),
+ getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class);
+
+ final String message = assertThrows(IllegalArgumentException.class, reader::createDefaultInstance).getMessage();
+ assertEquals("resource /badname.xml relative to " + DataStoreAppConfigDefaultXMLReaderTest.class.getName()
+ + " not found.", message);
}
}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
- <description>Configuration Wrapper around netty's event executor</description>
-
- <dependencies>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.eventexecutor;
-
-import com.google.common.reflect.AbstractInvocationHandler;
-import com.google.common.reflect.Reflection;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.GlobalEventExecutor;
-import io.netty.util.concurrent.ImmediateEventExecutor;
-import java.lang.reflect.Method;
-import java.util.concurrent.TimeUnit;
-
-public interface AutoCloseableEventExecutor extends EventExecutor, AutoCloseable {
-
- static AutoCloseableEventExecutor globalEventExecutor() {
- return CloseableEventExecutorMixin.createCloseableProxy(GlobalEventExecutor.INSTANCE);
- }
-
- static AutoCloseableEventExecutor immediateEventExecutor() {
- return CloseableEventExecutorMixin.createCloseableProxy(ImmediateEventExecutor.INSTANCE);
- }
-
- class CloseableEventExecutorMixin implements AutoCloseable {
- public static final int DEFAULT_SHUTDOWN_SECONDS = 1;
- private final EventExecutor eventExecutor;
-
- public CloseableEventExecutorMixin(final EventExecutor eventExecutor) {
- this.eventExecutor = eventExecutor;
- }
-
- @Override
- @SuppressFBWarnings(value = "UC_USELESS_VOID_METHOD", justification = "False positive")
- public void close() {
- eventExecutor.shutdownGracefully(0, DEFAULT_SHUTDOWN_SECONDS, TimeUnit.SECONDS);
- }
-
- static AutoCloseableEventExecutor createCloseableProxy(final EventExecutor eventExecutor) {
- final CloseableEventExecutorMixin closeableEventExecutor = new CloseableEventExecutorMixin(eventExecutor);
- return Reflection.newProxy(AutoCloseableEventExecutor.class, new AbstractInvocationHandler() {
- @Override
- protected Object handleInvocation(final Object proxy, final Method method, final Object[] args)
- throws Throwable {
- if (method.getName().equals("close")) {
- closeableEventExecutor.close();
- return null;
- } else {
- return method.invoke(closeableEventExecutor.eventExecutor, args);
- }
- }
- });
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.eventexecutor;
-
-import static io.netty.util.concurrent.GlobalEventExecutor.INSTANCE;
-
-import com.google.common.annotations.Beta;
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.EventExecutorGroup;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.ProgressivePromise;
-import io.netty.util.concurrent.Promise;
-import io.netty.util.concurrent.ScheduledFuture;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Beta
-@Component(immediate = true, property = "type=global-event-executor")
-public final class OSGiGlobalEventExecutor implements EventExecutor {
- private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalEventExecutor.class);
-
- @Override
- public boolean isShuttingDown() {
- return INSTANCE.isShuttingDown();
- }
-
- @Override
- public Future<?> shutdownGracefully() {
- return INSTANCE.shutdownGracefully();
- }
-
- @Override
- public Future<?> shutdownGracefully(final long quietPeriod, final long timeout, final TimeUnit unit) {
- return INSTANCE.shutdownGracefully(quietPeriod, timeout, unit);
- }
-
- @Override
- public Future<?> terminationFuture() {
- return INSTANCE.terminationFuture();
- }
-
- @Override
- @Deprecated
- public void shutdown() {
- INSTANCE.shutdown();
- }
-
- @Override
- public List<Runnable> shutdownNow() {
- return INSTANCE.shutdownNow();
- }
-
- @Override
- public Iterator<EventExecutor> iterator() {
- return INSTANCE.iterator();
- }
-
- @Override
- public Future<?> submit(final Runnable task) {
- return INSTANCE.submit(task);
- }
-
- @Override
- public <T> Future<T> submit(final Runnable task, final T result) {
- return INSTANCE.submit(task, result);
- }
-
- @Override
- public <T> Future<T> submit(final Callable<T> task) {
- return INSTANCE.submit(task);
- }
-
- @Override
- public ScheduledFuture<?> schedule(final Runnable command, final long delay, final TimeUnit unit) {
- return INSTANCE.schedule(command, delay, unit);
- }
-
- @Override
- public <V> ScheduledFuture<V> schedule(final Callable<V> callable, final long delay, final TimeUnit unit) {
- return INSTANCE.schedule(callable, delay, unit);
- }
-
- @Override
- public ScheduledFuture<?> scheduleAtFixedRate(final Runnable command, final long initialDelay, final long period,
- final TimeUnit unit) {
- return INSTANCE.scheduleAtFixedRate(command, initialDelay, period, unit);
- }
-
- @Override
- public ScheduledFuture<?> scheduleWithFixedDelay(final Runnable command, final long initialDelay, final long delay,
- final TimeUnit unit) {
- return INSTANCE.scheduleWithFixedDelay(command, initialDelay, delay, unit);
- }
-
- @Override
- public boolean isShutdown() {
- return INSTANCE.isShutdown();
- }
-
- @Override
- public boolean isTerminated() {
- return INSTANCE.isTerminated();
- }
-
- @Override
- public boolean awaitTermination(final long timeout, final TimeUnit unit) throws InterruptedException {
- return INSTANCE.awaitTermination(timeout, unit);
- }
-
- @Override
- public <T> List<java.util.concurrent.Future<T>> invokeAll(final Collection<? extends Callable<T>> tasks)
- throws InterruptedException {
- return INSTANCE.invokeAll(tasks);
- }
-
- @Override
- public <T> List<java.util.concurrent.Future<T>> invokeAll(final Collection<? extends Callable<T>> tasks,
- final long timeout, final TimeUnit unit) throws InterruptedException {
- return INSTANCE.invokeAll(tasks, timeout, unit);
- }
-
- @Override
- public <T> T invokeAny(final Collection<? extends Callable<T>> tasks)
- throws InterruptedException, ExecutionException {
- return INSTANCE.invokeAny(tasks);
- }
-
- @Override
- public <T> T invokeAny(final Collection<? extends Callable<T>> tasks, final long timeout, final TimeUnit unit)
- throws InterruptedException, ExecutionException, TimeoutException {
- return INSTANCE.invokeAny(tasks, timeout, unit);
- }
-
- @Override
- public void execute(final Runnable command) {
- INSTANCE.execute(command);
- }
-
- @Override
- public EventExecutor next() {
- return INSTANCE.next();
- }
-
- @Override
- public EventExecutorGroup parent() {
- return INSTANCE.parent();
- }
-
- @Override
- public boolean inEventLoop() {
- return INSTANCE.inEventLoop();
- }
-
- @Override
- public boolean inEventLoop(final Thread thread) {
- return INSTANCE.inEventLoop(thread);
- }
-
- @Override
- public <V> Promise<V> newPromise() {
- return INSTANCE.newPromise();
- }
-
- @Override
- public <V> ProgressivePromise<V> newProgressivePromise() {
- return INSTANCE.newProgressivePromise();
- }
-
- @Override
- public <V> Future<V> newSucceededFuture(final V result) {
- return INSTANCE.newSucceededFuture(result);
- }
-
- @Override
- public <V> Future<V> newFailedFuture(final Throwable cause) {
- return INSTANCE.newFailedFuture(cause);
- }
-
- @Activate
- void activate() {
- LOG.info("Global Event executor enabled");
- }
-
- @Deactivate
- void deactivate() {
- LOG.info("Global Event executor disabled");
- }
-
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
- <description>Configuration Wrapper around netty's event group</description>
-
- <dependencies>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-transport</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import io.netty.channel.nio.NioEventLoopGroup;
-import java.util.concurrent.TimeUnit;
-
-public final class NioEventLoopGroupCloseable extends NioEventLoopGroup implements AutoCloseable {
- private NioEventLoopGroupCloseable(final int threadCount) {
- super(threadCount);
- }
-
- private NioEventLoopGroupCloseable() {
- }
-
- @Override
- public void close() {
- shutdownGracefully(0, 1, TimeUnit.SECONDS);
- }
-
- public static NioEventLoopGroupCloseable newInstance(final Integer threadCount) {
- if (threadCount == null || threadCount <= 0) {
- return new NioEventLoopGroupCloseable();
- }
-
- return new NioEventLoopGroupCloseable(threadCount);
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
- odl:restart-dependents-on-updates="false">
-
- <cm:property-placeholder persistent-id="org.opendaylight.netty.threadgroup" update-strategy="none">
- <cm:default-properties>
- <!-- 0 means use the default number of threads which is 2 * number of CPUs -->
- <cm:property name="global-boss-group-thread-count" value="0"/>
- <cm:property name="global-worker-group-thread-count" value="0"/>
- </cm:default-properties>
- </cm:property-placeholder>
-
- <bean id="globalBossGroup" class="org.opendaylight.controller.config.yang.netty.threadgroup.NioEventLoopGroupCloseable"
- factory-method="newInstance">
- <argument value="${global-boss-group-thread-count}"/>
- </bean>
-
- <service ref="globalBossGroup" interface="io.netty.channel.EventLoopGroup" odl:type="global-boss-group">
- <service-properties>
- <entry key="config-module-namespace" value="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup"/>
- <entry key="config-module-name" value="netty-threadgroup-fixed"/>
- <entry key="config-instance-name" value="global-boss-group"/>
- </service-properties>
- </service>
-
- <bean id="globalWorkerGroup" class="org.opendaylight.controller.config.yang.netty.threadgroup.NioEventLoopGroupCloseable"
- factory-method="newInstance">
- <argument value="${global-worker-group-thread-count}"/>
- </bean>
-
- <service ref="globalWorkerGroup" interface="io.netty.channel.EventLoopGroup" odl:type="global-worker-group">
- <service-properties>
- <entry key="config-module-namespace" value="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup"/>
- <entry key="config-module-name" value="netty-threadgroup-fixed"/>
- <entry key="config-instance-name" value="global-worker-group"/>
- </service-properties>
- </service>
-
-</blueprint>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-timer-config</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
- <description>Configuration Wrapper around netty's timer</description>
-
- <dependencies>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.timer;
-
-import io.netty.util.HashedWheelTimer;
-import io.netty.util.Timeout;
-import io.netty.util.Timer;
-import io.netty.util.TimerTask;
-import java.util.Set;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import org.eclipse.jdt.annotation.Nullable;
-
-public final class HashedWheelTimerCloseable implements AutoCloseable, Timer {
-
- private final Timer timer;
-
- private HashedWheelTimerCloseable(final Timer timer) {
- this.timer = timer;
- }
-
- @Override
- public void close() {
- stop();
- }
-
- @Override
- public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) {
- return this.timer.newTimeout(task, delay, unit);
- }
-
- @Override
- public Set<Timeout> stop() {
- return this.timer.stop();
- }
-
- public static HashedWheelTimerCloseable newInstance(final @Nullable Long duration,
- final @Nullable Integer ticksPerWheel) {
- return newInstance(null, duration, ticksPerWheel);
- }
-
- public static HashedWheelTimerCloseable newInstance(final @Nullable ThreadFactory threadFactory,
- final @Nullable Long duration, final @Nullable Integer ticksPerWheel) {
- TimeUnit unit = TimeUnit.MILLISECONDS;
- if (!nullOrNonPositive(duration) && threadFactory == null && nullOrNonPositive(ticksPerWheel)) {
- return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit));
- }
-
- if (!nullOrNonPositive(duration) && threadFactory == null && !nullOrNonPositive(ticksPerWheel)) {
- return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit, ticksPerWheel));
- }
-
- if (nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
- return new HashedWheelTimerCloseable(new HashedWheelTimer(threadFactory));
- }
-
- if (!nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
- return new HashedWheelTimerCloseable(
- new HashedWheelTimer(threadFactory, duration, unit));
- }
-
- if (!nullOrNonPositive(duration) && threadFactory != null && !nullOrNonPositive(ticksPerWheel)) {
- return new HashedWheelTimerCloseable(
- new HashedWheelTimer(threadFactory, duration, unit, ticksPerWheel));
- }
-
- return new HashedWheelTimerCloseable(new HashedWheelTimer());
- }
-
- private static boolean nullOrNonPositive(final Number num) {
- return num == null || num.longValue() <= 0;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.timer;
-
-import io.netty.util.Timeout;
-import io.netty.util.Timer;
-import io.netty.util.TimerTask;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.metatype.annotations.AttributeDefinition;
-import org.osgi.service.metatype.annotations.Designate;
-import org.osgi.service.metatype.annotations.ObjectClassDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Component(immediate = true, configurationPid = "org.opendaylight.netty.timer", property = "type=global-timer")
-@Designate(ocd = OSGiGlobalTimer.Config.class)
-public final class OSGiGlobalTimer implements Timer {
- @ObjectClassDefinition
- public @interface Config {
- @AttributeDefinition(name = "tick-duration")
- long tickDuration() default 0;
- @AttributeDefinition(name = "ticks-per-wheel")
- int ticksPerWheel() default 0;
- }
-
- private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalTimer.class);
-
- private Timer delegate;
-
- @Override
- public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) {
- return delegate.newTimeout(task, delay, unit);
- }
-
- @Override
- public Set<Timeout> stop() {
- return delegate.stop();
- }
-
- @Activate
- void activate(final Config config) {
- delegate = HashedWheelTimerCloseable.newInstance(config.tickDuration(), config.ticksPerWheel());
- LOG.info("Global Netty timer started");
- }
-
- @Deactivate
- void deactivate() {
- delegate.stop();
- LOG.info("Global Netty timer stopped");
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-aggregator</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>pom</packaging>
-
- <properties>
- <maven.deploy.skip>true</maven.deploy.skip>
- <maven.install.skip>true</maven.install.skip>
- </properties>
-
- <modules>
- <module>threadpool-config-api</module>
- <module>threadpool-config-impl</module>
- <module>netty-threadgroup-config</module>
- <module>netty-event-executor-config</module>
- <module>netty-timer-config</module>
- </modules>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-/**
- * Interface representing scheduled {@link ThreadPool}.
- */
-public interface ScheduledThreadPool extends ThreadPool {
-
- @Override
- ScheduledExecutorService getExecutor();
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ExecutorService;
-
-/**
- * Interface representing thread pool.
- */
-public interface ThreadPool {
-
- ExecutorService getExecutor();
-
- int getMaxThreadCount();
-}
\ No newline at end of file
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.odlparent</groupId>
- <artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- <version>0.13.4-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
-
- <dependencyManagement>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>controller-artifacts</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <type>pom</type>
- <scope>import</scope>
- </dependency>
- </dependencies>
- </dependencyManagement>
-
- <dependencies>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>threadpool-config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} using fixed number of threads wraps
- * {@link ExecutorService}.
- */
-public class FixedThreadPoolWrapper implements ThreadPool, Closeable {
-
- private final ThreadPoolExecutor executor;
-
- public FixedThreadPoolWrapper(int threadCount, ThreadFactory factory) {
- this.executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, factory);
- executor.prestartAllCoreThreads();
- }
-
- @Override
- public ExecutorService getExecutor() {
- return Executors.unconfigurableExecutorService(executor);
- }
-
- @Override
- public void close() {
- executor.shutdown();
- }
-
- @Override
- public int getMaxThreadCount() {
- return executor.getMaximumPoolSize();
- }
-
- public void setMaxThreadCount(int maxThreadCount) {
- executor.setCorePoolSize(maxThreadCount);
- executor.setMaximumPoolSize(maxThreadCount);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.OptionalInt;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} using flexible number of threads wraps
- * {@link ExecutorService}.
- */
-public class FlexibleThreadPoolWrapper implements ThreadPool, Closeable {
- private final ThreadPoolExecutor executor;
-
- public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
- final TimeUnit timeUnit, final ThreadFactory threadFactory) {
- this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(OptionalInt.empty()));
- }
-
- public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
- final TimeUnit timeUnit, final ThreadFactory threadFactory, final OptionalInt queueCapacity) {
- this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(queueCapacity));
- }
-
- private FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
- final TimeUnit timeUnit, final ThreadFactory threadFactory, final BlockingQueue<Runnable> queue) {
-
- executor = new ThreadPoolExecutor(minThreadCount, maxThreadCount, keepAlive, timeUnit,
- queue, threadFactory, new FlexibleRejectionHandler());
- executor.prestartAllCoreThreads();
- }
-
- /**
- * Overriding the queue:
- * ThreadPoolExecutor would not create new threads if the queue is not full, thus adding
- * occurs in RejectedExecutionHandler.
- * This impl saturates threadpool first, then queue. When both are full caller will get blocked.
- */
- private static ForwardingBlockingQueue getQueue(final OptionalInt capacity) {
- final BlockingQueue<Runnable> delegate = capacity.isPresent() ? new LinkedBlockingQueue<>(capacity.getAsInt())
- : new LinkedBlockingQueue<>();
- return new ForwardingBlockingQueue(delegate);
- }
-
- @Override
- public ExecutorService getExecutor() {
- return Executors.unconfigurableExecutorService(executor);
- }
-
- public int getMinThreadCount() {
- return executor.getCorePoolSize();
- }
-
- public void setMinThreadCount(final int minThreadCount) {
- executor.setCorePoolSize(minThreadCount);
- }
-
- @Override
- public int getMaxThreadCount() {
- return executor.getMaximumPoolSize();
- }
-
- public void setMaxThreadCount(final int maxThreadCount) {
- executor.setMaximumPoolSize(maxThreadCount);
- }
-
- public long getKeepAliveMillis() {
- return executor.getKeepAliveTime(TimeUnit.MILLISECONDS);
- }
-
- public void setKeepAliveMillis(final long keepAliveMillis) {
- executor.setKeepAliveTime(keepAliveMillis, TimeUnit.MILLISECONDS);
- }
-
- public void setThreadFactory(final ThreadFactory threadFactory) {
- executor.setThreadFactory(threadFactory);
- }
-
- public void prestartAllCoreThreads() {
- executor.prestartAllCoreThreads();
- }
-
- @Override
- public void close() {
- executor.shutdown();
- }
-
- /**
- * if the max threads are met, then it will raise a rejectedExecution. We then push to the queue.
- */
- private static class FlexibleRejectionHandler implements RejectedExecutionHandler {
- @Override
- @SuppressWarnings("checkstyle:parameterName")
- public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
- try {
- executor.getQueue().put(r);
- } catch (InterruptedException e) {
- throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
- }
- }
- }
-
- private static class ForwardingBlockingQueue
- extends com.google.common.util.concurrent.ForwardingBlockingQueue<Runnable> {
- private final BlockingQueue<Runnable> delegate;
-
- ForwardingBlockingQueue(final BlockingQueue<Runnable> delegate) {
- this.delegate = delegate;
- }
-
- @Override
- protected BlockingQueue<Runnable> delegate() {
- return delegate;
- }
-
- @Override
- @SuppressWarnings("checkstyle:parameterName")
- public boolean offer(final Runnable o) {
- // ThreadPoolExecutor will spawn a new thread after core size is reached only
- // if the queue.offer returns false.
- return false;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.threadpool.util;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.Closeable;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Implementation of {@link ThreadFactory}. This class is thread-safe.
- */
-public class NamingThreadPoolFactory implements ThreadFactory, Closeable {
-
- private final ThreadGroup group;
- private final String namePrefix;
- private final AtomicLong threadName = new AtomicLong();
-
- public NamingThreadPoolFactory(final String namePrefix) {
- this.namePrefix = requireNonNull(namePrefix);
- this.group = new ThreadGroup(namePrefix);
- }
-
- @Override
- @SuppressWarnings("checkstyle:parameterName")
- public Thread newThread(final Runnable r) {
- return new Thread(group, r, String.format("%s-%d", group.getName(), threadName.incrementAndGet()));
- }
-
- @Override
- public void close() {
- }
-
- public String getNamePrefix() {
- return namePrefix;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.ThreadFactory;
-import org.opendaylight.controller.config.threadpool.ScheduledThreadPool;
-
-/**
- * Implementation of {@link ScheduledThreadPool} wraps
- * {@link ScheduledExecutorService}.
- */
-public class ScheduledThreadPoolWrapper implements ScheduledThreadPool, Closeable {
-
- private final ScheduledThreadPoolExecutor executor;
- private final int threadCount;
-
- public ScheduledThreadPoolWrapper(int threadCount, ThreadFactory factory) {
- this.threadCount = threadCount;
- this.executor = new ScheduledThreadPoolExecutor(threadCount, factory);
- executor.prestartAllCoreThreads();
- }
-
- @Override
- public ScheduledExecutorService getExecutor() {
- return Executors.unconfigurableScheduledExecutorService(executor);
- }
-
- @Override
- public void close() {
- executor.shutdown();
- }
-
- @Override
- public int getMaxThreadCount() {
- return threadCount;
- }
-
-}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
-
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>concepts</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-api</artifactId>
</dependency>
-
- <!-- Needed for serialization of yang-data-api objects -->
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-spi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-binfmt</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-clustering-commons</artifactId>
+ <artifactId>repackaged-akka</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
</dependency>
<!-- Testing dependencies -->
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>mockito-configuration</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-ri</artifactId>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
</dependency>
</dependencies>
<build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>3.1.1</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
import static com.google.common.base.Preconditions.checkArgument;
-import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import java.io.DataInput;
import java.io.DataOutput;
/**
* Enumeration of all ABI versions supported by this implementation of the client access API.
- *
- * @author Robert Varga
*/
-@Beta
public enum ABIVersion implements WritableObject {
// NOTE: enumeration values need to be sorted in ascending order of their version to keep Comparable working
}
},
+ // BORON was 5
+ // NEON_SR2 was 6
+ // SODIUM_SR1 was 7
+ // MAGNESIUM was 8
+ // CHLORINE_SR2 was 9
+
/**
- * Initial ABI version, as shipped with Boron Simultaneous release.
- */
- // We seed the initial version to be the same as DataStoreVersions.BORON-VERSION for compatibility reasons.
- BORON(5) {
- @Override
- public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.LITHIUM;
- }
- },
- /**
- * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
- * are using {@link NormalizedNodeStreamVersion#NEON_SR2}, which improves encoding.
- */
- NEON_SR2(6) {
- @Override
- public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.NEON_SR2;
- }
- },
- /**
- * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
- * are using {@link NormalizedNodeStreamVersion#SODIUM_SR1}, which improves encoding.
- */
- SODIUM_SR1(7) {
- @Override
- public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.SODIUM_SR1;
- }
- },
- /**
- * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
- * are using {@link NormalizedNodeStreamVersion#MAGNESIUM}, which improves encoding.
+ * Oldest ABI version we support. The messages remain the same as {@code CHLORINE_SR2}, the serialization proxies in
+ * use are flat objects without any superclasses. Data encoding does not include augmentations as separate objects.
*/
- MAGNESIUM(8) {
+ POTASSIUM(10) {
@Override
public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.MAGNESIUM;
+ return NormalizedNodeStreamVersion.POTASSIUM;
}
},
* @return Current {@link ABIVersion}
*/
public static @NonNull ABIVersion current() {
- return SODIUM_SR1;
+ return POTASSIUM;
}
/**
* @throws PastVersionException if the specified integer identifies a past version which is no longer supported
*/
public static @NonNull ABIVersion valueOf(final short value) throws FutureVersionException, PastVersionException {
- switch (Short.toUnsignedInt(value)) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- throw new PastVersionException(value, BORON);
- case 5:
- return BORON;
- case 6:
- return NEON_SR2;
- case 7:
- return SODIUM_SR1;
- case 8:
- return MAGNESIUM;
- default:
- throw new FutureVersionException(value, MAGNESIUM);
- }
+ return switch (Short.toUnsignedInt(value)) {
+ case 0, 1, 2, 3, 4, 6, 7, 8, 9 -> throw new PastVersionException(value, POTASSIUM);
+ case 10 -> POTASSIUM;
+ default -> throw new FutureVersionException(value, POTASSIUM);
+ };
+ }
+
+ /**
+ * Return {@code true} if this version is earier than some {@code other} version.
+ *
+ * @param other Other {@link ABIVersion}
+ * @return {@code true} if {@code other is later}
+ * @throws NullPointerException if {@code other} is null
+ */
+ public boolean lt(final @NonNull ABIVersion other) {
+ return compareTo(other) < 0;
}
@Override
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import org.eclipse.jdt.annotation.NonNull;
/**
* Abstract base exception used for reporting version mismatches from {@link ABIVersion}.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class AbstractVersionException extends Exception {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
private final @NonNull ABIVersion closestVersion;
private final int version;
*
* @return Numeric version
*/
- public final int getVersion() {
+ public final int version() {
return version;
}
*
* @return Closest supported {@link ABIVersion}
*/
- public final @NonNull ABIVersion getClosestVersion() {
+ public final @NonNull ABIVersion closestVersion() {
return closestVersion;
}
-
}
*/
package org.opendaylight.controller.cluster.access;
-import com.google.common.annotations.Beta;
-
/**
* Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too new to be supported
* by the codebase.
- *
- * @author Robert Varga
*/
-@Beta
public final class FutureVersionException extends AbstractVersionException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- FutureVersionException(final short version, ABIVersion closest) {
+ FutureVersionException(final short version, final ABIVersion closest) {
super("Version " + Short.toUnsignedInt(version) + " is too new", version, closest);
}
}
*/
package org.opendaylight.controller.cluster.access;
-import com.google.common.annotations.Beta;
-
/**
* Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too old and no longer
* supported by the codebase.
- *
- * @author Robert Varga
*/
-@Beta
public final class PastVersionException extends AbstractVersionException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
PastVersionException(final short version, final ABIVersion closest) {
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
* Request to abort a local transaction. Since local transactions do not introduce state on the backend until they
* are ready, the purpose of this message is to inform the backend that a message identifier has been used. This is
* not important for single transactions, but is critical to ensure transaction ordering within local histories.
- *
- * @author Robert Varga
*/
-@Beta
public final class AbortLocalTransactionRequest extends AbstractLocalTransactionRequest<AbortLocalTransactionRequest> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public AbortLocalTransactionRequest(final @NonNull TransactionIdentifier identifier,
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link LocalHistoryRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractLocalHistoryRequestProxy<T extends LocalHistoryRequest<T>>
- extends AbstractRequestProxy<LocalHistoryIdentifier, T> {
- private static final long serialVersionUID = 1L;
-
- protected AbstractLocalHistoryRequestProxy() {
- // For Externalizable
- }
-
- AbstractLocalHistoryRequestProxy(final T request) {
- super(request);
- }
-
- @Override
- protected final LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-}
*/
public abstract class AbstractLocalTransactionRequest<T extends AbstractLocalTransactionRequest<T>>
extends TransactionRequest<T> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
AbstractLocalTransactionRequest(final TransactionIdentifier identifier, final long sequence,
}
@Override
- protected final AbstractTransactionRequestProxy<T> externalizableProxy(final ABIVersion version) {
+ protected final SerialForm<T> externalizableProxy(final ABIVersion version) {
throw new UnsupportedOperationException("Local transaction request " + this + " should never be serialized");
}
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
/**
* Abstract base class for {@link TransactionRequest}s accessing data as visible in the isolated context of a particular
* This class is visible outside of this package for the purpose of allowing common instanceof checks
* and simplified codepaths.
*
- * @author Robert Varga
- *
* @param <T> Message type
*/
-@Beta
public abstract class AbstractReadPathTransactionRequest<T extends AbstractReadPathTransactionRequest<T>>
extends AbstractReadTransactionRequest<T> {
+ interface SerialForm<T extends AbstractReadPathTransactionRequest<T>>
+ extends AbstractReadTransactionRequest.SerialForm<T> {
+
+ @Override
+ default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+ final ActorRef replyTo, final boolean snapshotOnly) throws IOException {
+ return readExternal(in, target, sequence, replyTo, snapshotOnly,
+ NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier());
+ }
+
+ @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+ @NonNull ActorRef replyTo, boolean snapshotOnly, @NonNull YangInstanceIdentifier path) throws IOException;
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+ AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+ try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+ nnout.writeYangInstanceIdentifier(msg.getPath());
+ }
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final @NonNull YangInstanceIdentifier path;
}
@Override
- protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadPathTransactionRequestProxyV1<T extends AbstractReadPathTransactionRequest<T>>
- extends AbstractReadTransactionRequestProxyV1<T> {
- private static final long serialVersionUID = 1L;
-
- private YangInstanceIdentifier path;
- private transient NormalizedNodeStreamVersion streamVersion;
-
- protected AbstractReadPathTransactionRequestProxyV1() {
- // For Externalizable
- }
-
- AbstractReadPathTransactionRequestProxyV1(final T request) {
- super(request);
- path = request.getPath();
- streamVersion = request.getVersion().getStreamVersion();
- }
-
- @Override
- public final void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
- nnout.writeYangInstanceIdentifier(path);
- }
- }
-
- @Override
- public final void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
- super.readExternal(in);
- path = NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier();
- }
-
- @Override
- protected final T createReadRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo, final boolean snapshotOnly) {
- return createReadPathRequest(target, sequence, replyTo, path, snapshotOnly);
- }
-
- abstract T createReadPathRequest(TransactionIdentifier target, long sequence, ActorRef replyTo,
- YangInstanceIdentifier requestPath, boolean snapshotOnly);
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
* This class is visible outside of this package for the purpose of allowing common instanceof checks
* and simplified codepaths.
*
- * @author Robert Varga
- *
* @param <T> Message type
*/
-@Beta
public abstract class AbstractReadTransactionRequest<T extends AbstractReadTransactionRequest<T>>
extends TransactionRequest<T> {
+ interface SerialForm<T extends AbstractReadTransactionRequest<T>> extends TransactionRequest.SerialForm<T> {
+ @Override
+ default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+ final ActorRef replyTo) throws IOException {
+ return readExternal(in, target, sequence, replyTo, in.readBoolean());
+ }
+
+ @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+ @NonNull ActorRef replyTo, boolean snapshotOnly) throws IOException;
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+ TransactionRequest.SerialForm.super.writeExternal(out, msg);
+ out.writeBoolean(msg.isSnapshotOnly());
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final boolean snapshotOnly;
}
@Override
- protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadTransactionRequestProxyV1<T extends AbstractReadTransactionRequest<T>>
- extends AbstractTransactionRequestProxy<T> {
- private static final long serialVersionUID = 1L;
- private boolean snapshotOnly;
-
- protected AbstractReadTransactionRequestProxyV1() {
- // For Externalizable
- }
-
- AbstractReadTransactionRequestProxyV1(final T request) {
- super(request);
- snapshotOnly = request.isSnapshotOnly();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- out.writeBoolean(snapshotOnly);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
- super.readExternal(in);
- snapshotOnly = in.readBoolean();
- }
-
- @Override
- protected final T createRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
- return createReadRequest(target, sequence, replyTo, snapshotOnly);
- }
-
- @SuppressWarnings("checkstyle:hiddenField")
- abstract T createReadRequest(TransactionIdentifier target, long sequence, ActorRef replyTo, boolean snapshotOnly);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionRequestProxy<T extends TransactionRequest<T>>
- extends AbstractRequestProxy<TransactionIdentifier, T> {
- private static final long serialVersionUID = 1L;
-
- protected AbstractTransactionRequestProxy() {
- // For Externalizable
- }
-
- AbstractTransactionRequestProxy(final T request) {
- super(request);
- }
-
- @Override
- protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
- return TransactionIdentifier.readFrom(in);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionSuccess}es.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionSuccessProxy<T extends TransactionSuccess<T>>
- extends AbstractSuccessProxy<TransactionIdentifier, T> {
- private static final long serialVersionUID = 1L;
-
- protected AbstractTransactionSuccessProxy() {
- // For Externalizable
- }
-
- AbstractTransactionSuccessProxy(final T request) {
- super(request);
- }
-
- @Override
- protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
- return TransactionIdentifier.readFrom(in);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCF implements ConnectClientFailure.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ConnectClientFailure message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CCF() {
+ // for Externalizable
+ }
+
+ CCF(final ConnectClientFailure request) {
+ message = requireNonNull(request);
+ }
+
+ @Override
+ public ConnectClientFailure message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ConnectClientFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCR implements ConnectClientRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ConnectClientRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CCR() {
+ // for Externalizable
+ }
+
+ CCR(final ConnectClientRequest request) {
+ message = requireNonNull(request);
+ }
+
+ @Override
+ public ConnectClientRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ConnectClientRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCS implements ConnectClientSuccess.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ConnectClientSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CCS() {
+ // for Externalizable
+ }
+
+ CCS(final ConnectClientSuccess request) {
+ message = requireNonNull(request);
+ }
+
+ @Override
+ public ConnectClientSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ConnectClientSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class CHR implements CreateLocalHistoryRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private CreateLocalHistoryRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CHR() {
+ // For Externalizable
+ }
+
+ CHR(final CreateLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public CreateLocalHistoryRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final CreateLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* been closed, either via a successful commit or abort (which is indicated via {@link #isSuccessful()}. This can
* happen if the corresponding journal record is replicated, but the message to the frontend gets lost and the backed
* leader moved before the frontend retried the corresponding request.
- *
- * @author Robert Varga
*/
-@Beta
public final class ClosedTransactionException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final boolean successful;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* Request to commit a local transaction. Since local transactions do not introduce state on the backend until they
* are ready, this message carries a complete set of modifications.
- *
- * @author Robert Varga
*/
-@Beta
public final class CommitLocalTransactionRequest
extends AbstractLocalTransactionRequest<CommitLocalTransactionRequest> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
- + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
- + "aren't serialized. FindBugs does not recognize this.")
private final DataTreeModification mod;
private final Exception delayedFailure;
private final boolean coordinated;
return super.addToStringAttributes(toStringHelper).add("coordinated", coordinated)
.add("delayedError", delayedFailure);
}
+
+ @java.io.Serial
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void readObjectNoData() throws ObjectStreamException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ throwNSE();
+ }
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
/**
* A {@link RequestFailure} reported when {@link ConnectClientRequest} fails.
- *
- * @author Robert Varga
*/
-@Beta
public final class ConnectClientFailure extends RequestFailure<ClientIdentifier, ConnectClientFailure> {
+ interface SerialForm extends RequestFailure.SerialForm<ClientIdentifier, ConnectClientFailure> {
+ @Override
+ default ClientIdentifier readTarget(final DataInput in) throws IOException {
+ return ClientIdentifier.readFrom(in);
+ }
+
+ @Override
+ default ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
+ final RequestException cause) {
+ return new ConnectClientFailure(target, sequence, cause);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
ConnectClientFailure(final ClientIdentifier target, final long sequence, final RequestException cause) {
}
@Override
- protected AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> externalizableProxy(
- final ABIVersion version) {
- return new ConnectClientFailureProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new CCF(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Serialization proxy for use with {@link ConnectClientFailure}. This class implements initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientFailureProxyV1 extends AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> {
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ConnectClientFailureProxyV1() {
- // For Externalizable
- }
-
- ConnectClientFailureProxyV1(final ConnectClientFailure failure) {
- super(failure);
- }
-
- @Override
- protected ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
- final RequestException cause) {
- return new ConnectClientFailure(target, sequence, cause);
- }
-
- @Override
- protected ClientIdentifier readTarget(final DataInput in) throws IOException {
- return ClientIdentifier.readFrom(in);
- }
-}
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
*
* <p>
* It also includes request stream sequencing information.
- *
- * @author Robert Varga
*/
-@Beta
public final class ConnectClientRequest extends Request<ClientIdentifier, ConnectClientRequest> {
+ interface SerialForm extends Request.SerialForm<ClientIdentifier, ConnectClientRequest> {
+ @Override
+ default ConnectClientRequest readExternal(final ObjectInput in, final ClientIdentifier target,
+ final long sequence, final ActorRef replyTo) throws IOException {
+ return new ConnectClientRequest(target, sequence, replyTo, ABIVersion.inexactReadFrom(in),
+ ABIVersion.inexactReadFrom(in));
+ }
+
+ @Override
+ default ClientIdentifier readTarget(final DataInput in) throws IOException {
+ return ClientIdentifier.readFrom(in);
+ }
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final ConnectClientRequest msg) throws IOException {
+ Request.SerialForm.super.writeExternal(out, msg);
+ msg.getMinVersion().writeTo(out);
+ msg.getMaxVersion().writeTo(out);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final ABIVersion minVersion;
private ConnectClientRequest(final ConnectClientRequest request, final ABIVersion version) {
super(request, version);
- this.minVersion = request.minVersion;
- this.maxVersion = request.maxVersion;
+ minVersion = request.minVersion;
+ maxVersion = request.maxVersion;
}
public ABIVersion getMinVersion() {
}
@Override
- protected AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> externalizableProxy(
- final ABIVersion version) {
- return new ConnectClientRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new CCR(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientRequestProxyV1 extends AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> {
- private ABIVersion minVersion;
- private ABIVersion maxVersion;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ConnectClientRequestProxyV1() {
- // for Externalizable
- }
-
- ConnectClientRequestProxyV1(final ConnectClientRequest request) {
- super(request);
- this.minVersion = request.getMinVersion();
- this.maxVersion = request.getMaxVersion();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- minVersion.writeTo(out);
- maxVersion.writeTo(out);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- minVersion = ABIVersion.inexactReadFrom(in);
- maxVersion = ABIVersion.inexactReadFrom(in);
- }
-
- @Override
- protected ConnectClientRequest createRequest(final ClientIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new ConnectClientRequest(target, sequence, replyTo, minVersion, maxVersion);
- }
-
- @Override
- protected ClientIdentifier readTarget(final DataInput in) throws IOException {
- return ClientIdentifier.readFrom(in);
- }
-}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
/**
* Successful reply to an {@link ConnectClientRequest}. Client actor which initiated this connection should use
* the version reported via {@link #getVersion()} of this message to communicate with this backend. Should this backend
* fail, the client can try accessing the provided alternates.
- *
- * @author Robert Varga
*/
-@Beta
public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier, ConnectClientSuccess> {
- private static final long serialVersionUID = 1L;
+ interface SerialForm extends RequestSuccess.SerialForm<ClientIdentifier, ConnectClientSuccess> {
+ @Override
+ default ClientIdentifier readTarget(final DataInput in) throws IOException {
+ return ClientIdentifier.readFrom(in);
+ }
+
+ @Override
+ default ConnectClientSuccess readExternal(final ObjectInput in, final ClientIdentifier target,
+ final long sequence) throws IOException, ClassNotFoundException {
+ final var backend = JavaSerializer.currentSystem().value().provider()
+ .resolveActorRef((String) in.readObject());
+ final var maxMessages = in.readInt();
+
+ final int alternatesSize = in.readInt();
+ final var alternates = new ArrayList<ActorSelection>(alternatesSize);
+ for (int i = 0; i < alternatesSize; ++i) {
+ alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
+ }
+
+ return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null);
+ }
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final ConnectClientSuccess msg) throws IOException {
+ out.writeObject(Serialization.serializedActorPath(msg.backend));
+ out.writeInt(msg.maxMessages);
+
+ out.writeInt(msg.alternates.size());
+ for (ActorSelection b : msg.alternates) {
+ out.writeObject(b.toSerializationFormat());
+ }
+
+ // We are ignoring the DataTree, it is not serializable anyway
+ }
+ }
- @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
- + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
- + "aren't serialized. FindBugs does not recognize this.")
- private final @NonNull List<ActorSelection> alternates;
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
- @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See justification above.")
+ private final @NonNull ImmutableList<ActorSelection> alternates;
private final ReadOnlyDataTree dataTree;
private final @NonNull ActorRef backend;
private final int maxMessages;
+ private ConnectClientSuccess(final ConnectClientSuccess success, final ABIVersion version) {
+ super(success, version);
+ alternates = success.alternates;
+ dataTree = success.dataTree;
+ backend = success.backend;
+ maxMessages = success.maxMessages;
+ }
+
ConnectClientSuccess(final ClientIdentifier target, final long sequence, final ActorRef backend,
final List<ActorSelection> alternates, final int maxMessages, final ReadOnlyDataTree dataTree) {
super(target, sequence);
}
@Override
- protected ConnectClientSuccessProxyV1 externalizableProxy(final ABIVersion version) {
- return new ConnectClientSuccessProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new CCS(this);
}
@Override
protected ConnectClientSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new ConnectClientSuccess(this, version);
}
@Override
return super.addToStringAttributes(toStringHelper).add("alternates", alternates)
.add("dataTree present", getDataTree().isPresent()).add("maxMessages", maxMessages);
}
+
+ @java.io.Serial
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void readObjectNoData() throws ObjectStreamException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ throwNSE();
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientSuccessProxyV1 extends AbstractSuccessProxy<ClientIdentifier, ConnectClientSuccess> {
- private static final long serialVersionUID = 1L;
-
- private List<ActorSelection> alternates;
- private ActorRef backend;
- private int maxMessages;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ConnectClientSuccessProxyV1() {
- // For Externalizable
- }
-
- ConnectClientSuccessProxyV1(final ConnectClientSuccess success) {
- super(success);
- this.alternates = success.getAlternates();
- this.backend = success.getBackend();
- this.maxMessages = success.getMaxMessages();
- // We are ignoring the DataTree, it is not serializable anyway
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
-
- out.writeObject(Serialization.serializedActorPath(backend));
- out.writeInt(maxMessages);
-
- out.writeInt(alternates.size());
- for (ActorSelection b : alternates) {
- out.writeObject(b.toSerializationFormat());
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
-
- backend = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
- maxMessages = in.readInt();
-
- final int alternatesSize = in.readInt();
- alternates = new ArrayList<>(alternatesSize);
- for (int i = 0; i < alternatesSize; ++i) {
- alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
- }
- }
-
- @Override
- protected ConnectClientSuccess createSuccess(final ClientIdentifier target, final long sequence) {
- return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null);
- }
-
- @Override
- protected ClientIdentifier readTarget(final DataInput in) throws IOException {
- return ClientIdentifier.readFrom(in);
- }
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
/**
* Request to create a new local history.
- *
- * @author Robert Varga
*/
-@Beta
public final class CreateLocalHistoryRequest extends LocalHistoryRequest<CreateLocalHistoryRequest> {
+ interface SerialForm extends LocalHistoryRequest.SerialForm<CreateLocalHistoryRequest> {
+ @Override
+ default CreateLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new CreateLocalHistoryRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public CreateLocalHistoryRequest(final LocalHistoryIdentifier target, final ActorRef replyTo) {
}
@Override
- protected AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> externalizableProxy(
- final ABIVersion version) {
- return new CreateLocalHistoryRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new CHR(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class CreateLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public CreateLocalHistoryRequestProxyV1() {
- // For Externalizable
- }
-
- CreateLocalHistoryRequestProxyV1(final CreateLocalHistoryRequest request) {
- super(request);
- }
-
- @Override
- protected CreateLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new CreateLocalHistoryRequest(target, sequence, replyTo);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class DHR implements DestroyLocalHistoryRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private DestroyLocalHistoryRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DHR() {
+ // for Externalizable
+ }
+
+ DHR(final DestroyLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public DestroyLocalHistoryRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final DestroyLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import com.google.common.collect.RangeSet;
import com.google.common.primitives.UnsignedLong;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* A {@link RequestException} indicating that the backend has received a request to create a history which has already
* been retired.
- *
- * @author Robert Varga
*/
-@Beta
public final class DeadHistoryException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public DeadHistoryException(final RangeSet<UnsignedLong> purgedHistories) {
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import com.google.common.collect.ImmutableRangeSet;
import com.google.common.collect.RangeSet;
import com.google.common.primitives.UnsignedLong;
/**
* A {@link RequestException} indicating that the backend has received a request to create a transaction which has
* already been purged.
- *
- * @author Robert Varga
*/
-@Beta
public final class DeadTransactionException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final RangeSet<UnsignedLong> purgedIdentifiers;
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
/**
* Request to destroy a local history.
- *
- * @author Robert Varga
*/
-@Beta
public final class DestroyLocalHistoryRequest extends LocalHistoryRequest<DestroyLocalHistoryRequest> {
+ interface SerialForm extends LocalHistoryRequest.SerialForm<DestroyLocalHistoryRequest> {
+ @Override
+ default DestroyLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new DestroyLocalHistoryRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public DestroyLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence,
}
@Override
- protected AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> externalizableProxy(
- final ABIVersion version) {
- return new DestroyLocalHistoryRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new DHR(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class DestroyLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public DestroyLocalHistoryRequestProxyV1() {
- // For Externalizable
- }
-
- DestroyLocalHistoryRequestProxyV1(final DestroyLocalHistoryRequest request) {
- super(request);
- }
-
- @Override
- protected DestroyLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new DestroyLocalHistoryRequest(target, sequence, replyTo);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETR implements ExistsTransactionRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ExistsTransactionRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public ETR() {
+ // for Externalizable
+ }
+
+ ETR(final ExistsTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ExistsTransactionRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ExistsTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETS implements TransactionSuccess.SerialForm<ExistsTransactionSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ExistsTransactionSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public ETS() {
+ // for Externalizable
+ }
+
+ ETS(final ExistsTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ExistsTransactionSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ExistsTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out, final ExistsTransactionSuccess msg) throws IOException {
+ out.writeBoolean(msg.getExists());
+ }
+
+ @Override
+ public ExistsTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) throws IOException {
+ return new ExistsTransactionSuccess(target, sequence, in.readBoolean());
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* A transaction request to query if a particular path exists in the current view of a particular transaction.
- *
- * @author Robert Varga
*/
-@Beta
public final class ExistsTransactionRequest extends AbstractReadPathTransactionRequest<ExistsTransactionRequest> {
+ interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ExistsTransactionRequest> {
+ @Override
+ default ExistsTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+ throws IOException {
+ return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public ExistsTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
- final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
+ final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
super(identifier, sequence, replyTo, path, snapshotOnly);
}
}
@Override
- protected ExistsTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new ExistsTransactionRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new ETR(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionRequestProxyV1 extends
- AbstractReadPathTransactionRequestProxyV1<ExistsTransactionRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ExistsTransactionRequestProxyV1() {
- // For Externalizable
- }
-
- ExistsTransactionRequestProxyV1(final ExistsTransactionRequest request) {
- super(request);
- }
-
- @Override
- ExistsTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
- return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* Successful reply to an {@link ExistsTransactionRequest}. It indicates presence of requested data via
* {@link #getExists()}.
- *
- * @author Robert Varga
*/
-@Beta
public final class ExistsTransactionSuccess extends TransactionSuccess<ExistsTransactionSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
private final boolean exists;
+ private ExistsTransactionSuccess(final ExistsTransactionSuccess success, final ABIVersion version) {
+ super(success, version);
+ exists = success.exists;
+ }
+
public ExistsTransactionSuccess(final TransactionIdentifier target, final long sequence, final boolean exists) {
super(target, sequence);
this.exists = exists;
}
@Override
- protected ExistsTransactionSuccessProxyV1 externalizableProxy(final ABIVersion version) {
- return new ExistsTransactionSuccessProxyV1(this);
+ protected ETS externalizableProxy(final ABIVersion version) {
+ return new ETS(this);
}
@Override
protected ExistsTransactionSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new ExistsTransactionSuccess(this, version);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ExistsTransactionSuccess> {
- private static final long serialVersionUID = 1L;
- private boolean exists;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ExistsTransactionSuccessProxyV1() {
- // For Externalizable
- }
-
- ExistsTransactionSuccessProxyV1(final ExistsTransactionSuccess request) {
- super(request);
- this.exists = request.getExists();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- out.writeBoolean(exists);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- exists = in.readBoolean();
- }
-
- @Override
- protected ExistsTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new ExistsTransactionSuccess(target, sequence, exists);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class HF implements LocalHistoryFailure.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistoryFailure message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public HF() {
+ // for Externalizable
+ }
+
+ HF(final LocalHistoryFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public LocalHistoryFailure message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final LocalHistoryFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistorySuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class HS implements LocalHistorySuccess.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistorySuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public HS() {
+ // for Externalizable
+ }
+
+ HS(final LocalHistorySuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public LocalHistorySuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final LocalHistorySuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceRequest}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSR implements IncrementTransactionSequenceRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private IncrementTransactionSequenceRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public ITSR() {
+ // for Externalizable
+ }
+
+ ITSR(final IncrementTransactionSequenceRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public IncrementTransactionSequenceRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final IncrementTransactionSequenceRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSS implements TransactionSuccess.SerialForm<IncrementTransactionSequenceSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private IncrementTransactionSequenceSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public ITSS() {
+ // for Externalizable
+ }
+
+ ITSS(final IncrementTransactionSequenceSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public IncrementTransactionSequenceSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final IncrementTransactionSequenceSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public IncrementTransactionSequenceSuccess readExternal(final ObjectInput it, final TransactionIdentifier target,
+ final long sequence) {
+ return new IncrementTransactionSequenceSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
*/
package org.opendaylight.controller.cluster.access.commands;
+import static com.google.common.base.Preconditions.checkArgument;
+
import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
/**
* A blank transaction request. This is used to provide backfill requests in converted retransmit scenarios, such as
* when a initial request to a transaction (such as a {@link ReadTransactionRequest}) is satisfied by the backend
* before the need to replay the transaction to a different remote backend.
- *
- * @author Robert Varga
*/
public final class IncrementTransactionSequenceRequest extends
AbstractReadTransactionRequest<IncrementTransactionSequenceRequest> {
+ interface SerialForm extends AbstractReadTransactionRequest.SerialForm<IncrementTransactionSequenceRequest> {
+ @Override
+ default void writeExternal(final ObjectOutput out, final IncrementTransactionSequenceRequest msg)
+ throws IOException {
+ AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+ WritableObjects.writeLong(out, msg.getIncrement());
+ }
+
+ @Override
+ default IncrementTransactionSequenceRequest readExternal(final ObjectInput in,
+ final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
+ final boolean snapshotOnly) throws IOException {
+ return new IncrementTransactionSequenceRequest(target, sequence, replyTo, snapshotOnly,
+ WritableObjects.readLong(in));
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final long increment;
+ public IncrementTransactionSequenceRequest(final IncrementTransactionSequenceRequest request,
+ final ABIVersion version) {
+ super(request, version);
+ increment = request.increment;
+ }
+
public IncrementTransactionSequenceRequest(final TransactionIdentifier identifier, final long sequence,
final ActorRef replyTo, final boolean snapshotOnly, final long increment) {
super(identifier, sequence, replyTo, snapshotOnly);
- Preconditions.checkArgument(increment >= 0);
+ checkArgument(increment >= 0, "Unexpected increment %s", increment);
this.increment = increment;
}
}
@Override
- protected IncrementTransactionSequenceRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new IncrementTransactionSequenceRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new ITSR(this);
}
@Override
protected IncrementTransactionSequenceRequest cloneAsVersion(final ABIVersion targetVersion) {
- return this;
+ return new IncrementTransactionSequenceRequest(this, targetVersion);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-final class IncrementTransactionSequenceRequestProxyV1
- extends AbstractReadTransactionRequestProxyV1<IncrementTransactionSequenceRequest> {
- private long increment;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public IncrementTransactionSequenceRequestProxyV1() {
- // For Externalizable
- }
-
- IncrementTransactionSequenceRequestProxyV1(final IncrementTransactionSequenceRequest request) {
- super(request);
- this.increment = request.getIncrement();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- WritableObjects.writeLong(out, increment);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
- super.readExternal(in);
- increment = WritableObjects.readLong(in);
- }
-
- @Override
- IncrementTransactionSequenceRequest createReadRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyToActor, final boolean snapshotOnly) {
- return new IncrementTransactionSequenceRequest(target, sequence, replyToActor, snapshotOnly, increment);
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* Successful reply to an {@link IncrementTransactionSequenceRequest}.
- *
- * @author Robert Varga
*/
-@Beta
public final class IncrementTransactionSequenceSuccess extends TransactionSuccess<IncrementTransactionSequenceSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private IncrementTransactionSequenceSuccess(final IncrementTransactionSequenceSuccess success,
+ final ABIVersion version) {
+ super(success, version);
+ }
+
public IncrementTransactionSequenceSuccess(final TransactionIdentifier target, final long sequence) {
super(target, sequence);
}
@Override
- protected IncrementTransactionSequenceSuccessProxyV1 externalizableProxy(final ABIVersion version) {
- return new IncrementTransactionSequenceSuccessProxyV1(this);
+ protected ITSS externalizableProxy(final ABIVersion version) {
+ return new ITSS(this);
}
@Override
protected IncrementTransactionSequenceSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new IncrementTransactionSequenceSuccess(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class IncrementTransactionSequenceSuccessProxyV1
- extends AbstractTransactionSuccessProxy<IncrementTransactionSequenceSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public IncrementTransactionSequenceSuccessProxyV1() {
- // For Externalizable
- }
-
- IncrementTransactionSequenceSuccessProxyV1(final IncrementTransactionSequenceSuccess request) {
- super(request);
- }
-
- @Override
- protected IncrementTransactionSequenceSuccess createSuccess(final TransactionIdentifier target,
- final long sequence) {
- return new IncrementTransactionSequenceSuccess(target, sequence);
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* Generic {@link RequestFailure} involving a {@link LocalHistoryRequest}.
- *
- * @author Robert Varga
*/
-@Beta
public final class LocalHistoryFailure extends RequestFailure<LocalHistoryIdentifier, LocalHistoryFailure> {
+ interface SerialForm extends RequestFailure.SerialForm<LocalHistoryIdentifier, LocalHistoryFailure> {
+ @Override
+ default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+ return LocalHistoryIdentifier.readFrom(in);
+ }
+
+ @Override
+ default LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
+ final RequestException cause) {
+ return new LocalHistoryFailure(target, sequence, cause);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private LocalHistoryFailure(final LocalHistoryFailure failure, final ABIVersion version) {
+ super(failure, version);
+ }
+
LocalHistoryFailure(final LocalHistoryIdentifier target, final long sequence, final RequestException cause) {
super(target, sequence, cause);
}
@Override
- protected LocalHistoryFailure cloneAsVersion(final ABIVersion version) {
- return this;
+ protected LocalHistoryFailure cloneAsVersion(final ABIVersion targetVersion) {
+ return new LocalHistoryFailure(this, targetVersion);
}
@Override
- protected LocalHistoryFailureProxyV1 externalizableProxy(final ABIVersion version) {
- return new LocalHistoryFailureProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new HF(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class LocalHistoryFailureProxyV1 extends
- AbstractRequestFailureProxy<LocalHistoryIdentifier, LocalHistoryFailure> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public LocalHistoryFailureProxyV1() {
- // For Externalizable
- }
-
- LocalHistoryFailureProxyV1(final LocalHistoryFailure failure) {
- super(failure);
- }
-
- @Override
- protected LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
- final RequestException cause) {
- return new LocalHistoryFailure(target, sequence, cause);
- }
-
- @Override
- protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.Preconditions;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
* Abstract base class for {@link Request}s involving specific local history. This class is visible outside of this
* package solely for the ability to perform a unified instanceof check.
*
- * @author Robert Varga
- *
* @param <T> Message type
*/
-@Beta
public abstract class LocalHistoryRequest<T extends LocalHistoryRequest<T>> extends Request<LocalHistoryIdentifier, T> {
+ interface SerialForm<T extends LocalHistoryRequest<T>> extends Request.SerialForm<LocalHistoryIdentifier, T> {
+ @Override
+ default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+ return LocalHistoryIdentifier.readFrom(in);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
LocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
}
@Override
- protected abstract AbstractLocalHistoryRequestProxy<T> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
/**
* Success class for {@link RequestSuccess}es involving a specific local history.
- *
- * @author Robert Varga
*/
-@Beta
public final class LocalHistorySuccess extends RequestSuccess<LocalHistoryIdentifier, LocalHistorySuccess> {
- private static final long serialVersionUID = 1L;
+ interface SerialForm extends RequestSuccess.SerialForm<LocalHistoryIdentifier, LocalHistorySuccess> {
+ @Override
+ default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+ return LocalHistoryIdentifier.readFrom(in);
+ }
- public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
- super(target, sequence);
+ @Override
+ default LocalHistorySuccess readExternal(final ObjectInput it, final LocalHistoryIdentifier target,
+ final long sequence) {
+ return new LocalHistorySuccess(target, sequence);
+ }
}
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
private LocalHistorySuccess(final LocalHistorySuccess success, final ABIVersion version) {
super(success, version);
}
+ public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
+ super(target, sequence);
+ }
+
@Override
protected LocalHistorySuccess cloneAsVersion(final ABIVersion version) {
return new LocalHistorySuccess(this, version);
}
@Override
- protected AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> externalizableProxy(
- final ABIVersion version) {
- return new LocalHistorySuccessProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new HS(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Serialization proxy associated with {@link LocalHistorySuccess}.
- *
- * @author Robert Varga
- */
-final class LocalHistorySuccessProxyV1 extends AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public LocalHistorySuccessProxyV1() {
- // For Externalizable
- }
-
- LocalHistorySuccessProxyV1(final LocalHistorySuccess success) {
- super(success);
- }
-
- @Override
- protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-
- @Override
- protected LocalHistorySuccess createSuccess(final LocalHistoryIdentifier target, final long sequence) {
- return new LocalHistorySuccess(target, sequence);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTR implements ModifyTransactionRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ModifyTransactionRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public MTR() {
+ // for Externalizable
+ }
+
+ MTR(final ModifyTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ModifyTransactionRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ModifyTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTS implements TransactionSuccess.SerialForm<ModifyTransactionSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ModifyTransactionSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public MTS() {
+ // for Externalizable
+ }
+
+ MTS(final ModifyTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ModifyTransactionSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ModifyTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ModifyTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) throws IOException {
+ return new ModifyTransactionSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
/**
* A transaction request to apply a particular set of operations on top of the current transaction. This message is
* used to also finish a transaction by specifying a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
*/
-@Beta
public final class ModifyTransactionRequest extends TransactionRequest<ModifyTransactionRequest>
implements SliceableMessage {
+ interface SerialForm extends TransactionRequest.SerialForm<ModifyTransactionRequest> {
+
+
+ @Override
+ default ModifyTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) throws IOException {
+
+ final var protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
+ final int size = in.readInt();
+ final List<TransactionModification> modifications;
+ if (size != 0) {
+ modifications = new ArrayList<>(size);
+ final var nnin = NormalizedNodeDataInput.newDataInput(in);
+ final var writer = ReusableImmutableNormalizedNodeStreamWriter.create();
+ for (int i = 0; i < size; ++i) {
+ modifications.add(TransactionModification.readFrom(nnin, writer));
+ }
+ } else {
+ modifications = ImmutableList.of();
+ }
+
+ return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
+ }
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final ModifyTransactionRequest msg) throws IOException {
+ TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+ out.writeByte(PersistenceProtocol.byteValue(msg.getPersistenceProtocol().orElse(null)));
+
+ final var modifications = msg.getModifications();
+ out.writeInt(modifications.size());
+ if (!modifications.isEmpty()) {
+ try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+ for (var op : modifications) {
+ op.writeTo(nnout);
+ }
+ }
+ }
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
- + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
- + "aren't serialized. FindBugs does not recognize this.")
private final List<TransactionModification> modifications;
private final PersistenceProtocol protocol;
+ private ModifyTransactionRequest(final ModifyTransactionRequest request, final ABIVersion version) {
+ super(request, version);
+ modifications = request.modifications;
+ protocol = request.protocol;
+ }
+
ModifyTransactionRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
final List<TransactionModification> modifications, final PersistenceProtocol protocol) {
super(target, sequence, replyTo);
}
@Override
- protected ModifyTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new ModifyTransactionRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new MTR(this);
}
@Override
protected ModifyTransactionRequest cloneAsVersion(final ABIVersion version) {
- return this;
+ return new ModifyTransactionRequest(this, version);
+ }
+
+ @java.io.Serial
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void readObjectNoData() throws ObjectStreamException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ throwNSE();
}
}
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import java.util.ArrayList;
import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Builder;
import org.opendaylight.yangtools.concepts.Identifiable;
/**
- * A reusable {@link Builder} for creating {@link ModifyTransactionRequest} message instances. Its internal state is
- * reset when {@link #build()} is invoked, hence it can be used to create a sequence of messages. This class is NOT
- * thread-safe.
- *
- * @author Robert Varga
+ * A reusable builder for creating {@link ModifyTransactionRequest} message instances. Its internal state is reset when
+ * {@link #build()} is invoked, hence it can be used to create a sequence of messages. This class is NOT thread-safe.
*/
-@Beta
-public final class ModifyTransactionRequestBuilder implements Builder<ModifyTransactionRequest>,
- Identifiable<TransactionIdentifier> {
+public final class ModifyTransactionRequestBuilder implements Identifiable<TransactionIdentifier> {
private final List<TransactionModification> modifications = new ArrayList<>(1);
- private final TransactionIdentifier identifier;
+ private final @NonNull TransactionIdentifier identifier;
private final ActorRef replyTo;
private PersistenceProtocol protocol;
return modifications.size();
}
- @Override
- public ModifyTransactionRequest build() {
+ public @NonNull ModifyTransactionRequest build() {
checkState(haveSequence, "Request sequence has not been set");
final ModifyTransactionRequest ret = new ModifyTransactionRequest(identifier, sequence, replyTo, modifications,
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import com.google.common.collect.ImmutableList;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionRequestProxyV1 extends AbstractTransactionRequestProxy<ModifyTransactionRequest> {
- private static final long serialVersionUID = 1L;
-
- private List<TransactionModification> modifications;
- private Optional<PersistenceProtocol> protocol;
- private transient NormalizedNodeStreamVersion streamVersion;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ModifyTransactionRequestProxyV1() {
- // For Externalizable
- }
-
- ModifyTransactionRequestProxyV1(final ModifyTransactionRequest request) {
- super(request);
- this.modifications = requireNonNull(request.getModifications());
- this.protocol = request.getPersistenceProtocol();
- this.streamVersion = request.getVersion().getStreamVersion();
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
-
- protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
-
- final int size = in.readInt();
- if (size != 0) {
- modifications = new ArrayList<>(size);
- final NormalizedNodeDataInput nnin = NormalizedNodeDataInput.newDataInput(in);
- final ReusableImmutableNormalizedNodeStreamWriter writer =
- ReusableImmutableNormalizedNodeStreamWriter.create();
- for (int i = 0; i < size; ++i) {
- modifications.add(TransactionModification.readFrom(nnin, writer));
- }
- } else {
- modifications = ImmutableList.of();
- }
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
-
- out.writeByte(PersistenceProtocol.byteValue(protocol.orElse(null)));
- out.writeInt(modifications.size());
- if (!modifications.isEmpty()) {
- try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
- for (TransactionModification op : modifications) {
- op.writeTo(nnout);
- }
- }
- }
- }
-
- @Override
- protected ModifyTransactionRequest createRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* Response to a {@link ModifyTransactionRequest} which does not have a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
*/
-@Beta
public final class ModifyTransactionSuccess extends TransactionSuccess<ModifyTransactionSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public ModifyTransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
}
@Override
- protected AbstractTransactionSuccessProxy<ModifyTransactionSuccess> externalizableProxy(final ABIVersion version) {
- return new ModifyTransactionSuccessProxyV1(this);
+ protected MTS externalizableProxy(final ABIVersion version) {
+ return new MTS(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ModifyTransactionSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ModifyTransactionSuccessProxyV1() {
- // For Externalizable
- }
-
- ModifyTransactionSuccessProxyV1(final ModifyTransactionSuccess success) {
- super(success);
- }
-
- @Override
- protected ModifyTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new ModifyTransactionSuccess(target, sequence);
- }
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* General error raised when the recipient of a Request is not the correct backend to talk to. This typically
* means that the backend processing has moved and the frontend needs to run rediscovery and retry the request.
- *
- * @author Robert Varga
*/
-@Beta
public final class NotLeaderException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public NotLeaderException(final ActorRef me) {
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* A {@link RequestException} indicating that the backend has received a Request whose sequence does not match the
* next expected sequence for the target. This is a hard error, as it indicates a Request is missing in the stream.
- *
- * @author Robert Varga
*/
-@Beta
public final class OutOfOrderRequestException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public OutOfOrderRequestException(final long expectedRequest) {
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* A {@link RequestException} indicating that the backend has received a RequestEnvelope whose sequence does not match
* the next expected sequence. This can happen during leader transitions, when a part of the stream is rejected because
* the backend is not the leader and it transitions to being a leader with old stream messages still being present.
- *
- * @author Robert Varga
*/
-@Beta
public final class OutOfSequenceEnvelopeException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public OutOfSequenceEnvelopeException(final long expectedEnvelope) {
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class PHR implements PurgeLocalHistoryRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private PurgeLocalHistoryRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public PHR() {
+ // for Externalizable
+ }
+
+ PHR(final PurgeLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public PurgeLocalHistoryRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final PurgeLocalHistoryRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.yangtools.concepts.WritableObject;
/**
* Enumeration of transaction persistence protocols. These govern which protocol is executed between the frontend
* and backend to drive persistence of a particular transaction.
- *
- * @author Robert Varga
*/
-@Beta
public enum PersistenceProtocol implements WritableObject {
/**
* Abort protocol. The transaction has been aborted on the frontend and its effects should not be visible
return finish == null ? 0 : finish.byteValue();
}
- static PersistenceProtocol valueOf(final byte value) {
- switch (value) {
- case 0:
- return null;
- case 1:
- return ABORT;
- case 2:
- return SIMPLE;
- case 3:
- return THREE_PHASE;
- case 4:
- return READY;
- default:
- throw new IllegalArgumentException("Unhandled byte value " + value);
- }
+ static @Nullable PersistenceProtocol valueOf(final byte value) {
+ return switch (value) {
+ case 0 -> null;
+ case 1 -> ABORT;
+ case 2 -> SIMPLE;
+ case 3 -> THREE_PHASE;
+ case 4 -> READY;
+ default -> throw new IllegalArgumentException("Unhandled byte value " + value);
+ };
}
}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
/**
* Request to purge a local history. This request is sent by the client once it receives a successful reply to
* {@link DestroyLocalHistoryRequest} and indicates it has removed all state attached to a particular local history.
- *
- * @author Robert Varga
*/
-@Beta
public final class PurgeLocalHistoryRequest extends LocalHistoryRequest<PurgeLocalHistoryRequest> {
+ interface SerialForm extends LocalHistoryRequest.SerialForm<PurgeLocalHistoryRequest> {
+ @Override
+ default PurgeLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new PurgeLocalHistoryRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public PurgeLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
}
@Override
- protected AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> externalizableProxy(final ABIVersion version) {
- return new PurgeLocalHistoryRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new PHR(this);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class PurgeLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public PurgeLocalHistoryRequestProxyV1() {
- // For Externalizable
- }
-
- PurgeLocalHistoryRequestProxyV1(final PurgeLocalHistoryRequest request) {
- super(request);
- }
-
- @Override
- protected PurgeLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new PurgeLocalHistoryRequest(target, sequence, replyTo);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTR implements ReadTransactionRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ReadTransactionRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public RTR() {
+ // for Externalizable
+ }
+
+ RTR(final ReadTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ReadTransactionRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ReadTransactionRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTS implements TransactionSuccess.SerialForm<ReadTransactionSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ReadTransactionSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public RTS() {
+ // for Externalizable
+ }
+
+ RTS(final ReadTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ReadTransactionSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final ReadTransactionSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public ReadTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) throws IOException {
+ final Optional<NormalizedNode> data;
+ if (in.readBoolean()) {
+ data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
+ } else {
+ data = Optional.empty();
+ }
+ return new ReadTransactionSuccess(target, sequence, data);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out, final ReadTransactionSuccess msg) throws IOException {
+ TransactionSuccess.SerialForm.super.writeExternal(out, msg);
+
+ final var data = msg.getData();
+ if (data.isPresent()) {
+ out.writeBoolean(true);
+ try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+ nnout.writeNormalizedNode(data.orElseThrow());
+ }
+ } else {
+ out.writeBoolean(false);
+ }
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* A transaction request to read a particular path exists in the current view of a particular transaction.
- *
- * @author Robert Varga
*/
-@Beta
public final class ReadTransactionRequest extends AbstractReadPathTransactionRequest<ReadTransactionRequest> {
+ interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ReadTransactionRequest> {
+ @Override
+ default ReadTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+ throws IOException {
+ return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public ReadTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
}
@Override
- protected ReadTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new ReadTransactionRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new RTR(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionRequestProxyV1 extends AbstractReadPathTransactionRequestProxyV1<ReadTransactionRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ReadTransactionRequestProxyV1() {
- // For Externalizable
- }
-
- ReadTransactionRequestProxyV1(final ReadTransactionRequest request) {
- super(request);
- }
-
- @Override
- ReadTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
- return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
- }
-}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
/**
* Successful reply to an {@link ReadTransactionRequest}. It indicates presence of requested data via
* {@link #getData()}.
- *
- * @author Robert Varga
*/
-@Beta
-@SuppressFBWarnings("SE_BAD_FIELD")
public final class ReadTransactionSuccess extends TransactionSuccess<ReadTransactionSuccess>
implements SliceableMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final Optional<NormalizedNode<?, ?>> data;
+
+ private final Optional<NormalizedNode> data;
+
+ private ReadTransactionSuccess(final ReadTransactionSuccess request, final ABIVersion version) {
+ super(request, version);
+ data = request.data;
+ }
public ReadTransactionSuccess(final TransactionIdentifier identifier, final long sequence,
- final Optional<NormalizedNode<?, ?>> data) {
+ final Optional<NormalizedNode> data) {
super(identifier, sequence);
this.data = requireNonNull(data);
}
- public Optional<NormalizedNode<?, ?>> getData() {
+ public Optional<NormalizedNode> getData() {
return data;
}
@Override
- protected AbstractTransactionSuccessProxy<ReadTransactionSuccess> externalizableProxy(final ABIVersion version) {
- return new ReadTransactionSuccessProxyV1(this);
+ protected RTS externalizableProxy(final ABIVersion version) {
+ return new RTS(this);
}
@Override
protected ReadTransactionSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new ReadTransactionSuccess(this, version);
+ }
+
+ @java.io.Serial
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void readObjectNoData() throws ObjectStreamException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ throwNSE();
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ReadTransactionSuccess> {
- private static final long serialVersionUID = 1L;
-
- private Optional<NormalizedNode<?, ?>> data;
- private transient NormalizedNodeStreamVersion streamVersion;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public ReadTransactionSuccessProxyV1() {
- // For Externalizable
- }
-
- ReadTransactionSuccessProxyV1(final ReadTransactionSuccess request) {
- super(request);
- this.data = request.getData();
- this.streamVersion = request.getVersion().getStreamVersion();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
-
- if (data.isPresent()) {
- out.writeBoolean(true);
- try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
- nnout.writeNormalizedNode(data.get());
- }
- } else {
- out.writeBoolean(false);
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
-
- if (in.readBoolean()) {
- data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
- } else {
- data = Optional.empty();
- }
- }
-
- @Override
- protected ReadTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new ReadTransactionSuccess(target, sequence, data);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STR implements SkipTransactionsRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private SkipTransactionsRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public STR() {
+ // for Externalizable
+ }
+
+ STR(final SkipTransactionsRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public SkipTransactionsRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final SkipTransactionsRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STS implements TransactionSuccess.SerialForm<SkipTransactionsResponse> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private SkipTransactionsResponse message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public STS() {
+ // for Externalizable
+ }
+
+ STS(final SkipTransactionsResponse message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public SkipTransactionsResponse message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final SkipTransactionsResponse message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public SkipTransactionsResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) {
+ return new SkipTransactionsResponse(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import akka.actor.ActorRef;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.primitives.UnsignedLong;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Collection;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Request to skip a number of {@link TransactionIdentifier}s within a {code local history}. This request is essentially
+ * equivalent to {@link TransactionPurgeRequest} for {@link #getTarget()}, but also carries additional sibling
+ * {@link TransactionIdentifier}s in {@link #getOthers()}.
+ *
+ * <p>
+ * This request is sent by the frontend to inform the backend that a set of {@link TransactionIdentifier}s are
+ * explicitly retired and are guaranteed to never be used by the frontend.
+ */
+public final class SkipTransactionsRequest extends TransactionRequest<SkipTransactionsRequest> {
+ interface SerialForm extends TransactionRequest.SerialForm<SkipTransactionsRequest> {
+ @Override
+ default SkipTransactionsRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) throws IOException {
+ final int size = in.readInt();
+ final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);
+ int idx;
+ if (size % 2 != 0) {
+ builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
+ idx = 1;
+ } else {
+ idx = 0;
+ }
+ for (; idx < size; idx += 2) {
+ final byte hdr = WritableObjects.readLongHeader(in);
+ builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
+ builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
+ }
+
+ return new SkipTransactionsRequest(target, sequence, replyTo, builder.build());
+ }
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final SkipTransactionsRequest msg) throws IOException {
+ TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+ final var others = msg.others;
+ final int size = others.size();
+ out.writeInt(size);
+
+ int idx;
+ if (size % 2 != 0) {
+ WritableObjects.writeLong(out, others.get(0).longValue());
+ idx = 1;
+ } else {
+ idx = 0;
+ }
+ for (; idx < size; idx += 2) {
+ WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
+ }
+ }
+ }
+
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ // Note: UnsignedLong is arbitrary, yang.common.Uint64 would work just as well, we really want an immutable
+ // List<long>, though.
+ private final @NonNull ImmutableList<UnsignedLong> others;
+
+ public SkipTransactionsRequest(final TransactionIdentifier target, final long sequence,
+ final ActorRef replyTo, final Collection<UnsignedLong> others) {
+ super(target, sequence, replyTo);
+ this.others = ImmutableList.copyOf(others);
+ }
+
+ private SkipTransactionsRequest(final SkipTransactionsRequest request, final ABIVersion version) {
+ super(request, version);
+ others = request.others;
+ }
+
+ /**
+ * Return this {@link #getTarget()}s sibling {@link TransactionIdentifier}s.
+ *
+ * @return Siblings values of {@link TransactionIdentifier#getTransactionId()}
+ */
+ public List<UnsignedLong> getOthers() {
+ return others;
+ }
+
+ @Override
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new STR(this);
+ }
+
+ @Override
+ protected SkipTransactionsRequest cloneAsVersion(final ABIVersion version) {
+ return new SkipTransactionsRequest(this, version);
+ }
+
+ @Override
+ protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
+ final var helper = super.addToStringAttributes(toStringHelper);
+ if (!others.isEmpty()) {
+ helper.add("others", others);
+ }
+ return helper;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import org.opendaylight.controller.cluster.access.ABIVersion;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Successful reply to a {@link SkipTransactionsRequest}.
+ */
+// FIXME: rename to SkipTransactionsSuccess
+public final class SkipTransactionsResponse extends TransactionSuccess<SkipTransactionsResponse> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private SkipTransactionsResponse(final SkipTransactionsResponse success, final ABIVersion version) {
+ super(success, version);
+ }
+
+ public SkipTransactionsResponse(final TransactionIdentifier identifier, final long sequence) {
+ super(identifier, sequence);
+ }
+
+ @Override
+ protected STS externalizableProxy(final ABIVersion version) {
+ return new STS(this);
+ }
+
+ @Override
+ protected SkipTransactionsResponse cloneAsVersion(final ABIVersion version) {
+ return new SkipTransactionsResponse(this, version);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAR implements TransactionAbortRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionAbortRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TAR() {
+ // for Externalizable
+ }
+
+ TAR(final TransactionAbortRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionAbortRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionAbortRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAS implements TransactionSuccess.SerialForm<TransactionAbortSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionAbortSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TAS() {
+ // for Externalizable
+ }
+
+ TAS(final TransactionAbortSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionAbortSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionAbortSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionAbortSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) {
+ return new TransactionAbortSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCCS implements TransactionSuccess.SerialForm<TransactionCanCommitSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionCanCommitSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TCCS() {
+ // for Externalizable
+ }
+
+ TCCS(final TransactionCanCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionCanCommitSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionCanCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionCanCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) {
+ return new TransactionCanCommitSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCS implements TransactionSuccess.SerialForm<TransactionCommitSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionCommitSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TCS() {
+ // for Externalizable
+ }
+
+ TCS(final TransactionCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionCommitSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) {
+ return new TransactionCommitSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TDCR implements TransactionDoCommitRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionDoCommitRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TDCR() {
+ // for Externalizable
+ }
+
+ TDCR(final TransactionDoCommitRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionDoCommitRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionDoCommitRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class TF implements TransactionFailure.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionFailure message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TF() {
+ // for Externalizable
+ }
+
+ TF(final TransactionFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionFailure message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCR implements TransactionPreCommitRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionPreCommitRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TPCR() {
+ // for Externalizable
+ }
+
+ TPCR(final TransactionPreCommitRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPreCommitRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionPreCommitRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCS implements TransactionSuccess.SerialForm<TransactionPreCommitSuccess> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionPreCommitSuccess message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TPCS() {
+ // for Externalizable
+ }
+
+ TPCS(final TransactionPreCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPreCommitSuccess message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionPreCommitSuccess message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPreCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) throws IOException {
+ return new TransactionPreCommitSuccess(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPR implements TransactionPurgeRequest.SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionPurgeRequest message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TPR() {
+ // for Externalizable
+ }
+
+ TPR(final TransactionPurgeRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPurgeRequest message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionPurgeRequest message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPS implements TransactionSuccess.SerialForm<TransactionPurgeResponse> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionPurgeResponse message;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TPS() {
+ // for Externalizable
+ }
+
+ TPS(final TransactionPurgeResponse message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPurgeResponse message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final TransactionPurgeResponse message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public TransactionPurgeResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence) {
+ return new TransactionPurgeResponse(target, sequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
+}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* A transaction request to perform the abort step of the three-phase commit protocol.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionAbortRequest extends TransactionRequest<TransactionAbortRequest> {
+ interface SerialForm extends TransactionRequest.SerialForm<TransactionAbortRequest> {
+ @Override
+ default TransactionAbortRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new TransactionAbortRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionAbortRequest(final TransactionAbortRequest request, final ABIVersion version) {
+ super(request, version);
+ }
+
public TransactionAbortRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
super(target, sequence, replyTo);
}
@Override
- protected TransactionAbortRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new TransactionAbortRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new TAR(this);
}
@Override
protected TransactionAbortRequest cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionAbortRequest(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionAbortRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionAbortRequestProxyV1() {
- // For Externalizable
- }
-
- TransactionAbortRequestProxyV1(final TransactionAbortRequest request) {
- super(request);
- }
-
- @Override
- protected TransactionAbortRequest createRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new TransactionAbortRequest(target, sequence, replyTo);
- }
-}
* @author Robert Varga
*/
public final class TransactionAbortSuccess extends TransactionSuccess<TransactionAbortSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionAbortSuccess(final TransactionAbortSuccess success, final ABIVersion version) {
+ super(success, version);
+ }
+
public TransactionAbortSuccess(final TransactionIdentifier identifier, final long sequence) {
super(identifier, sequence);
}
@Override
- protected AbstractTransactionSuccessProxy<TransactionAbortSuccess> externalizableProxy(final ABIVersion version) {
- return new TransactionAbortSuccessProxyV1(this);
+ protected TAS externalizableProxy(final ABIVersion version) {
+ return new TAS(this);
}
@Override
protected TransactionAbortSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionAbortSuccess(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionAbortSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionAbortSuccessProxyV1() {
- // For Externalizable
- }
-
- TransactionAbortSuccessProxyV1(final TransactionAbortSuccess success) {
- super(success);
- }
-
- @Override
- protected TransactionAbortSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new TransactionAbortSuccess(target, sequence);
- }
-}
* @author Robert Varga
*/
public final class TransactionCanCommitSuccess extends TransactionSuccess<TransactionCanCommitSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionCanCommitSuccess(final TransactionCanCommitSuccess success, final ABIVersion version) {
+ super(success, version);
+ }
+
public TransactionCanCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
super(identifier, sequence);
}
@Override
- protected AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> externalizableProxy(
- final ABIVersion version) {
- return new TransactionCanCommitSuccessProxyV1(this);
+ protected TCCS externalizableProxy(final ABIVersion version) {
+ return new TCCS(this);
}
@Override
protected TransactionCanCommitSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionCanCommitSuccess(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCanCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionCanCommitSuccessProxyV1() {
- // For Externalizable
- }
-
- TransactionCanCommitSuccessProxyV1(final TransactionCanCommitSuccess success) {
- super(success);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- }
-
- @Override
- protected TransactionCanCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new TransactionCanCommitSuccess(target, sequence);
- }
-}
* @author Robert Varga
*/
public final class TransactionCommitSuccess extends TransactionSuccess<TransactionCommitSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionCommitSuccess(final TransactionCommitSuccess success, final ABIVersion version) {
+ super(success, version);
+ }
+
public TransactionCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
super(identifier, sequence);
}
@Override
- protected AbstractTransactionSuccessProxy<TransactionCommitSuccess> externalizableProxy(final ABIVersion version) {
- return new TransactionCommitSuccessProxyV1(this);
+ protected TCS externalizableProxy(final ABIVersion version) {
+ return new TCS(this);
}
@Override
protected TransactionCommitSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionCommitSuccess(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCommitSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionCommitSuccessProxyV1() {
- // For Externalizable
- }
-
- TransactionCommitSuccessProxyV1(final TransactionCommitSuccess success) {
- super(success);
- }
-
- @Override
- protected TransactionCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new TransactionCommitSuccess(target, sequence);
- }
-}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import java.io.IOException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
* A {@link TransactionModification} which has a data component.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class TransactionDataModification extends TransactionModification {
- private final NormalizedNode<?, ?> data;
+ private final NormalizedNode data;
- TransactionDataModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ TransactionDataModification(final YangInstanceIdentifier path, final NormalizedNode data) {
super(path);
this.data = requireNonNull(data);
}
- public final NormalizedNode<?, ?> getData() {
+ public final NormalizedNode getData() {
return data;
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
/**
* Delete a particular path.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionDelete extends TransactionModification {
public TransactionDelete(final YangInstanceIdentifier path) {
super(path);
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* A transaction request to perform the final, doCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionDoCommitRequest extends TransactionRequest<TransactionDoCommitRequest> {
+ interface SerialForm extends TransactionRequest.SerialForm<TransactionDoCommitRequest> {
+ @Override
+ default TransactionDoCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new TransactionDoCommitRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionDoCommitRequest(final TransactionDoCommitRequest request, final ABIVersion version) {
+ super(request, version);
+ }
+
public TransactionDoCommitRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
super(target, sequence, replyTo);
}
@Override
- protected TransactionDoCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new TransactionDoCommitRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new TDCR(this);
}
@Override
protected TransactionDoCommitRequest cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionDoCommitRequest(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionDoCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionDoCommitRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionDoCommitRequestProxyV1() {
- // For Externalizable
- }
-
- TransactionDoCommitRequestProxyV1(final TransactionDoCommitRequest request) {
- super(request);
- }
-
- @Override
- protected TransactionDoCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new TransactionDoCommitRequest(target, sequence, replyTo);
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
/**
* Generic {@link RequestFailure} involving a {@link TransactionRequest}.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionFailure extends RequestFailure<TransactionIdentifier, TransactionFailure> {
+ interface SerialForm extends RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> {
+ @Override
+ default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+ return TransactionIdentifier.readFrom(in);
+ }
+
+ @Override
+ default TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
+ final RequestException cause) {
+ return new TransactionFailure(target, sequence, cause);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionFailure(final TransactionFailure failure, final ABIVersion version) {
+ super(failure, version);
+ }
+
TransactionFailure(final TransactionIdentifier target, final long sequence, final RequestException cause) {
super(target, sequence, cause);
}
@Override
protected TransactionFailure cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionFailure(this, version);
}
@Override
- protected TransactionFailureProxyV1 externalizableProxy(final ABIVersion version) {
- return new TransactionFailureProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new TF(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionFailureProxyV1 extends AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionFailureProxyV1() {
- // For Externalizable
- }
-
- TransactionFailureProxyV1(final TransactionFailure failure) {
- super(failure);
- }
-
- @Override
- protected TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
- final RequestException cause) {
- return new TransactionFailure(target, sequence, cause);
- }
-
- @Override
- protected TransactionIdentifier readTarget(final DataInput in) throws IOException {
- return TransactionIdentifier.readFrom(in);
- }
-}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
* Merge a {@link NormalizedNode} tree onto a specific path.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionMerge extends TransactionDataModification {
- public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
super(path, data);
}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import java.io.IOException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* {@link #readFrom(NormalizedNodeDataInput, ReusableStreamReceiver)} methods for explicit serialization. The reason for
* this is that they are usually transmitted in bulk, hence it is advantageous to reuse
* a {@link NormalizedNodeDataOutput} instance to achieve better compression.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class TransactionModification {
static final byte TYPE_DELETE = 1;
static final byte TYPE_MERGE = 2;
static TransactionModification readFrom(final NormalizedNodeDataInput in, final ReusableStreamReceiver writer)
throws IOException {
final byte type = in.readByte();
- switch (type) {
- case TYPE_DELETE:
- return new TransactionDelete(in.readYangInstanceIdentifier());
- case TYPE_MERGE:
- return new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
- case TYPE_WRITE:
- return new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
- default:
- throw new IllegalArgumentException("Unhandled type " + type);
- }
+ return switch (type) {
+ case TYPE_DELETE -> new TransactionDelete(in.readYangInstanceIdentifier());
+ case TYPE_MERGE -> new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+ case TYPE_WRITE -> new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+ default -> throw new IllegalArgumentException("Unhandled type " + type);
+ };
}
}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
/**
* A transaction request to perform the second, preCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionPreCommitRequest extends TransactionRequest<TransactionPreCommitRequest> {
+ interface SerialForm extends TransactionRequest.SerialForm<TransactionPreCommitRequest> {
+ @Override
+ default TransactionPreCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new TransactionPreCommitRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionPreCommitRequest(final TransactionPreCommitRequest request, final ABIVersion version) {
+ super(request, version);
+ }
+
public TransactionPreCommitRequest(final TransactionIdentifier target, final long sequence,
final ActorRef replyTo) {
super(target, sequence, replyTo);
}
@Override
- protected TransactionPreCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new TransactionPreCommitRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new TPCR(this);
}
@Override
protected TransactionPreCommitRequest cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionPreCommitRequest(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPreCommitRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionPreCommitRequestProxyV1() {
- // For Externalizable
- }
-
- TransactionPreCommitRequestProxyV1(final TransactionPreCommitRequest request) {
- super(request);
- }
-
- @Override
- protected TransactionPreCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new TransactionPreCommitRequest(target, sequence, replyTo);
- }
-}
* @author Robert Varga
*/
public final class TransactionPreCommitSuccess extends TransactionSuccess<TransactionPreCommitSuccess> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionPreCommitSuccess(final TransactionPreCommitSuccess success, final ABIVersion version) {
+ super(success, version);
+ }
+
public TransactionPreCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
super(identifier, sequence);
}
@Override
- protected AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> externalizableProxy(
- final ABIVersion version) {
- return new TransactionPreCommitSuccessProxyV1(this);
+ protected TPCS externalizableProxy(final ABIVersion version) {
+ return new TPCS(this);
}
@Override
protected TransactionPreCommitSuccess cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionPreCommitSuccess(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionPreCommitSuccessProxyV1() {
- // For Externalizable
- }
-
- TransactionPreCommitSuccessProxyV1(final TransactionPreCommitSuccess success) {
- super(success);
- }
-
- @Override
- protected TransactionPreCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
- return new TransactionPreCommitSuccess(target, sequence);
- }
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
* A transaction request to perform the final transaction transition, which is purging it from the protocol view,
* meaning the frontend has no further knowledge of the transaction. The backend is free to purge any state related
* to the transaction and responds with a {@link TransactionPurgeResponse}.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionPurgeRequest extends TransactionRequest<TransactionPurgeRequest> {
+ interface SerialForm extends TransactionRequest.SerialForm<TransactionPurgeRequest> {
+ @Override
+ default TransactionPurgeRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+ final long sequence, final ActorRef replyTo) {
+ return new TransactionPurgeRequest(target, sequence, replyTo);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionPurgeRequest(final TransactionPurgeRequest request, final ABIVersion version) {
+ super(request, version);
+ }
+
public TransactionPurgeRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
super(target, sequence, replyTo);
}
@Override
- protected TransactionPurgeRequestProxyV1 externalizableProxy(final ABIVersion version) {
- return new TransactionPurgeRequestProxyV1(this);
+ protected SerialForm externalizableProxy(final ABIVersion version) {
+ return new TPR(this);
}
@Override
protected TransactionPurgeRequest cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionPurgeRequest(this, version);
}
}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPurgeRequest> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionPurgeRequestProxyV1() {
- // For Externalizable
- }
-
- TransactionPurgeRequestProxyV1(final TransactionPurgeRequest request) {
- super(request);
- }
-
- @Override
- protected TransactionPurgeRequest createRequest(final TransactionIdentifier target, final long sequence,
- final ActorRef replyTo) {
- return new TransactionPurgeRequest(target, sequence, replyTo);
- }
-}
/**
* Successful reply to a {@link TransactionPurgeRequest}.
- *
- * @author Robert Varga
*/
+// FIXME: rename to TransactionPurgeSuccess
public final class TransactionPurgeResponse extends TransactionSuccess<TransactionPurgeResponse> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private TransactionPurgeResponse(final TransactionPurgeResponse success, final ABIVersion version) {
+ super(success, version);
+ }
+
public TransactionPurgeResponse(final TransactionIdentifier identifier, final long sequence) {
super(identifier, sequence);
}
@Override
- protected AbstractTransactionSuccessProxy<TransactionPurgeResponse> externalizableProxy(
- final ABIVersion version) {
- return new TransactionPurgeResponseProxyV1(this);
+ protected TPS externalizableProxy(final ABIVersion version) {
+ return new TPS(this);
}
@Override
protected TransactionPurgeResponse cloneAsVersion(final ABIVersion version) {
- return this;
+ return new TransactionPurgeResponse(this, version);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeResponseProxyV1 extends AbstractTransactionSuccessProxy<TransactionPurgeResponse> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public TransactionPurgeResponseProxyV1() {
- // For Externalizable
- }
-
- TransactionPurgeResponseProxyV1(final TransactionPurgeResponse success) {
- super(success);
- }
-
- @Override
- protected TransactionPurgeResponse createSuccess(final TransactionIdentifier target, final long sequence) {
- return new TransactionPurgeResponse(target, sequence);
- }
-}
package org.opendaylight.controller.cluster.access.commands;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
* Abstract base class for {@link Request}s involving specific transaction. This class is visible outside of this
* package solely for the ability to perform a unified instanceof check.
*
- * @author Robert Varga
- *
* @param <T> Message type
*/
-@Beta
public abstract class TransactionRequest<T extends TransactionRequest<T>> extends Request<TransactionIdentifier, T> {
+ protected interface SerialForm<T extends TransactionRequest<T>>
+ extends Request.SerialForm<TransactionIdentifier, T> {
+ @Override
+ default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+ return TransactionIdentifier.readFrom(in);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
TransactionRequest(final TransactionIdentifier identifier, final long sequence, final ActorRef replyTo) {
}
@Override
- protected abstract AbstractTransactionRequestProxy<T> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
* Abstract base class for {@link RequestSuccess}es involving specific transaction. This class is visible outside of
* this package solely for the ability to perform a unified instanceof check.
*
- * @author Robert Varga
- *
* @param <T> Message type
*/
-@Beta
public abstract class TransactionSuccess<T extends TransactionSuccess<T>>
extends RequestSuccess<TransactionIdentifier, T> {
+ interface SerialForm<T extends TransactionSuccess<T>> extends RequestSuccess.SerialForm<TransactionIdentifier, T> {
+ @Override
+ default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+ return TransactionIdentifier.readFrom(in);
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
TransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
}
@Override
- protected abstract AbstractTransactionSuccessProxy<T> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
* Modification to write (and replace) a subtree at specified path with another subtree.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionWrite extends TransactionDataModification {
- public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
super(path, data);
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
/**
* A {@link RequestException} indicating that the backend has received a request referencing an unknown history. This
* typically happens when the linear history ID is newer than the highest observed {@link CreateLocalHistoryRequest}.
- *
- * @author Robert Varga
*/
-@Beta
public final class UnknownHistoryException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public UnknownHistoryException(final Long lastSeenHistory) {
}
private static String historyToString(final Long history) {
- return history == null ? "null" : Long.toUnsignedString(history.longValue());
+ return history == null ? "null" : Long.toUnsignedString(history);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractEnvelopeProxy<T extends Message<?, ?>> implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private T message;
- private long sessionId;
- private long txSequence;
-
- AbstractEnvelopeProxy() {
- // for Externalizable
- }
-
- AbstractEnvelopeProxy(final Envelope<T> envelope) {
- message = envelope.getMessage();
- txSequence = envelope.getTxSequence();
- sessionId = envelope.getSessionId();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- WritableObjects.writeLongs(out, sessionId, txSequence);
- out.writeObject(message);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- final byte header = WritableObjects.readLongHeader(in);
- sessionId = WritableObjects.readFirstLong(in, header);
- txSequence = WritableObjects.readSecondLong(in, header);
- message = (T) in.readObject();
- }
-
- @SuppressWarnings("checkstyle:hiddenField")
- abstract Envelope<T> createEnvelope(T wrappedNessage, long sessionId, long txSequence);
-
- final Object readResolve() {
- return createEnvelope(message, sessionId, txSequence);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import static com.google.common.base.Verify.verifyNotNull;
-
-import java.io.DataInput;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-/**
- * Abstract Externalizable proxy for use with {@link Message} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractMessageProxy<T extends WritableIdentifier, C extends Message<T, C>> implements Externalizable {
- private static final long serialVersionUID = 1L;
- private T target;
- private long sequence;
-
- protected AbstractMessageProxy() {
- // For Externalizable
- }
-
- AbstractMessageProxy(final @NonNull C message) {
- this.target = message.getTarget();
- this.sequence = message.getSequence();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- target.writeTo(out);
- WritableObjects.writeLong(out, sequence);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- target = verifyNotNull(readTarget(in));
- sequence = WritableObjects.readLong(in);
- }
-
- protected final Object readResolve() {
- return verifyNotNull(createMessage(target, sequence));
- }
-
- protected abstract @NonNull T readTarget(@NonNull DataInput in) throws IOException;
-
- abstract @NonNull C createMessage(@NonNull T msgTarget, long msgSequence);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestFailure} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestFailureProxy<T extends WritableIdentifier, C extends RequestFailure<T, C>>
- extends AbstractResponseProxy<T, C> {
- private static final long serialVersionUID = 1L;
- private RequestException cause;
-
- protected AbstractRequestFailureProxy() {
- // For Externalizable
- }
-
- protected AbstractRequestFailureProxy(final @NonNull C failure) {
- super(failure);
- this.cause = failure.getCause();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- out.writeObject(cause);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- cause = (RequestException) in.readObject();
- }
-
- @Override
- final C createResponse(final T target, final long sequence) {
- return createFailure(target, sequence, cause);
- }
-
- protected abstract @NonNull C createFailure(@NonNull T target, long sequence,
- @NonNull RequestException failureCause);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import akka.actor.ActorRef;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link Request} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestProxy<T extends WritableIdentifier, C extends Request<T, C>>
- extends AbstractMessageProxy<T, C> {
- private static final long serialVersionUID = 1L;
- private ActorRef replyTo;
-
- protected AbstractRequestProxy() {
- // For Externalizable
- }
-
- protected AbstractRequestProxy(final @NonNull C request) {
- super(request);
- this.replyTo = request.getReplyTo();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- out.writeObject(Serialization.serializedActorPath(replyTo));
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- replyTo = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
- }
-
- @Override
- final C createMessage(final T target, final long sequence) {
- return createRequest(target, sequence, replyTo);
- }
-
- protected abstract @NonNull C createRequest(@NonNull T target, long sequence, @NonNull ActorRef replyToActor);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractResponseEnvelopeProxy<T extends Response<?, ?>> extends AbstractEnvelopeProxy<T> {
- private static final long serialVersionUID = 1L;
-
- private long executionTimeNanos;
-
- AbstractResponseEnvelopeProxy() {
- // for Externalizable
- }
-
- AbstractResponseEnvelopeProxy(final ResponseEnvelope<T> envelope) {
- super(envelope);
- this.executionTimeNanos = envelope.getExecutionTimeNanos();
- }
-
- @Override
- public final void writeExternal(final ObjectOutput out) throws IOException {
- super.writeExternal(out);
- WritableObjects.writeLong(out, executionTimeNanos);
- }
-
- @Override
- public final void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- super.readExternal(in);
- executionTimeNanos = WritableObjects.readLong(in);
- }
-
- @Override
- final ResponseEnvelope<T> createEnvelope(final T message, final long sessionId, final long txSequence) {
- return createEnvelope(message, sessionId, txSequence, executionTimeNanos);
- }
-
- @SuppressWarnings("checkstyle:hiddenField")
- abstract ResponseEnvelope<T> createEnvelope(T message, long sessionId, long txSequence, long executionTimeNanos);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy class to use with {@link Response} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractResponseProxy<T extends WritableIdentifier, C extends Response<T, C>>
- extends AbstractMessageProxy<T, C> {
- private static final long serialVersionUID = 1L;
-
- protected AbstractResponseProxy() {
- // for Externalizable
- }
-
- AbstractResponseProxy(final @NonNull C response) {
- super(response);
- }
-
- @Override
- final C createMessage(final T target, final long sequence) {
- return createResponse(target, sequence);
- }
-
- abstract @NonNull C createResponse(@NonNull T target, long sequence);
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestSuccess} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractSuccessProxy<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
- extends AbstractResponseProxy<T, C> {
- private static final long serialVersionUID = 1L;
-
- protected AbstractSuccessProxy() {
- // For Externalizable
- }
-
- protected AbstractSuccessProxy(final @NonNull C success) {
- super(success);
- }
-
- @Override
- final C createResponse(final T target, final long sequence) {
- return createSuccess(target, sequence);
- }
-
- protected abstract @NonNull C createSuccess(@NonNull T target, long sequence);
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ClientIdentifier}.
+ */
+final class CI implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ClientIdentifier identifier;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CI() {
+ // for Externalizable
+ }
+
+ CI(final ClientIdentifier identifier) {
+ this.identifier = requireNonNull(identifier);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ identifier = new ClientIdentifier(FrontendIdentifier.readFrom(in), WritableObjects.readLong(in));
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ identifier.getFrontendId().writeTo(out);
+ WritableObjects.writeLong(out, identifier.getGeneration());
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(identifier);
+ }
+}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024.ClientGeneration;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
/**
* A cluster-wide unique identifier of a frontend instance. This identifier discerns between individual incarnations
* of a particular frontend.
- *
- * @author Robert Varga
*/
-@Beta
public final class ClientIdentifier implements WritableIdentifier {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private FrontendIdentifier frontendId;
- private long generation;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // Needed for Externalizable
- }
-
- Proxy(final FrontendIdentifier frontendId, final long generation) {
- this.frontendId = requireNonNull(frontendId);
- this.generation = generation;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- frontendId.writeTo(out);
- WritableObjects.writeLong(out, generation);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- frontendId = FrontendIdentifier.readFrom(in);
- generation = WritableObjects.readLong(in);
- }
-
- private Object readResolve() {
- return new ClientIdentifier(frontendId, generation);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final @NonNull FrontendIdentifier frontendId;
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof ClientIdentifier)) {
- return false;
- }
-
- final ClientIdentifier other = (ClientIdentifier) obj;
- return generation == other.generation && frontendId.equals(other.frontendId);
+ return this == obj || obj instanceof ClientIdentifier other && generation == other.generation
+ && frontendId.equals(other.frontendId);
}
@Override
public String toString() {
- return MoreObjects.toStringHelper(ClientIdentifier.class).add("frontend", frontendId)
- .add("generation", Long.toUnsignedString(generation)).toString();
+ return MoreObjects.toStringHelper(ClientIdentifier.class)
+ .add("frontend", frontendId)
+ .add("generation", Long.toUnsignedString(generation))
+ .toString();
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(frontendId, generation);
+ return new CI(this);
}
}
import static java.util.Objects.requireNonNull;
import com.google.common.base.MoreObjects;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Serializable {
+ interface SerialForm<T extends Message<?, ?>, E extends Envelope<T>> extends Externalizable {
+
+ @NonNull E envelope();
+
+ void setEnvelope(@NonNull E envelope);
+
+ @java.io.Serial
+ Object readResolve();
+
+ @Override
+ default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final byte header = WritableObjects.readLongHeader(in);
+ final var sessionId = WritableObjects.readFirstLong(in, header);
+ final var txSequence = WritableObjects.readSecondLong(in, header);
+ @SuppressWarnings("unchecked")
+ final var message = (T) in.readObject();
+ setEnvelope(readExternal(in, sessionId, txSequence, message));
+ }
+
+ E readExternal(ObjectInput in, long sessionId, long txSequence, T message) throws IOException;
+
+ @Override
+ default void writeExternal(final ObjectOutput out) throws IOException {
+ writeExternal(out, envelope());
+ }
+
+ default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+ WritableObjects.writeLongs(out, envelope.getSessionId(), envelope.getTxSequence());
+ out.writeObject(envelope.getMessage());
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final T message;
+ private final @NonNull T message;
private final long txSequence;
private final long sessionId;
*
* @return enclose message
*/
- public T getMessage() {
+ public @NonNull T getMessage() {
return message;
}
.add("txSequence", Long.toHexString(txSequence)).add("message", message).toString();
}
+ @java.io.Serial
final Object writeReplace() {
return createProxy();
}
- abstract AbstractEnvelopeProxy<T> createProxy();
+ abstract @NonNull SerialForm<T, ?> createProxy();
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link FailureEnvelope}.
+ */
+final class FE implements ResponseEnvelope.SerialForm<RequestFailure<?, ?>, FailureEnvelope> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private FailureEnvelope envelope;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public FE() {
+ // for Externalizable
+ }
+
+ FE(final FailureEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public FailureEnvelope envelope() {
+ return verifyNotNull(envelope);
+ }
+
+ @Override
+ public void setEnvelope(final FailureEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public FailureEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+ final RequestFailure<?, ?> message, final long executionTimeNanos) {
+ return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
+ }
+
+ @Override
+ public Object readResolve() {
+ return envelope();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FrontendIdentifier}.
+ */
+final class FI implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private FrontendIdentifier identifier;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public FI() {
+ // for Externalizable
+ }
+
+ FI(final FrontendIdentifier identifier) {
+ this.identifier = requireNonNull(identifier);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ identifier = new FrontendIdentifier(MemberName.readFrom(in), FrontendType.readFrom(in));
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ identifier.getMemberName().writeTo(out);
+ identifier.getClientType().writeTo(out);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(identifier);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link FrontendType}.
+ */
+final class FT implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private byte[] serialized;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public FT() {
+ // for Externalizable
+ }
+
+ FT(final byte[] serialized) {
+ this.serialized = requireNonNull(serialized);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeInt(serialized.length);
+ out.write(serialized);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ serialized = new byte[in.readInt()];
+ in.readFully(serialized);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ // TODO: consider caching instances here
+ return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
+ }
+}
package org.opendaylight.controller.cluster.access.concepts;
public final class FailureEnvelope extends ResponseEnvelope<RequestFailure<?, ?>> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public FailureEnvelope(final RequestFailure<?, ?> message, final long sessionId, final long txSequence,
}
@Override
- FailureEnvelopeProxy createProxy() {
- return new FailureEnvelopeProxy(this);
+ FE createProxy() {
+ return new FE(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class FailureEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestFailure<?, ?>> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to be
- // able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public FailureEnvelopeProxy() {
- // for Externalizable
- }
-
- FailureEnvelopeProxy(final FailureEnvelope envelope) {
- super(envelope);
- }
-
- @Override
- ResponseEnvelope<RequestFailure<?, ?>> createEnvelope(final RequestFailure<?, ?> message, final long sessionId,
- final long txSequence, final long executionTimeNanos) {
- return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
- }
-}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.util.Objects;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
/**
* A cluster-wide unique identifier of a frontend type located at a cluster member.
- *
- * @author Robert Varga
*/
-@Beta
public final class FrontendIdentifier implements WritableIdentifier {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private MemberName memberName;
- private FrontendType clientType;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // Needed for Externalizable
- }
-
- Proxy(final MemberName memberName, final FrontendType clientType) {
- this.memberName = requireNonNull(memberName);
- this.clientType = requireNonNull(clientType);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- memberName.writeTo(out);
- clientType.writeTo(out);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- memberName = MemberName.readFrom(in);
- clientType = FrontendType.readFrom(in);
- }
-
- private Object readResolve() {
- return new FrontendIdentifier(memberName, clientType);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
private final MemberName memberName;
private final FrontendType clientType;
this.memberName = requireNonNull(memberName);
}
- public static FrontendIdentifier create(final MemberName memberName, final FrontendType clientType) {
+ public static @NonNull FrontendIdentifier create(final MemberName memberName, final FrontendType clientType) {
return new FrontendIdentifier(memberName, clientType);
}
- public static FrontendIdentifier readFrom(final DataInput in) throws IOException {
- final MemberName memberName = MemberName.readFrom(in);
- final FrontendType clientType = FrontendType.readFrom(in);
+ public static @NonNull FrontendIdentifier readFrom(final DataInput in) throws IOException {
+ final var memberName = MemberName.readFrom(in);
+ final var clientType = FrontendType.readFrom(in);
return new FrontendIdentifier(memberName, clientType);
}
clientType.writeTo(out);
}
- public FrontendType getClientType() {
+ public @NonNull FrontendType getClientType() {
return clientType;
}
- public MemberName getMemberName() {
+ public @NonNull MemberName getMemberName() {
return memberName;
}
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof FrontendIdentifier)) {
- return false;
- }
-
- final FrontendIdentifier other = (FrontendIdentifier) obj;
- return memberName.equals(other.memberName) && clientType.equals(other.clientType);
+ return this == obj || obj instanceof FrontendIdentifier other && memberName.equals(other.memberName)
+ && clientType.equals(other.clientType);
}
- public String toPersistentId() {
+ public @NonNull String toPersistentId() {
return memberName.getName() + "-frontend-" + clientType.getName();
}
return toPersistentId();
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(memberName, clientType);
+ return new FI(this);
}
}
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;
import org.eclipse.jdt.annotation.NonNull;
* An {@link Identifier} identifying a data store frontend type, which is able to access the data store backend.
* Frontend implementations need to define this identifier so that multiple clients existing on a member node can be
* discerned.
- *
- * @author Robert Varga
*/
-@Beta
public final class FrontendType implements Comparable<FrontendType>, WritableIdentifier {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private byte[] serialized;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- this.serialized = requireNonNull(serialized);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(serialized.length);
- out.write(serialized);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- serialized = new byte[in.readInt()];
- in.readFully(serialized);
- }
-
- private Object readResolve() {
- // TODO: consider caching instances here
- return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
- }
- }
-
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
private static final String SIMPLE_STRING_REGEX = "^[a-zA-Z0-9-_.*+:=,!~';]+$";
private static final Pattern SIMPLE_STRING_PATTERN = Pattern.compile(SIMPLE_STRING_REGEX);
- private static final long serialVersionUID = 1L;
private final @NonNull String name;
return local;
}
- Object writeReplace() {
- return new Proxy(getSerialized());
+ @java.io.Serial
+ private Object writeReplace() {
+ return new FT(getSerialized());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link LocalHistoryIdentifier}.
+ *
+ * @implNote
+ * cookie is currently required only for module-based sharding, which is implemented as part of normal
+ * DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence we may end up not needing
+ * cookie at all.
+ * We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we end up not needing
+ * the cookie at all, we can switch to writeLong() and use zero flags for compatibility.
+ */
+final class HI implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistoryIdentifier identifier;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public HI() {
+ // for Externalizable
+ }
+
+ HI(final LocalHistoryIdentifier identifier) {
+ this.identifier = requireNonNull(identifier);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ identifier.getClientId().writeTo(out);
+ WritableObjects.writeLongs(out, identifier.getHistoryId(), identifier.getCookie());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ final var clientId = ClientIdentifier.readFrom(in);
+ final byte header = WritableObjects.readLongHeader(in);
+ final var historyId = WritableObjects.readFirstLong(in, header);
+ final var cookie = WritableObjects.readSecondLong(in, header);
+ identifier = new LocalHistoryIdentifier(clientId, historyId, cookie);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(identifier);
+ }
+}
import com.google.common.base.MoreObjects;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
import org.opendaylight.yangtools.concepts.WritableObjects;
* - a {@link ClientIdentifier}, which uniquely identifies a single instantiation of a particular frontend
* - an unsigned long, which uniquely identifies the history on the backend
* - an unsigned long cookie, assigned by the client and meaningless on the backend, which just reflects it back
- *
- * @author Robert Varga
*/
public final class LocalHistoryIdentifier implements WritableIdentifier {
- /*
- * Implementation note: cookie is currently required only for module-based sharding, which is implemented as part
- * of normal DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence
- * we may end up not needing cookie at all.
- *
- * We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we
- * end up not needing the cookie at all, we can switch to writeLong() and use zero flags for
- * compatibility.
- */
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private ClientIdentifier clientId;
- private long historyId;
- private long cookie;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ClientIdentifier frontendId, final long historyId, final long cookie) {
- this.clientId = requireNonNull(frontendId);
- this.historyId = historyId;
- this.cookie = cookie;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- clientId.writeTo(out);
- WritableObjects.writeLongs(out, historyId, cookie);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- clientId = ClientIdentifier.readFrom(in);
-
- final byte header = WritableObjects.readLongHeader(in);
- historyId = WritableObjects.readFirstLong(in, header);
- cookie = WritableObjects.readSecondLong(in, header);
- }
-
- private Object readResolve() {
- return new LocalHistoryIdentifier(clientId, historyId, cookie);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final ClientIdentifier clientId;
+
+ private final @NonNull ClientIdentifier clientId;
private final long historyId;
private final long cookie;
}
public LocalHistoryIdentifier(final ClientIdentifier frontendId, final long historyId, final long cookie) {
- this.clientId = requireNonNull(frontendId);
+ clientId = requireNonNull(frontendId);
this.historyId = historyId;
this.cookie = cookie;
}
- public static LocalHistoryIdentifier readFrom(final DataInput in) throws IOException {
+ public static @NonNull LocalHistoryIdentifier readFrom(final DataInput in) throws IOException {
final ClientIdentifier clientId = ClientIdentifier.readFrom(in);
final byte header = WritableObjects.readLongHeader(in);
WritableObjects.writeLongs(out, historyId, cookie);
}
- public ClientIdentifier getClientId() {
+ public @NonNull ClientIdentifier getClientId() {
return clientId;
}
if (this == obj) {
return true;
}
- if (!(obj instanceof LocalHistoryIdentifier)) {
+ if (!(obj instanceof LocalHistoryIdentifier other)) {
return false;
}
- final LocalHistoryIdentifier other = (LocalHistoryIdentifier) obj;
return historyId == other.historyId && cookie == other.cookie && clientId.equals(other.clientId);
}
.add("cookie", Long.toUnsignedString(cookie, 16)).toString();
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(clientId, historyId, cookie);
+ return new HI(this);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link MemberName}.
+ */
+final class MN implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private byte[] serialized;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public MN() {
+ // for Externalizable
+ }
+
+ MN(final byte[] serialized) {
+ this.serialized = requireNonNull(serialized);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeInt(serialized.length);
+ out.write(serialized);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ serialized = new byte[in.readInt()];
+ in.readFully(serialized);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ // TODO: consider caching instances here
+ return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
+ }
+}
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.nio.charset.StandardCharsets;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
/**
* Type-safe encapsulation of a cluster member name.
- *
- * @author Robert Varga
*/
-@Beta
public final class MemberName implements Comparable<MemberName>, WritableIdentifier {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private byte[] serialized;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- this.serialized = requireNonNull(serialized);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(serialized.length);
- out.write(serialized);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- serialized = new byte[in.readInt()];
- in.readFully(serialized);
- }
-
- private Object readResolve() {
- // TODO: consider caching instances here
- return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final String name;
+
+ private final @NonNull String name;
@SuppressFBWarnings(value = "VO_VOLATILE_REFERENCE_TO_ARRAY",
justification = "The array elements are non-volatile but we don't access them.")
this.serialized = verifyNotNull(serialized);
}
- public static MemberName forName(final String name) {
+ public static @NonNull MemberName forName(final String name) {
checkArgument(!Strings.isNullOrEmpty(name));
// TODO: consider caching instances here
return new MemberName(name);
}
- public static MemberName readFrom(final DataInput in) throws IOException {
+ public static @NonNull MemberName readFrom(final DataInput in) throws IOException {
final byte[] serialized = new byte[in.readInt()];
in.readFully(serialized);
return new MemberName(new String(serialized, StandardCharsets.UTF_8));
out.write(local);
}
- public String getName() {
+ public @NonNull String getName() {
return name;
}
public org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
- .MemberName toYang() {
+ .@NonNull MemberName toYang() {
return new org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
.MemberName(name);
}
return local;
}
+ @java.io.Serial
Object writeReplace() {
- return new Proxy(getSerialized());
+ return new MN(getSerialized());
}
}
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.NotSerializableException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
import java.io.Serializable;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.Immutable;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
/**
* An abstract concept of a Message. This class cannot be instantiated directly, use its specializations {@link Request}
* Note that this class specifies the {@link Immutable} contract, which means that all subclasses must follow this API
* contract.
*
- * @author Robert Varga
- *
* @param <T> Target identifier type
* @param <C> Message type
*/
-@Beta
-public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>> implements Immutable,
- Serializable {
+public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>>
+ implements Immutable, Serializable {
+ /**
+ * Externalizable proxy for use with {@link Message} subclasses.
+ *
+ * @param <T> Target identifier type
+ * @param <C> Message class
+ */
+ protected interface SerialForm<T extends WritableIdentifier, C extends Message<T, C>> extends Externalizable {
+
+ @NonNull C message();
+
+ void setMessage(@NonNull C message);
+
+ @Override
+ default void writeExternal(final ObjectOutput out) throws IOException {
+ final var message = message();
+ message.getTarget().writeTo(out);
+ WritableObjects.writeLong(out, message.getSequence());
+ writeExternal(out, message);
+ }
+
+ void writeExternal(@NonNull ObjectOutput out, @NonNull C msg) throws IOException;
+
+ @Override
+ default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final var target = verifyNotNull(readTarget(in));
+ final var sequence = WritableObjects.readLong(in);
+ setMessage(verifyNotNull(readExternal(in, target, sequence)));
+ }
+
+ @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence)
+ throws IOException, ClassNotFoundException;
+
+ Object readResolve();
+
+ @NonNull T readTarget(@NonNull DataInput in) throws IOException;
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final @NonNull ABIVersion version;
*/
@SuppressWarnings("unchecked")
public final @NonNull C toVersion(final @NonNull ABIVersion toVersion) {
- if (this.version == toVersion) {
+ if (version == toVersion) {
return (C)this;
}
- switch (toVersion) {
- case BORON:
- case NEON_SR2:
- case SODIUM_SR1:
- case MAGNESIUM:
- return verifyNotNull(cloneAsVersion(toVersion));
- case TEST_PAST_VERSION:
- case TEST_FUTURE_VERSION:
- default:
- throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
- }
+ return switch (toVersion) {
+ case POTASSIUM -> verifyNotNull(cloneAsVersion(toVersion));
+ default -> throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
+ };
}
/**
* @param reqVersion Requested ABI version
* @return Proxy for this object
*/
- abstract @NonNull AbstractMessageProxy<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
+ protected abstract @NonNull SerialForm<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
+ @java.io.Serial
protected final Object writeReplace() {
return externalizableProxy(version);
}
+
+ protected final void throwNSE() throws NotSerializableException {
+ throw new NotSerializableException(getClass().getName());
+ }
+
+ @java.io.Serial
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void readObjectNoData() throws ObjectStreamException {
+ throwNSE();
+ }
+
+ @java.io.Serial
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ throwNSE();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link RequestEnvelope}.
+ */
+final class RE implements Envelope.SerialForm<Request<?, ?>, RequestEnvelope> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private RequestEnvelope envelope;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public RE() {
+ // for Externalizable
+ }
+
+ RE(final RequestEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public RequestEnvelope envelope() {
+ return verifyNotNull(envelope);
+ }
+
+ @Override
+ public void setEnvelope(final RequestEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public RequestEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+ final Request<?, ?> message) {
+ return new RequestEnvelope(message, sessionId, txSequence);
+ }
+
+ @Override
+ public Object readResolve() {
+ return envelope();
+ }
+}
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
* A request message concept. Upon receipt of this message, the recipient will respond with either
* a {@link RequestSuccess} or a {@link RequestFailure} message.
*
- * @author Robert Varga
- *
* @param <T> Target identifier type
* @param <C> Message type
*/
-@Beta
public abstract class Request<T extends WritableIdentifier, C extends Request<T, C>> extends Message<T, C> {
+ protected interface SerialForm<T extends WritableIdentifier, C extends Request<T, C>>
+ extends Message.SerialForm<T, C> {
+ @Override
+ default C readExternal(final ObjectInput in, final T target, final long sequence)
+ throws ClassNotFoundException, IOException {
+ return readExternal(in, target, sequence,
+ JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject()));
+ }
+
+ @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence, @NonNull ActorRef replyTo)
+ throws IOException;
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+ out.writeObject(Serialization.serializedActorPath(msg.getReplyTo()));
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
private final @NonNull ActorRef replyTo;
protected Request(final @NonNull T target, final long sequence, final @NonNull ActorRef replyTo) {
}
@Override
- protected abstract AbstractRequestProxy<T, C> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
}
import akka.actor.ActorRef;
public final class RequestEnvelope extends Envelope<Request<?, ?>> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public RequestEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
}
@Override
- RequestEnvelopeProxy createProxy() {
- return new RequestEnvelopeProxy(this);
+ RE createProxy() {
+ return new RE(this);
}
/**
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class RequestEnvelopeProxy extends AbstractEnvelopeProxy<Request<?, ?>> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public RequestEnvelopeProxy() {
- // for Externalizable
- }
-
- RequestEnvelopeProxy(final RequestEnvelope envelope) {
- super(envelope);
- }
-
- @Override
- RequestEnvelope createEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
- return new RequestEnvelope(message, sessionId, txSequence);
- }
-}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import org.eclipse.jdt.annotation.NonNull;
/**
* A failure cause behind a {@link RequestFailure} to process a {@link Request}.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class RequestException extends Exception {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
protected RequestException(final @NonNull String message) {
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
/**
* A failure response to a {@link Request}. Contains a {@link RequestException} detailing the cause for this failure.
*
- * @author Robert Varga
- *
* @param <T> Target identifier type
* @param <C> Message class
*/
-@Beta
public abstract class RequestFailure<T extends WritableIdentifier, C extends RequestFailure<T, C>>
extends Response<T, C> {
+ /**
+ * Externalizable proxy for use with {@link RequestFailure} subclasses.
+ *
+ * @param <T> Target identifier type
+ */
+ protected interface SerialForm<T extends WritableIdentifier, C extends RequestFailure<T, C>>
+ extends Message.SerialForm<T, C> {
+ @Override
+ default C readExternal(final ObjectInput in, final T target, final long sequence)
+ throws IOException, ClassNotFoundException {
+ return createFailure(target, sequence, (RequestException) in.readObject());
+ }
+
+ @Override
+ default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+ out.writeObject(msg.getCause());
+ }
+
+ @NonNull C createFailure(@NonNull T target, long sequence, @NonNull RequestException failureCause);
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final @NonNull RequestException cause;
}
@Override
- protected abstract AbstractRequestFailureProxy<T, C> externalizableProxy(ABIVersion version);
+ protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
/**
* A successful reply to a {@link Request}.
*
- * @author Robert Varga
- *
* @param <T> Target identifier type
*/
-@Beta
-public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>> extends
- Response<T, C> {
+public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+ extends Response<T, C> {
+ protected interface SerialForm<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+ extends Response.SerialForm<T, C> {
+ @Override
+ default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+ // Defaults to no-op
+ }
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) {
+ protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) {
super(success, version);
}
protected RequestSuccess(final @NonNull T target, final long sequence) {
super(target, sequence);
}
-
- @Override
- protected abstract AbstractSuccessProxy<T, C> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.annotations.Beta;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
* {@link RequestFailure} and {@link RequestSuccess}, which provide appropriate specialization. It is visible purely for
* the purpose of allowing to check if an object is either of those specializations with a single instanceof check.
*
- * @author Robert Varga
- *
* @param <T> Target identifier type
* @param <C> Message type
*/
-@Beta
public abstract class Response<T extends WritableIdentifier, C extends Response<T, C>> extends Message<T, C> {
+ protected interface SerialForm<T extends WritableIdentifier, C extends Response<T, C>>
+ extends Message.SerialForm<T, C> {
+
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
Response(final @NonNull T target, final long sequence) {
Response(final @NonNull C response, final @NonNull ABIVersion version) {
super(response, version);
}
-
- @Override
- abstract AbstractResponseProxy<T, C> externalizableProxy(ABIVersion version);
}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.WritableObjects;
public abstract class ResponseEnvelope<T extends Response<?, ?>> extends Envelope<T> {
+ interface SerialForm<T extends Response<?, ?>, E extends ResponseEnvelope<T>> extends Envelope.SerialForm<T, E> {
+ @Override
+ default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+ Envelope.SerialForm.super.writeExternal(out, envelope);
+ WritableObjects.writeLong(out, envelope.getExecutionTimeNanos());
+ }
+
+ @Override
+ default E readExternal(final ObjectInput in, final long sessionId, final long txSequence, final T message)
+ throws IOException {
+ return readExternal(in, sessionId, txSequence, message, WritableObjects.readLong(in));
+ }
+
+ E readExternal(ObjectInput in, long sessionId, long txSequence, T message, long executionTimeNanos);
+ }
+
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final long executionTimeNanos;
ResponseEnvelope(final T message, final long sessionId, final long txSequence, final long executionTimeNanos) {
super(message, sessionId, txSequence);
- Preconditions.checkArgument(executionTimeNanos >= 0);
+ checkArgument(executionTimeNanos >= 0, "Negative executionTime");
this.executionTimeNanos = executionTimeNanos;
}
public final long getExecutionTimeNanos() {
return executionTimeNanos;
}
-
- @Override
- abstract AbstractResponseEnvelopeProxy<T> createProxy();
}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.annotations.Beta;
-
/**
* General error raised when the recipient of a {@link Request} determines that the request contains
* a {@link ClientIdentifier} which corresponds to an outdated generation.
- *
- * @author Robert Varga
*/
-@Beta
public final class RetiredGenerationException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public RetiredGenerationException(final long originatingGeneration, final long newGeneration) {
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.Strings;
/**
* General error raised when the recipient of a {@link Request} fails to process a request.
- *
- * @author Robert Varga
*/
-@Beta
public final class RuntimeRequestException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public RuntimeRequestException(final String message, final Throwable cause) {
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link SuccessEnvelope}.
+ */
+final class SE implements ResponseEnvelope.SerialForm<RequestSuccess<?, ?>, SuccessEnvelope> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private SuccessEnvelope envelope;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public SE() {
+ // for Externalizable
+ }
+
+ SE(final SuccessEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public SuccessEnvelope envelope() {
+ return verifyNotNull(envelope);
+ }
+
+ @Override
+ public void setEnvelope(final SuccessEnvelope envelope) {
+ this.envelope = requireNonNull(envelope);
+ }
+
+ @Override
+ public SuccessEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+ final RequestSuccess<?, ?> message, final long executionTimeNanos) {
+ return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
+ }
+
+ @Override
+ public Object readResolve() {
+ return envelope();
+ }
+}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.annotations.Beta;
-
/**
* A tagging interface that specifies a message whose serialized size can be large and thus should be sliced into
* smaller chunks when transporting over the wire.
*
* @author Thomas Pantelis
*/
-@Beta
public interface SliceableMessage {
+ // Marker interface
}
package org.opendaylight.controller.cluster.access.concepts;
public final class SuccessEnvelope extends ResponseEnvelope<RequestSuccess<?, ?>> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public SuccessEnvelope(final RequestSuccess<?, ?> message, final long sessionId, final long txSequence,
}
@Override
- SuccessEnvelopeProxy createProxy() {
- return new SuccessEnvelopeProxy(this);
+ SE createProxy() {
+ return new SE(this);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class SuccessEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestSuccess<?, ?>> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public SuccessEnvelopeProxy() {
- // for Externalizable
- }
-
- SuccessEnvelopeProxy(final SuccessEnvelope envelope) {
- super(envelope);
- }
-
- @Override
- ResponseEnvelope<RequestSuccess<?, ?>> createEnvelope(final RequestSuccess<?, ?> message, final long sessionId,
- final long txSequence, final long executionTimeNanos) {
- return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link TransactionIdentifier}.
+ */
+final class TI implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionIdentifier identifier;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public TI() {
+ // for Externalizable
+ }
+
+ TI(final TransactionIdentifier identifier) {
+ this.identifier = requireNonNull(identifier);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ identifier = new TransactionIdentifier(LocalHistoryIdentifier.readFrom(in), WritableObjects.readLong(in));
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ identifier.getHistoryId().writeTo(out);
+ WritableObjects.writeLong(out, identifier.getTransactionId());
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(identifier);
+ }
+}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import java.io.DataInput;
import java.io.DataOutput;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
import org.opendaylight.yangtools.concepts.WritableObjects;
/**
* Globally-unique identifier of a transaction.
- *
- * @author Robert Varga
*/
-@Beta
public final class TransactionIdentifier implements WritableIdentifier {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private LocalHistoryIdentifier historyId;
- private long transactionId;
-
- // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
- // be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final LocalHistoryIdentifier historyId, final long transactionId) {
- this.historyId = requireNonNull(historyId);
- this.transactionId = transactionId;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- historyId.writeTo(out);
- WritableObjects.writeLong(out, transactionId);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- historyId = LocalHistoryIdentifier.readFrom(in);
- transactionId = WritableObjects.readLong(in);
- }
-
- private Object readResolve() {
- return new TransactionIdentifier(historyId, transactionId);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final LocalHistoryIdentifier historyId;
+
+ private final @NonNull LocalHistoryIdentifier historyId;
private final long transactionId;
private String shortString;
this.transactionId = transactionId;
}
- public static TransactionIdentifier readFrom(final DataInput in) throws IOException {
+ public static @NonNull TransactionIdentifier readFrom(final DataInput in) throws IOException {
final LocalHistoryIdentifier historyId = LocalHistoryIdentifier.readFrom(in);
return new TransactionIdentifier(historyId, WritableObjects.readLong(in));
}
WritableObjects.writeLong(out, transactionId);
}
- public LocalHistoryIdentifier getHistoryId() {
+ public @NonNull LocalHistoryIdentifier getHistoryId() {
return historyId;
}
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof TransactionIdentifier)) {
- return false;
- }
-
- final TransactionIdentifier other = (TransactionIdentifier) obj;
- return transactionId == other.transactionId && historyId.equals(other.historyId);
+ return this == obj || obj instanceof TransactionIdentifier other && transactionId == other.transactionId
+ && historyId.equals(other.historyId);
}
public String toShortString() {
return toShortString();
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(historyId, transactionId);
+ return new TI(this);
}
}
*/
package org.opendaylight.controller.cluster.access.concepts;
-import com.google.common.annotations.Beta;
-
/**
* General error raised when the recipient of a {@link Request} determines that it does not know how to handle
* the request.
- *
- * @author Robert Varga
*/
-@Beta
public final class UnsupportedRequestException extends RequestException {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public UnsupportedRequestException(final Request<?, ?> request) {
package org.opendaylight.controller.cluster.access;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.cluster.access.ABIVersion.BORON;
+import static org.opendaylight.controller.cluster.access.ABIVersion.POTASSIUM;
import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_FUTURE_VERSION;
import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_PAST_VERSION;
@Test
public void testInvalidVersions() {
assertTrue(TEST_PAST_VERSION.compareTo(TEST_FUTURE_VERSION) < 0);
- assertTrue(TEST_PAST_VERSION.compareTo(BORON) < 0);
- assertTrue(TEST_FUTURE_VERSION.compareTo(BORON) > 0);
+ assertTrue(TEST_PAST_VERSION.compareTo(POTASSIUM) < 0);
+ assertTrue(TEST_FUTURE_VERSION.compareTo(POTASSIUM) > 0);
}
@Test
- public void testBoronVersion() throws Exception {
- assertEquals((short)5, BORON.shortValue());
- assertEquals(BORON, ABIVersion.valueOf(BORON.shortValue()));
- assertEquals(BORON, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(BORON))));
+ public void testMagnesiumVersion() throws Exception {
+ assertEquals((short)10, POTASSIUM.shortValue());
+ assertEquals(POTASSIUM, ABIVersion.valueOf(POTASSIUM.shortValue()));
+ assertEquals(POTASSIUM, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(POTASSIUM))));
}
- @Test(expected = PastVersionException.class)
- public void testInvalidPastVersion() throws Exception {
- ABIVersion.valueOf(TEST_PAST_VERSION.shortValue());
+ @Test
+ public void testInvalidPastVersion() {
+ assertThrows(PastVersionException.class, () -> ABIVersion.valueOf(TEST_PAST_VERSION.shortValue()));
}
- @Test(expected = FutureVersionException.class)
- public void testInvalidFutureVersion() throws Exception {
- ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue());
+ @Test
+ public void testInvalidFutureVersion() {
+ assertThrows(FutureVersionException.class, () -> ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue()));
}
private static byte[] writeVersion(final ABIVersion version) {
return bado.toByteArray();
}
- @Test(expected = IOException.class)
- public void testBadRead() throws IOException {
- ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION)));
+ @Test
+ public void testBadRead() {
+ final var in = ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION));
+ assertThrows(IOException.class, () -> ABIVersion.readFrom(in));
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
private static final AbortLocalTransactionRequest OBJECT = new AbortLocalTransactionRequest(TRANSACTION, ACTOR_REF);
- @Override
- protected AbortLocalTransactionRequest object() {
- return OBJECT;
+ public AbortLocalTransactionRequestTest() {
+ super(OBJECT);
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof AbortLocalTransactionRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((AbortLocalTransactionRequest) deserialize).getReplyTo());
+ protected void doAdditionalAssertions(final AbortLocalTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public abstract class AbstractLocalTransactionRequestTest<T extends AbstractLocalTransactionRequest<T>>
extends AbstractTransactionRequestTest<T> {
- @Override
- protected abstract T object();
+ protected AbstractLocalTransactionRequestTest(final T object) {
+ super(object, -1);
+ }
@Test
public void cloneAsVersionTest() {
- Assert.assertEquals(object(), object().cloneAsVersion(ABIVersion.BORON));
+ assertSame(object(), object().cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION));
}
@Override
- @Test(expected = UnsupportedOperationException.class)
+ @Test
public void serializationTest() {
- SerializationUtils.clone(object());
+ final var ex = assertThrows(UnsupportedOperationException.class, () -> SerializationUtils.clone(object()));
+ assertThat(ex.getMessage(), allOf(
+ startsWith("Local transaction request "),
+ endsWith(" should never be serialized")));
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.base.MoreObjects;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public abstract class AbstractReadTransactionRequestTest<T extends AbstractReadPathTransactionRequest<T>>
extends AbstractTransactionRequestTest<T> {
- protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+ protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
protected static final boolean SNAPSHOT_ONLY = true;
- @Override
- protected abstract T object();
+ protected AbstractReadTransactionRequestTest(final T object, final int baseSize) {
+ super(object, baseSize);
+ }
@Test
public void getPathTest() {
- Assert.assertEquals(PATH, object().getPath());
+ assertEquals(PATH, object().getPath());
}
@Test
public void isSnapshotOnlyTest() {
- Assert.assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
+ assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
- Assert.assertTrue(result.toString().contains("path=" + PATH));
+ final var result = object().addToStringAttributes(MoreObjects.toStringHelper(object())).toString();
+ assertThat(result, containsString("path=" + PATH));
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
HISTORY_IDENTIFIER, 0);
protected static final RequestException CAUSE = new RuntimeRequestException("fail", new Throwable());
+ private static final int CAUSE_SIZE = SerializationUtils.serialize(CAUSE).length;
+
+ private final T object;
+ private final int expectedSize;
- abstract T object();
+ protected AbstractRequestFailureTest(final T object, final int baseSize) {
+ this.object = requireNonNull(object);
+ this.expectedSize = baseSize + CAUSE_SIZE;
+ }
@Test
public void getCauseTest() {
- Assert.assertEquals(CAUSE, object().getCause());
+ assertEquals(CAUSE, object.getCause());
}
@Test
public void isHardFailureTest() {
- Assert.assertTrue(object().isHardFailure());
+ assertTrue(object.isHardFailure());
}
- @SuppressWarnings("unchecked")
@Test
public void serializationTest() {
- final Object deserialize = SerializationUtils.clone(object());
+ final var bytes = SerializationUtils.serialize(object);
+ assertEquals(expectedSize, bytes.length);
+
+ @SuppressWarnings("unchecked")
+ final var deserialize = (T) SerializationUtils.deserialize(bytes);
- Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
- Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
- Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+ assertEquals(object.getTarget(), deserialize.getTarget());
+ assertEquals(object.getVersion(), deserialize.getVersion());
+ assertEquals(object.getSequence(), deserialize.getSequence());
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
public abstract class AbstractRequestSuccessTest<T extends RequestSuccess<?, T>> {
-
private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
MemberName.forName("test"), FrontendType.forName("one"));
protected static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0);
- protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(
- CLIENT_IDENTIFIER, 0);
+ protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0);
+
+ private final @NonNull T object;
+ private final int expectedSize;
- protected abstract T object();
+ protected AbstractRequestSuccessTest(final T object, final int expectedSize) {
+ this.object = requireNonNull(object);
+ this.expectedSize = expectedSize;
+ }
- @SuppressWarnings("unchecked")
@Test
public void serializationTest() {
- final Object deserialize = SerializationUtils.clone(object());
+ final var bytes = SerializationUtils.serialize(object);
+ assertEquals(expectedSize, bytes.length);
+
+ @SuppressWarnings("unchecked")
+ final var deserialize = (T) SerializationUtils.deserialize(bytes);
- Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
- Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
- Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+ assertEquals(object.getTarget(), deserialize.getTarget());
+ assertEquals(object.getVersion(), deserialize.getVersion());
+ assertEquals(object.getSequence(), deserialize.getSequence());
doAdditionalAssertions(deserialize);
}
- protected abstract void doAdditionalAssertions(Object deserialize);
+ protected void doAdditionalAssertions(final T deserialize) {
+ // No-op by default
+ }
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
HISTORY_IDENTIFIER, 0);
- @Override
- protected abstract T object();
+ protected AbstractTransactionRequestTest(final T object, final int baseSize) {
+ super(object, baseSize);
+ }
@Test
public void toRequestFailureTest() {
- final Throwable cause = new Throwable();
- final RequestException exception = new RuntimeRequestException("fail", cause);
- final TransactionFailure failure = object().toRequestFailure(exception);
- Assert.assertNotNull(failure);
+ final var exception = new RuntimeRequestException("fail", new Throwable());
+ final var failure = object().toRequestFailure(exception);
+ assertNotNull(failure);
}
}
public abstract class AbstractTransactionSuccessTest<T extends TransactionSuccess<T>>
extends AbstractRequestSuccessTest<T> {
+ protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_IDENTIFIER,
+ 0);
- protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
- HISTORY_IDENTIFIER, 0);
-
+ protected AbstractTransactionSuccessTest(final T object, final int expectedSize) {
+ super(object, expectedSize);
+ }
}
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.base.MoreObjects;
-import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
public class CommitLocalTransactionRequestTest
extends AbstractLocalTransactionRequestTest<CommitLocalTransactionRequest> {
private static final DataTreeModification MODIFICATION = Mockito.mock(DataTreeModification.class);
private static final boolean COORDINATED = true;
- private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(
- TRANSACTION, 0, ACTOR_REF, MODIFICATION, null, COORDINATED);
+ private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(TRANSACTION, 0,
+ ACTOR_REF, MODIFICATION, null, COORDINATED);
- @Override
- protected CommitLocalTransactionRequest object() {
- return OBJECT;
+ public CommitLocalTransactionRequestTest() {
+ super(OBJECT);
}
@Test
public void getModificationTest() {
- Assert.assertEquals(MODIFICATION, OBJECT.getModification());
+ assertEquals(MODIFICATION, OBJECT.getModification());
}
@Test
public void isCoordinatedTest() {
- Assert.assertEquals(COORDINATED, OBJECT.isCoordinated());
+ assertEquals(COORDINATED, OBJECT.isCoordinated());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
- Assert.assertTrue(result.toString().contains("coordinated=" + COORDINATED));
+ final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+ assertThat(result, containsString("coordinated=" + COORDINATED));
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof CommitLocalTransactionRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((CommitLocalTransactionRequest) deserialize).getReplyTo());
- Assert.assertEquals(OBJECT.getModification(), ((CommitLocalTransactionRequest) deserialize).getModification());
+ protected void doAdditionalAssertions(final CommitLocalTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ assertEquals(OBJECT.getModification(), deserialize.getModification());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ConnectClientFailureTest extends AbstractRequestFailureTest<ConnectClientFailure> {
private static final ConnectClientFailure OBJECT = new ConnectClientFailure(CLIENT_IDENTIFIER, 0, CAUSE);
- @Override
- ConnectClientFailure object() {
- return OBJECT;
+ public ConnectClientFailureTest() {
+ super(OBJECT, 99);
}
@Test
public void cloneAsVersionTest() {
final ConnectClientFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
- Assert.assertEquals(OBJECT.getTarget(), clone.getTarget());
- Assert.assertEquals(OBJECT.getSequence(), clone.getSequence());
- Assert.assertEquals(OBJECT.getCause(), clone.getCause());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getCause(), clone.getCause());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableRangeSet;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
public class ConnectClientRequestTest extends AbstractRequestTest<ConnectClientRequest> {
private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
private static final ConnectClientRequest OBJECT = new ConnectClientRequest(
CLIENT_IDENTIFIER, 0, ACTOR_REF, MIN_VERSION, MAX_VERSION);
- @Override
- protected ConnectClientRequest object() {
- return OBJECT;
+ public ConnectClientRequestTest() {
+ super(OBJECT, 112);
}
@Test
public void getMinVersionTest() {
- Assert.assertEquals(MIN_VERSION, OBJECT.getMinVersion());
+ assertEquals(MIN_VERSION, OBJECT.getMinVersion());
}
@Test
public void getMaxVersionTest() {
- Assert.assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
+ assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
}
@Test
public void toRequestFailureTest() {
- final RequestException exception = new DeadTransactionException(ImmutableRangeSet.of());
- final ConnectClientFailure failure = OBJECT.toRequestFailure(exception);
- Assert.assertNotNull(failure);
+ final var exception = new DeadTransactionException(ImmutableRangeSet.of());
+ final var failure = OBJECT.toRequestFailure(exception);
+ assertNotNull(failure);
}
@Test
public void cloneAsVersionTest() {
- final ConnectClientRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertNotNull(clone);
- Assert.assertEquals(ABIVersion.BORON, clone.getVersion());
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertNotNull(clone);
+ assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
- Assert.assertTrue(result.toString().contains("minVersion=" + MIN_VERSION));
- Assert.assertTrue(result.toString().contains("maxVersion=" + MAX_VERSION));
+ final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+ assertThat(result, containsString("minVersion=" + MIN_VERSION));
+ assertThat(result, containsString("maxVersion=" + MAX_VERSION));
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ConnectClientRequest);
- final ConnectClientRequest casted = (ConnectClientRequest) deserialize;
-
- Assert.assertEquals(OBJECT.getMaxVersion(), casted.getMaxVersion());
- Assert.assertEquals(OBJECT.getMinVersion(), casted.getMinVersion());
- Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
+ protected void doAdditionalAssertions(final ConnectClientRequest deserialize) {
+ assertEquals(OBJECT.getMaxVersion(), deserialize.getMaxVersion());
+ assertEquals(OBJECT.getMinVersion(), deserialize.getMinVersion());
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.testkit.TestProbe;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
-import java.util.Collection;
import java.util.List;
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<ConnectClientSuccess> {
-
private static final DataTree TREE = new InMemoryDataTreeFactory().create(
DataTreeConfiguration.DEFAULT_OPERATIONAL);
private static final ActorSystem SYSTEM = ActorSystem.create("test");
private static final ActorSelection ACTOR_SELECTION = ActorSelection.apply(ACTOR_REF, "foo");
private static final List<ActorSelection> ALTERNATES = ImmutableList.of(ACTOR_SELECTION);
private static final int MAX_MESSAGES = 10;
- private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(
- CLIENT_IDENTIFIER, 0, ACTOR_REF, ALTERNATES, TREE, MAX_MESSAGES);
+ private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(CLIENT_IDENTIFIER, 0, ACTOR_REF,
+ ALTERNATES, TREE, MAX_MESSAGES);
- @Override
- protected ConnectClientSuccess object() {
- return OBJECT;
+ public ConnectClientSuccessTest() {
+ super(OBJECT, 146 + ACTOR_REF.path().toSerializationFormat().length());
}
@Before
@Test
public void testGetAlternates() {
- final Collection<ActorSelection> alternates = OBJECT.getAlternates();
- Assert.assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
+ final var alternates = OBJECT.getAlternates();
+ assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
}
@Test
public void testGetBackend() {
final ActorRef actorRef = OBJECT.getBackend();
- Assert.assertEquals(ACTOR_REF, actorRef);
+ assertEquals(ACTOR_REF, actorRef);
}
@Test
public void testGetDataTree() {
- final ReadOnlyDataTree tree = OBJECT.getDataTree().get();
- Assert.assertEquals(TREE, tree);
+ final ReadOnlyDataTree tree = OBJECT.getDataTree().orElseThrow();
+ assertEquals(TREE, tree);
}
@Test
public void testGetMaxMessages() {
- final int maxMessages = OBJECT.getMaxMessages();
- Assert.assertEquals(MAX_MESSAGES, maxMessages);
+ assertEquals(MAX_MESSAGES, OBJECT.getMaxMessages());
}
@Test
public void cloneAsVersionTest() {
- final ConnectClientSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getAlternates(), clone.getAlternates());
+ assertEquals(OBJECT.getBackend(), clone.getBackend());
+ assertEquals(OBJECT.getDataTree(), clone.getDataTree());
+ assertEquals(OBJECT.getMaxMessages(), clone.getMaxMessages());
}
@Test
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ConnectClientSuccess);
- Assert.assertEquals(OBJECT.getAlternates().size(), ((ConnectClientSuccess) deserialize).getAlternates().size());
- Assert.assertEquals(OBJECT.getBackend(), ((ConnectClientSuccess) deserialize).getBackend());
- Assert.assertEquals(Optional.empty(), ((ConnectClientSuccess) deserialize).getDataTree());
- Assert.assertEquals(OBJECT.getMaxMessages(), ((ConnectClientSuccess) deserialize).getMaxMessages());
+ protected void doAdditionalAssertions(final ConnectClientSuccess deserialize) {
+ assertEquals(OBJECT.getAlternates().size(), deserialize.getAlternates().size());
+ assertEquals(OBJECT.getBackend(), deserialize.getBackend());
+ assertEquals(Optional.empty(), deserialize.getDataTree());
+ assertEquals(OBJECT.getMaxMessages(), deserialize.getMaxMessages());
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ExistsTransactionRequestTest extends AbstractReadTransactionRequestTest<ExistsTransactionRequest> {
- private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+ private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF, PATH, SNAPSHOT_ONLY);
- @Override
- protected ExistsTransactionRequest object() {
- return OBJECT;
+ public ExistsTransactionRequestTest() {
+ super(OBJECT, 108);
}
@Test
public void cloneAsVersionTest() {
- final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
- final ExistsTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
- Assert.assertEquals(cloneVersion, clone.getVersion());
- Assert.assertEquals(OBJECT.getPath(), clone.getPath());
- Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+ final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+ final var clone = OBJECT.cloneAsVersion(cloneVersion);
+ assertEquals(cloneVersion, clone.getVersion());
+ assertEquals(OBJECT.getPath(), clone.getPath());
+ assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ExistsTransactionRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((ExistsTransactionRequest) deserialize).getReplyTo());
- Assert.assertEquals(OBJECT.getPath(), ((ExistsTransactionRequest) deserialize).getPath());
+ protected void doAdditionalAssertions(final ExistsTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ assertEquals(OBJECT.getPath(), deserialize.getPath());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.base.MoreObjects;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ExistsTransactionSuccessTest extends AbstractTransactionSuccessTest<ExistsTransactionSuccess> {
private static final boolean EXISTS = true;
- private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(
- TRANSACTION_IDENTIFIER, 0, EXISTS);
+ private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+ EXISTS);
- @Override
- protected ExistsTransactionSuccess object() {
- return OBJECT;
+ public ExistsTransactionSuccessTest() {
+ super(OBJECT, 99);
}
@Test
public void getExistsTest() {
- final boolean result = OBJECT.getExists();
- Assert.assertEquals(EXISTS, result);
+ assertEquals(EXISTS, OBJECT.getExists());
}
@Test
public void cloneAsVersionTest() {
- final ExistsTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getExists(), clone.getExists());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
- Assert.assertTrue(result.toString().contains("exists=" + EXISTS));
+ final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+ assertThat(result, containsString("exists=" + EXISTS));
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ExistsTransactionSuccess);
- Assert.assertEquals(OBJECT.getExists(), ((ExistsTransactionSuccess) deserialize).getExists());
+ protected void doAdditionalAssertions(final ExistsTransactionSuccess deserialize) {
+ assertEquals(OBJECT.getExists(), deserialize.getExists());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class LocalHistoryFailureTest extends AbstractRequestFailureTest<LocalHistoryFailure> {
private static final LocalHistoryFailure OBJECT = new LocalHistoryFailure(HISTORY_IDENTIFIER, 0, CAUSE);
- @Override
- LocalHistoryFailure object() {
- return OBJECT;
+ public LocalHistoryFailureTest() {
+ super(OBJECT, 99);
}
@Test
public void cloneAsVersionTest() {
- final LocalHistoryFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getCause(), clone.getCause());
}
}
\ No newline at end of file
package org.opendaylight.controller.cluster.access.commands;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class LocalHistorySuccessTest extends AbstractRequestSuccessTest<LocalHistorySuccess> {
private static final LocalHistorySuccess OBJECT = new LocalHistorySuccess(HISTORY_IDENTIFIER, 0);
- @Override
- protected LocalHistorySuccess object() {
- return OBJECT;
+ public LocalHistorySuccessTest() {
+ super(OBJECT, 96);
}
@Test
public void cloneAsVersionTest() {
- final LocalHistorySuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- assertEquals(ABIVersion.BORON, clone.getVersion());
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
assertEquals(OBJECT.getSequence(), clone.getSequence());
assertEquals(OBJECT.getTarget(), clone.getTarget());
}
-
- @Override
- protected void doAdditionalAssertions(final Object deserialize) {
- assertTrue(deserialize instanceof LocalHistorySuccess);
- }
}
package org.opendaylight.controller.cluster.access.commands;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.testkit.TestActors;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public class ModifyTransactionRequestBuilderTest {
-
private final MemberName memberName = MemberName.forName("member-1");
private final FrontendType frontendType = FrontendType.forName("test");
private final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, frontendType);
private final TransactionIdentifier transactionIdentifier =
new TransactionIdentifier(new LocalHistoryIdentifier(clientId, 0L), 0L);
private final ActorRef actorRef = ActorSystem.create("test").actorOf(Props.create(TestActors.EchoActor.class));
- private final NormalizedNode<?, ?> node = Builders.containerBuilder().withNodeIdentifier(
- YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+ private final NormalizedNode node = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+ .build();
private final TransactionModification transactionModification =
- new TransactionWrite(YangInstanceIdentifier.empty(), node);
+ new TransactionWrite(YangInstanceIdentifier.of(), node);
private final ModifyTransactionRequestBuilder modifyTransactionRequestBuilder =
new ModifyTransactionRequestBuilder(transactionIdentifier, actorRef);
@Test
public void testGetIdentifier() {
- final TransactionIdentifier identifier = modifyTransactionRequestBuilder.getIdentifier();
- Assert.assertEquals(transactionIdentifier, identifier);
+ final var identifier = modifyTransactionRequestBuilder.getIdentifier();
+ assertEquals(transactionIdentifier, identifier);
}
@Test
public void testBuildReady() {
modifyTransactionRequestBuilder.setReady();
- final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
- Assert.assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().get());
- Assert.assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0));
+ final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+ assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
+ assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0));
}
@Test
public void testBuildAbort() {
modifyTransactionRequestBuilder.setAbort();
- final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
- Assert.assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().get());
- Assert.assertTrue(modifyTransactionRequest.getModifications().isEmpty());
+ final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+ assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
+ assertTrue(modifyTransactionRequest.getModifications().isEmpty());
}
@Test
public void testBuildCommitTrue() {
modifyTransactionRequestBuilder.setCommit(true);
- final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
- Assert.assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().get());
+ final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+ assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
}
@Test
public void testBuildCommitFalse() {
modifyTransactionRequestBuilder.setCommit(false);
- final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
- Assert.assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().get());
+ final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+ assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
}
-
}
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.base.MoreObjects;
-import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ModifyTransactionRequestEmptyTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
+ private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF, List.of(), PROTOCOL);
- private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF, new ArrayList<>(), PROTOCOL);
-
- @Override
- protected ModifyTransactionRequest object() {
- return OBJECT;
+ public ModifyTransactionRequestEmptyTest() {
+ super(OBJECT, 108);
}
@Test
public void getPersistenceProtocolTest() {
- final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
- Assert.assertTrue(result.isPresent());
- Assert.assertEquals(PROTOCOL, result.get());
+ assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
}
@Test
public void getModificationsTest() {
- final List<TransactionModification> result = OBJECT.getModifications();
- Assert.assertNotNull(result);
- Assert.assertTrue(result.isEmpty());
+ assertEquals(List.of(), OBJECT.getModifications());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
- Assert.assertTrue(result.toString().contains("modifications=0"));
- Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+ final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+ assertThat(result, containsString("modifications=0"));
+ assertThat(result, containsString("protocol=" + PROTOCOL));
}
@Test
public void cloneAsVersionTest() {
- final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+ assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ModifyTransactionRequest);
- final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
- Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
- Assert.assertEquals(OBJECT.getModifications(), casted.getModifications());
- Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
+ protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ assertEquals(OBJECT.getModifications(), deserialize.getModifications());
+ assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.opendaylight.controller.cluster.access.commands.TransactionModification.TYPE_WRITE;
import com.google.common.base.MoreObjects;
-import com.google.common.collect.Lists;
import java.util.List;
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public class ModifyTransactionRequestTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
- private static final NormalizedNode<?, ?> NODE = Builders.containerBuilder().withNodeIdentifier(
- YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+ private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+ .build();
- private static final List<TransactionModification> MODIFICATIONS = Lists.newArrayList(
- new TransactionWrite(YangInstanceIdentifier.empty(), NODE));
+ private static final List<TransactionModification> MODIFICATIONS = List.of(
+ new TransactionWrite(YangInstanceIdentifier.of(), NODE));
private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
- private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF, MODIFICATIONS, PROTOCOL);
+ private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF, MODIFICATIONS, PROTOCOL);
- @Override
- protected ModifyTransactionRequest object() {
- return OBJECT;
+ public ModifyTransactionRequestTest() {
+ super(OBJECT, 140);
}
@Test
public void getPersistenceProtocolTest() {
- final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
- Assert.assertTrue(result.isPresent());
- Assert.assertEquals(PROTOCOL, result.get());
+ assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
}
@Test
public void getModificationsTest() {
- final List<TransactionModification> result = OBJECT.getModifications();
- Assert.assertNotNull(result);
- Assert.assertEquals(MODIFICATIONS, result);
+ assertEquals(MODIFICATIONS, OBJECT.getModifications());
}
@Test
public void addToStringAttributesTest() {
- final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
- Assert.assertTrue(result.toString().contains("modifications=1"));
- Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+ final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+ assertThat(result, containsString("modifications=1"));
+ assertThat(result, containsString("protocol=" + PROTOCOL));
}
@Test
public void cloneAsVersionTest() {
- final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+ assertEquals(OBJECT.getModifications(), clone.getModifications());
+ assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ModifyTransactionRequest);
- final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
- Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
- Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
-
- Assert.assertNotNull(casted.getModifications());
- Assert.assertEquals(1, casted.getModifications().size());
- final TransactionModification modification = casted.getModifications().get(0);
- Assert.assertEquals(YangInstanceIdentifier.empty(), modification.getPath());
- Assert.assertEquals(TYPE_WRITE, modification.getType());
+ protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
+ assertNotNull(deserialize.getModifications());
+ assertEquals(1, deserialize.getModifications().size());
+ final var modification = deserialize.getModifications().get(0);
+ assertEquals(YangInstanceIdentifier.of(), modification.getPath());
+ assertEquals(TYPE_WRITE, modification.getType());
}
}
package org.opendaylight.controller.cluster.access.commands;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ModifyTransactionSuccessTest extends AbstractTransactionSuccessTest<ModifyTransactionSuccess> {
private static final ModifyTransactionSuccess OBJECT = new ModifyTransactionSuccess(TRANSACTION_IDENTIFIER, 0);
- @Override
- protected ModifyTransactionSuccess object() {
- return OBJECT;
+ public ModifyTransactionSuccessTest() {
+ super(OBJECT, 98);
}
@Test
public void cloneAsVersionTest() {
- final ModifyTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- assertEquals(ABIVersion.BORON, clone.getVersion());
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
assertEquals(OBJECT.getSequence(), clone.getSequence());
assertEquals(OBJECT.getTarget(), clone.getTarget());
}
-
- @Override
- protected void doAdditionalAssertions(final Object deserialize) {
- assertTrue(deserialize instanceof ModifyTransactionSuccess);
- }
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class ReadTransactionRequestTest extends AbstractReadTransactionRequestTest<ReadTransactionRequest> {
- private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+ private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF, PATH, SNAPSHOT_ONLY);
- @Override
- protected ReadTransactionRequest object() {
- return OBJECT;
+ public ReadTransactionRequestTest() {
+ super(OBJECT, 108);
}
@Test
public void cloneAsVersionTest() {
- final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
- final ReadTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
- Assert.assertEquals(cloneVersion, clone.getVersion());
- Assert.assertEquals(OBJECT.getPath(), clone.getPath());
- Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+ final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+ final var clone = OBJECT.cloneAsVersion(cloneVersion);
+ assertEquals(cloneVersion, clone.getVersion());
+ assertEquals(OBJECT.getPath(), clone.getPath());
+ assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof ReadTransactionRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((ReadTransactionRequest) deserialize).getReplyTo());
- Assert.assertEquals(OBJECT.getPath(), ((ReadTransactionRequest) deserialize).getPath());
+ protected void doAdditionalAssertions(final ReadTransactionRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ assertEquals(OBJECT.getPath(), deserialize.getPath());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.junit.Assert.assertEquals;
+
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public class ReadTransactionSuccessNoDataTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
- private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
- TRANSACTION_IDENTIFIER, 0, Optional.empty());
+ private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+ Optional.empty());
- @Override
- protected ReadTransactionSuccess object() {
- return OBJECT;
+ public ReadTransactionSuccessNoDataTest() {
+ super(OBJECT, 99);
}
@Test
public void getDataTest() {
- final Optional<NormalizedNode<?, ?>> result = OBJECT.getData();
- Assert.assertFalse(result.isPresent());
+ assertEquals(Optional.empty(), OBJECT.getData());
}
@Test
public void cloneAsVersionTest() {
- final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getData(), clone.getData());
}
@Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof ReadTransactionSuccess);
- Assert.assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+ protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+ assertEquals(OBJECT.getData(), deserialize.getData());
}
}
*/
package org.opendaylight.controller.cluster.access.commands;
+import static org.junit.Assert.assertEquals;
+
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public class ReadTransactionSuccessTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
- private static final NormalizedNode<?, ?> NODE = Builders.containerBuilder().withNodeIdentifier(
- YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+ private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+ .build();
- private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
- TRANSACTION_IDENTIFIER, 0, Optional.of(NODE));
+ private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+ Optional.of(NODE));
- @Override
- protected ReadTransactionSuccess object() {
- return OBJECT;
+ public ReadTransactionSuccessTest() {
+ super(OBJECT, 129);
}
@Test
public void getDataTest() {
- final Optional<NormalizedNode<?, ?>> result = OBJECT.getData();
- Assert.assertTrue(result.isPresent());
- Assert.assertEquals(NODE.getValue(), result.get().getValue());
+ assertEquals(Optional.of(NODE), OBJECT.getData());
}
@Test
public void cloneAsVersionTest() {
- final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getData(), clone.getData());
}
@Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof ReadTransactionSuccess);
- Assert.assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+ protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+ assertEquals(OBJECT.getData(), deserialize.getData());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.primitives.UnsignedLong;
+import java.util.List;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+
+public class SkipTransactionsRequestTest extends AbstractTransactionRequestTest<SkipTransactionsRequest> {
+ private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF, List.of(UnsignedLong.ONE));
+
+ public SkipTransactionsRequestTest() {
+ super(OBJECT, 109);
+ }
+
+ @Test
+ public void cloneAsVersionTest() {
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+ }
+
+ @Override
+ protected void doAdditionalAssertions(final SkipTransactionsRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+
+public class SkipTransactionsResponseTest extends AbstractTransactionSuccessTest<SkipTransactionsResponse> {
+ private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse(TRANSACTION_IDENTIFIER, 0);
+
+ public SkipTransactionsResponseTest() {
+ super(OBJECT, 98);
+ }
+
+ @Test
+ public void cloneAsVersionTest() {
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionAbortRequestTest extends AbstractTransactionRequestTest<TransactionAbortRequest> {
- private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+ private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF);
- @Override
- protected TransactionAbortRequest object() {
- return OBJECT;
+ public TransactionAbortRequestTest() {
+ super(OBJECT, 101);
}
@Test
public void cloneAsVersionTest() {
- final TransactionAbortRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionAbortRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionAbortRequest)deserialize).getReplyTo());
+ protected void doAdditionalAssertions(final TransactionAbortRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionAbortSuccessTest extends AbstractTransactionSuccessTest<TransactionAbortSuccess> {
- private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(
- TRANSACTION_IDENTIFIER, 0);
+ private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(TRANSACTION_IDENTIFIER, 0);
- @Override
- protected TransactionAbortSuccess object() {
- return OBJECT;
+ public TransactionAbortSuccessTest() {
+ super(OBJECT, 98);
}
@Test
public void cloneAsVersionTest() {
- final TransactionAbortSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
- }
-
- @Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionAbortSuccess);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionCanCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCanCommitSuccess> {
- private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(
- TRANSACTION_IDENTIFIER, 0);
+ private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(TRANSACTION_IDENTIFIER,
+ 0);
- @Override
- protected TransactionCanCommitSuccess object() {
- return OBJECT;
+ public TransactionCanCommitSuccessTest() {
+ super(OBJECT, 99);
}
@Test
public void cloneAsVersionTest() {
- final TransactionCanCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
- }
-
- @Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionCanCommitSuccess);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCommitSuccess> {
- private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(
- TRANSACTION_IDENTIFIER, 0);
+ private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(TRANSACTION_IDENTIFIER, 0);
- @Override
- protected TransactionCommitSuccess object() {
- return OBJECT;
+ public TransactionCommitSuccessTest() {
+ super(OBJECT, 98);
}
@Test
public void cloneAsVersionTest() {
- final TransactionCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
- }
-
- @Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionCommitSuccess);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionDoCommitRequestTest extends AbstractTransactionRequestTest<TransactionDoCommitRequest> {
- private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+ private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF);
- @Override
- protected TransactionDoCommitRequest object() {
- return OBJECT;
+ public TransactionDoCommitRequestTest() {
+ super(OBJECT, 102);
}
@Test
public void cloneAsVersionTest() {
- final TransactionDoCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionDoCommitRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionDoCommitRequest) deserialize).getReplyTo());
+ protected void doAdditionalAssertions(final TransactionDoCommitRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionFailureTest extends AbstractRequestFailureTest<TransactionFailure> {
private static final TransactionFailure OBJECT = new TransactionFailure(TRANSACTION_IDENTIFIER, 0, CAUSE);
- @Override
- TransactionFailure object() {
- return OBJECT;
+ public TransactionFailureTest() {
+ super(OBJECT, 100);
}
@Test
public void cloneAsVersionTest() {
- final TransactionFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getCause(), clone.getCause());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionPreCommitRequestTest extends AbstractTransactionRequestTest<TransactionPreCommitRequest> {
- private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+ private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF);
- @Override
- protected TransactionPreCommitRequest object() {
- return OBJECT;
+ public TransactionPreCommitRequestTest() {
+ super(OBJECT, 102);
}
@Test
public void cloneAsVersionTest() {
- final TransactionPreCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionPreCommitRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPreCommitRequest) deserialize).getReplyTo());
+ protected void doAdditionalAssertions(final TransactionPreCommitRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionPreCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionPreCommitSuccess> {
- private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(
- TRANSACTION_IDENTIFIER, 0);
+ private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(TRANSACTION_IDENTIFIER,
+ 0);
- @Override
- protected TransactionPreCommitSuccess object() {
- return OBJECT;
+ public TransactionPreCommitSuccessTest() {
+ super(OBJECT, 99);
}
@Test
public void cloneAsVersionTest() {
- final TransactionPreCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
- }
-
- @Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionPreCommitSuccess);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionPurgeRequestTest extends AbstractTransactionRequestTest<TransactionPurgeRequest> {
- private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(
- TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+ private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0,
+ ACTOR_REF);
- @Override
- protected TransactionPurgeRequest object() {
- return OBJECT;
+ public TransactionPurgeRequestTest() {
+ super(OBJECT, 101);
}
@Test
public void cloneAsVersionTest() {
- final TransactionPurgeRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
+ assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
}
@Override
- protected void doAdditionalAssertions(final Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionPurgeRequest);
- Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPurgeRequest) deserialize).getReplyTo());
+ protected void doAdditionalAssertions(final TransactionPurgeRequest deserialize) {
+ assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.commands;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.junit.Test;
import org.opendaylight.controller.cluster.access.ABIVersion;
public class TransactionPurgeResponseTest extends AbstractTransactionSuccessTest<TransactionPurgeResponse> {
- private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(
- TRANSACTION_IDENTIFIER, 0);
+ private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, 0);
- @Override
- protected TransactionPurgeResponse object() {
- return OBJECT;
+ public TransactionPurgeResponseTest() {
+ super(OBJECT, 98);
}
@Test
public void cloneAsVersionTest() {
- final TransactionPurgeResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
- Assert.assertEquals(OBJECT, clone);
- }
-
- @Override
- protected void doAdditionalAssertions(Object deserialize) {
- Assert.assertTrue(deserialize instanceof TransactionPurgeResponse);
+ final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+ assertEquals(OBJECT.getSequence(), clone.getSequence());
+ assertEquals(OBJECT.getTarget(), clone.getTarget());
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.access.concepts;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Before;
import org.junit.Test;
public abstract class AbstractEnvelopeTest<E extends Envelope<?>> {
+ protected record EnvelopeDetails<E extends Envelope<?>>(E envelope, int expectedSize) {
+ // Nothing else
+ }
+
private static final FrontendIdentifier FRONTEND =
new FrontendIdentifier(MemberName.forName("test"), FrontendIdentifierTest.ONE_FRONTEND_TYPE);
private static final ClientIdentifier CLIENT = new ClientIdentifier(FRONTEND, 0);
protected static final TransactionIdentifier OBJECT = new TransactionIdentifier(HISTORY, 0);
private E envelope;
+ private int expectedSize;
@Before
public void setUp() throws Exception {
- envelope = createEnvelope();
+ final var details = createEnvelope();
+ envelope = requireNonNull(details.envelope);
+ expectedSize = details.expectedSize;
}
@Test
public void testProxySerializationDeserialization() {
final byte[] serializedBytes = SerializationUtils.serialize(envelope);
- final Object deserialize = SerializationUtils.deserialize(serializedBytes);
- checkDeserialized((E) deserialize);
+ assertEquals(expectedSize, serializedBytes.length);
+ @SuppressWarnings("unchecked")
+ final E deserialize = (E) SerializationUtils.deserialize(serializedBytes);
+ checkDeserialized(deserialize);
}
private void checkDeserialized(final E deserializedEnvelope) {
- Assert.assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
- Assert.assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
- final Message<?, ?> expectedMessage = envelope.getMessage();
- final Message<?, ?> actualMessage = deserializedEnvelope.getMessage();
- Assert.assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
- Assert.assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
- Assert.assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
- Assert.assertEquals(expectedMessage.getClass(), actualMessage.getClass());
+ assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
+ assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
+ final var expectedMessage = envelope.getMessage();
+ final var actualMessage = deserializedEnvelope.getMessage();
+ assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
+ assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
+ assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
+ assertEquals(expectedMessage.getClass(), actualMessage.getClass());
doAdditionalAssertions(envelope, deserializedEnvelope);
}
- protected abstract E createEnvelope();
+ protected abstract EnvelopeDetails<E> createEnvelope();
- @SuppressWarnings("checkstyle:hiddenField")
protected abstract void doAdditionalAssertions(E envelope, E resolvedObject);
}
assertEquals(object().hashCode(), equalObject().hashCode());
}
-
@Test
public final void testSerialization() throws Exception {
assertTrue(object().equals(copy(object())));
*/
package org.opendaylight.controller.cluster.access.concepts;
+import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.ExtendedActorSystem;
import akka.serialization.JavaSerializer;
import akka.testkit.TestProbe;
import com.google.common.base.MoreObjects;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Before;
import org.junit.Test;
public abstract class AbstractRequestTest<T extends Request<?, T>> {
private static final ActorSystem SYSTEM = ActorSystem.create("test");
protected static final ActorRef ACTOR_REF = TestProbe.apply(SYSTEM).ref();
+ private static final int ACTOR_REF_SIZE = ACTOR_REF.path().toSerializationFormat().length();
+
+ private final T object;
+ private final int expectedSize;
- protected abstract T object();
+ protected AbstractRequestTest(final T object, final int baseSize) {
+ this.object = requireNonNull(object);
+ this.expectedSize = baseSize + ACTOR_REF_SIZE;
+ }
+
+ protected final T object() {
+ return object;
+ }
@Before
public void setUp() {
@Test
public void getReplyToTest() {
- Assert.assertEquals(ACTOR_REF, object().getReplyTo());
+ assertEquals(ACTOR_REF, object.getReplyTo());
}
@Test
public void addToStringAttributesCommonTest() {
- final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
- Assert.assertTrue(result.toString().contains("replyTo=" + ACTOR_REF));
+ final var result = object.addToStringAttributes(MoreObjects.toStringHelper(object));
+ assertThat(result.toString(), containsString("replyTo=" + ACTOR_REF));
}
- @SuppressWarnings("unchecked")
@Test
public void serializationTest() {
- final Object deserialize = SerializationUtils.clone(object());
+ final byte[] bytes = SerializationUtils.serialize(object);
+ assertEquals(expectedSize, bytes.length);
+ @SuppressWarnings("unchecked")
+ final T deserialize = (T) SerializationUtils.deserialize(bytes);
- Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
- Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
- Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+ assertEquals(object.getTarget(), deserialize.getTarget());
+ assertEquals(object.getVersion(), deserialize.getVersion());
+ assertEquals(object.getSequence(), deserialize.getSequence());
doAdditionalAssertions(deserialize);
}
- protected abstract void doAdditionalAssertions(Object deserialize);
+ protected abstract void doAdditionalAssertions(T deserialize);
}
@Override
int expectedSize() {
- return 114;
+ return 94;
}
}
*/
package org.opendaylight.controller.cluster.access.concepts;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
import static org.junit.Assert.assertEquals;
import java.io.DataInput;
import java.io.IOException;
+import org.apache.commons.lang3.SerializationUtils;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.yangtools.concepts.WritableIdentifier;
public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
-
@Override
- protected FailureEnvelope createEnvelope() {
- final RequestFailure<?, ?> message =
- new MockFailure(OBJECT, new RuntimeRequestException("msg", new RuntimeException()), 42);
- return new FailureEnvelope(message, 1L, 2L, 11L);
+ protected EnvelopeDetails<FailureEnvelope> createEnvelope() {
+ final var cause = new RuntimeRequestException("msg", new RuntimeException());
+ final int causeSize = SerializationUtils.serialize(cause).length;
+ return new EnvelopeDetails<>(new FailureEnvelope(new MockFailure(OBJECT, cause, 42), 1L, 2L, 11L),
+ causeSize + 216);
}
@Override
protected void doAdditionalAssertions(final FailureEnvelope envelope, final FailureEnvelope resolvedObject) {
assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
- final RequestException expectedCause = envelope.getMessage().getCause();
- final RequestException actualCause = resolvedObject.getMessage().getCause();
+ final var expectedCause = envelope.getMessage().getCause();
+ final var actualCause = resolvedObject.getMessage().getCause();
assertEquals(expectedCause.getMessage(), actualCause.getMessage());
assertEquals(expectedCause.isRetriable(), actualCause.isRetriable());
}
- private static class MockRequestFailureProxy extends AbstractRequestFailureProxy<WritableIdentifier, MockFailure> {
+ private static class MockRequestFailureProxy implements RequestFailure.SerialForm<WritableIdentifier, MockFailure> {
+ @java.io.Serial
+ private static final long serialVersionUID = 5015515628523887221L;
+
+ private MockFailure message;
@SuppressWarnings("checkstyle:RedundantModifier")
public MockRequestFailureProxy() {
}
private MockRequestFailureProxy(final MockFailure mockFailure) {
- super(mockFailure);
+ message = requireNonNull(mockFailure);
}
@Override
- protected MockFailure createFailure(final WritableIdentifier target, final long sequence,
- final RequestException failureCause) {
+ public MockFailure createFailure(final WritableIdentifier target, final long sequence,
+ final RequestException failureCause) {
return new MockFailure(target, failureCause, sequence);
}
@Override
- protected WritableIdentifier readTarget(final DataInput in) throws IOException {
+ public WritableIdentifier readTarget(final DataInput in) throws IOException {
return TransactionIdentifier.readFrom(in);
}
+ @Override
+ public MockFailure message() {
+ return verifyNotNull(message);
+ }
+
+ @Override
+ public void setMessage(final MockFailure message) {
+ this.message = requireNonNull(message);
+ }
+
+ @Override
+ public Object readResolve() {
+ return message();
+ }
}
private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
MockFailure(final WritableIdentifier target, final RequestException cause, final long sequence) {
}
@Override
- protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
+ protected RequestFailure.SerialForm<WritableIdentifier, MockFailure> externalizableProxy(
final ABIVersion version) {
return new MockRequestFailureProxy(this);
}
@Override
int expectedSize() {
- return 115;
+ return 93;
}
}
@Override
int expectedSize() {
- return 104;
+ return 88;
}
@Test
@Override
int expectedSize() {
- return 121;
+ return 95;
}
}
@Override
int expectedSize() {
- return 101;
+ return 87;
}
@Test
*/
package org.opendaylight.controller.cluster.access.concepts;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.ExtendedActorSystem;
import akka.serialization.JavaSerializer;
import akka.testkit.TestProbe;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
public class RequestEnvelopeTest extends AbstractEnvelopeTest<RequestEnvelope> {
-
private ActorSystem system;
private ActorRef replyTo;
private TestProbe replyToProbe;
}
@Override
- protected RequestEnvelope createEnvelope() {
+ protected EnvelopeDetails<RequestEnvelope> createEnvelope() {
replyToProbe = new TestProbe(system);
replyTo = replyToProbe.ref();
- final TransactionPurgeRequest message = new TransactionPurgeRequest(OBJECT, 2L, replyTo);
- return new RequestEnvelope(message, 1L, 2L);
+ final int refSize = replyTo.path().toSerializationFormat().length();
+
+ return new EnvelopeDetails<>(new RequestEnvelope(new TransactionPurgeRequest(OBJECT, 2L, replyTo), 1L, 2L),
+ refSize + 179);
}
@Override
protected void doAdditionalAssertions(final RequestEnvelope envelope, final RequestEnvelope resolvedObject) {
final Request<?, ?> actual = resolvedObject.getMessage();
- Assert.assertTrue(actual instanceof TransactionPurgeRequest);
- final TransactionPurgeRequest purgeRequest = (TransactionPurgeRequest) actual;
- Assert.assertEquals(replyTo, purgeRequest.getReplyTo());
- final TransactionPurgeResponse response = new TransactionPurgeResponse(OBJECT, 2L);
+ assertThat(actual, instanceOf(TransactionPurgeRequest.class));
+ final var purgeRequest = (TransactionPurgeRequest) actual;
+ assertEquals(replyTo, purgeRequest.getReplyTo());
+ final var response = new TransactionPurgeResponse(OBJECT, 2L);
resolvedObject.sendSuccess(response, 11L);
- final SuccessEnvelope successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
- Assert.assertEquals(response, successEnvelope.getMessage());
- final RuntimeRequestException failResponse = new RuntimeRequestException("fail", new RuntimeException());
+ final var successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
+ assertEquals(response, successEnvelope.getMessage());
+ final var failResponse = new RuntimeRequestException("fail", new RuntimeException());
resolvedObject.sendFailure(failResponse, 11L);
- final FailureEnvelope failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
- Assert.assertEquals(failResponse, failureEnvelope.getMessage().getCause());
+ final var failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
+ assertEquals(failResponse, failureEnvelope.getMessage().getCause());
}
@After
*/
package org.opendaylight.controller.cluster.access.concepts;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
public class SuccessEnvelopeTest extends AbstractEnvelopeTest<SuccessEnvelope> {
-
@Override
- protected SuccessEnvelope createEnvelope() {
- final RequestSuccess<?, ?> message = new TransactionAbortSuccess(OBJECT, 2L);
- return new SuccessEnvelope(message, 1L, 2L, 11L);
+ protected EnvelopeDetails<SuccessEnvelope> createEnvelope() {
+ return new EnvelopeDetails<>(new SuccessEnvelope(new TransactionAbortSuccess(OBJECT, 2L), 1L, 2L, 11L), 180);
}
@Override
- protected void doAdditionalAssertions(final SuccessEnvelope envelope,
- final SuccessEnvelope resolvedObject) {
- Assert.assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
+ protected void doAdditionalAssertions(final SuccessEnvelope envelope, final SuccessEnvelope resolvedObject) {
+ assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
}
}
\ No newline at end of file
@Override
int expectedSize() {
- return 121;
+ return 96;
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>cds-access-api</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
</dependency>
-
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.checkerframework</groupId>
+ <artifactId>checker-qual</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-core</artifactId>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-testkit_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava-testlib</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava-testlib</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-clustering-commons</artifactId>
- <type>test-jar</type>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
+ <type>test-jar</type>
</dependency>
</dependencies>
<build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>3.1.1</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.persistence.AbstractPersistentActor;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Frontend actor which takes care of persisting generations and creates an appropriate ClientIdentifier.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class AbstractClientActor extends AbstractPersistentActor {
private static final Logger LOG = LoggerFactory.getLogger(AbstractClientActor.class);
private AbstractClientActorBehavior<?> currentBehavior;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
* Base behavior attached to {@link AbstractClientActor}.
*
* @param <C> Type of associated context
- *
- * @author Robert Varga
*/
-@Beta
public abstract class AbstractClientActorBehavior<C extends AbstractClientActorContext> implements AutoCloseable {
private final @NonNull C context;
@Override
public void close() {
+ // No-op
}
/**
private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(MAX_DELAY_SECONDS);
private final Lock lock = new ReentrantLock();
- private final ClientActorContext context;
- @GuardedBy("lock")
- private final TransmitQueue queue;
+ private final @NonNull ClientActorContext context;
private final @NonNull Long cookie;
private final String backendName;
+ @GuardedBy("lock")
+ private final TransmitQueue queue;
@GuardedBy("lock")
private boolean haveTimer;
// Private constructor to avoid code duplication.
private AbstractClientConnection(final AbstractClientConnection<T> oldConn, final TransmitQueue newQueue,
final String backendName) {
- this.context = requireNonNull(oldConn.context);
- this.cookie = requireNonNull(oldConn.cookie);
+ context = oldConn.context;
+ cookie = oldConn.cookie;
this.backendName = requireNonNull(backendName);
- this.queue = requireNonNull(newQueue);
+ queue = requireNonNull(newQueue);
// Will be updated in finishReplay if needed.
- this.lastReceivedTicks = oldConn.lastReceivedTicks;
+ lastReceivedTicks = oldConn.lastReceivedTicks;
}
// This constructor is only to be called by ConnectingClientConnection constructor.
this.context = requireNonNull(context);
this.cookie = requireNonNull(cookie);
this.backendName = requireNonNull(backendName);
- this.queue = new TransmitQueue.Halted(queueDepth);
- this.lastReceivedTicks = currentTime();
+ queue = new TransmitQueue.Halted(queueDepth);
+ lastReceivedTicks = currentTime();
}
// This constructor is only to be called (indirectly) by ReconnectingClientConnection constructor.
requireNonNull(oldConn.context).messageSlicer()), newBackend.getName());
}
- public final ClientActorContext context() {
+ public final @NonNull ClientActorContext context() {
return context;
}
return cookie;
}
- public final ActorRef localActor() {
+ public final @NonNull ActorRef localActor() {
return context.self();
}
*
* <p>
* Note that unlike {@link #sendRequest(Request, Consumer)}, this method does not exert backpressure, hence it
- * should never be called from an application thread.
+ * should never be called from an application thread and serves mostly for moving requests between queues.
*
* @param request Request to send
* @param callback Callback to invoke
if (delay.isPresent()) {
// If there is new delay, schedule a timer
- scheduleTimer(delay.getAsLong());
+ scheduleTimer(delay.orElseThrow());
} else {
LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
}
}
if (maybeEntry.isPresent()) {
- final TransmittedConnectionEntry entry = maybeEntry.get();
+ final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
LOG.debug("Completing {} with {}", entry, envelope);
entry.complete(envelope.getMessage());
}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.Stopwatch;
import com.google.common.base.Verify;
import java.util.Collection;
/**
* A behavior, which handles messages sent to a {@link AbstractClientActor}.
- *
- * @author Robert Varga
*/
-@Beta
public abstract class ClientActorBehavior<T extends BackendInfo> extends
RecoveredClientActorBehavior<ClientActorContext> implements Identifiable<ClientIdentifier> {
/**
return ((InternalCommand<T>) command).execute(this);
}
- if (command instanceof SuccessEnvelope) {
- return onRequestSuccess((SuccessEnvelope) command);
+ if (command instanceof SuccessEnvelope successEnvelope) {
+ return onRequestSuccess(successEnvelope);
}
-
- if (command instanceof FailureEnvelope) {
- return internalOnRequestFailure((FailureEnvelope) command);
+ if (command instanceof FailureEnvelope failureEnvelope) {
+ return internalOnRequestFailure(failureEnvelope);
}
if (MessageAssembler.isHandledMessage(command)) {
}
private static long extractCookie(final Identifier id) {
- if (id instanceof TransactionIdentifier) {
- return ((TransactionIdentifier) id).getHistoryId().getCookie();
- } else if (id instanceof LocalHistoryIdentifier) {
- return ((LocalHistoryIdentifier) id).getCookie();
+ if (id instanceof TransactionIdentifier transactionId) {
+ return transactionId.getHistoryId().getCookie();
+ } else if (id instanceof LocalHistoryIdentifier historyId) {
+ return historyId.getCookie();
} else {
throw new IllegalArgumentException("Unhandled identifier " + id);
}
* sessionId and if it does not match our current connection just ignore it.
*/
final Optional<T> optBackend = conn.getBackendInfo();
- if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) {
+ if (optBackend.isPresent() && optBackend.orElseThrow().getSessionId() != command.getSessionId()) {
LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(),
conn, command);
return this;
LOG.error("{}: failed to resolve shard {}", persistenceId(), shard, failure);
final RequestException cause;
- if (failure instanceof RequestException) {
- cause = (RequestException) failure;
+ if (failure instanceof RequestException requestException) {
+ cause = requestException;
} else {
cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
}
final Long shard = oldConn.cookie();
LOG.info("{}: refreshing backend for shard {}", persistenceId(), shard);
- resolver().refreshBackendInfo(shard, conn.getBackendInfo().get()).whenComplete(
+ resolver().refreshBackendInfo(shard, conn.getBackendInfo().orElseThrow()).whenComplete(
(backend, failure) -> context().executeInActor(behavior -> {
backendConnectFinished(shard, conn, backend, failure);
return behavior;
import akka.actor.ActorSystem;
import akka.actor.Cancellable;
import akka.actor.Scheduler;
-import com.google.common.annotations.Beta;
import com.google.common.base.Ticker;
import java.util.concurrent.TimeUnit;
import org.eclipse.jdt.annotation.NonNull;
* Time-keeping in a client actor is based on monotonic time. The precision of this time can be expected to be the
* same as {@link System#nanoTime()}, but it is not tied to that particular clock. Actor clock is exposed as
* a {@link Ticker}, which can be obtained via {@link #ticker()}. This class is thread-safe.
- *
- * @author Robert Varga
*/
-@Beta
public class ClientActorContext extends AbstractClientActorContext implements Identifiable<ClientIdentifier> {
private final ExecutionContext executionContext;
private final ClientIdentifier identifier;
final ClientIdentifier identifier, final ClientActorConfig config) {
super(self, persistenceId);
this.identifier = requireNonNull(identifier);
- this.scheduler = requireNonNull(system).scheduler();
- this.executionContext = system.dispatcher();
- this.dispatchers = new Dispatchers(system.dispatchers());
+ scheduler = requireNonNull(system).scheduler();
+ executionContext = system.dispatcher();
+ dispatchers = new Dispatchers(system.dispatchers());
this.config = requireNonNull(config);
messageSlicer = MessageSlicer.builder().messageSliceSize(config.getMaximumMessageSliceSize())
*/
package org.opendaylight.controller.cluster.access.client;
-import com.google.common.annotations.Beta;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
-@Beta
+/**
+ * A connected connection.
+ *
+ * @param <T> Backend info type
+ */
public final class ConnectedClientConnection<T extends BackendInfo> extends AbstractReceivingClientConnection<T> {
-
ConnectedClientConnection(final AbstractClientConnection<T> oldConnection, final T newBackend) {
super(oldConnection, newBackend);
}
*/
package org.opendaylight.controller.cluster.access.client;
-import com.google.common.annotations.Beta;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
-@Beta
public final class ConnectingClientConnection<T extends BackendInfo> extends AbstractClientConnection<T> {
/**
* A wild estimate on how deep a queue should be. Without having knowledge of the remote actor we can only
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
import java.util.function.Consumer;
/**
* Single entry in a {@link AbstractClientConnection}. Tracks the request, the associated callback and time when
* the request was first enqueued.
- *
- * @author Robert Varga
*/
-@Beta
public class ConnectionEntry implements Immutable {
private final Consumer<Response<?, ?>> callback;
private final Request<?, ?> request;
ConnectionEntry(final Request<?, ?> request, final Consumer<Response<?, ?>> callback, final long now) {
this.request = requireNonNull(request);
this.callback = requireNonNull(callback);
- this.enqueuedTicks = now;
+ enqueuedTicks = now;
}
ConnectionEntry(final ConnectionEntry entry) {
*/
package org.opendaylight.controller.cluster.access.client;
-import com.google.common.annotations.Beta;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.StampedLock;
/**
* A lock implementation which allows users to perform optimistic reads and validate them in a fashion similar
* to {@link StampedLock}. In case a read is contented with a write, the read side will throw
* an {@link InversibleLockException}, which the caller can catch and use to wait for the write to resolve.
- *
- * @author Robert Varga
*/
-@Beta
public final class InversibleLock {
- private static final AtomicReferenceFieldUpdater<InversibleLock, CountDownLatch> LATCH_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(InversibleLock.class, CountDownLatch.class, "latch");
+ private static final VarHandle LATCH;
+
+ static {
+ try {
+ LATCH = MethodHandles.lookup().findVarHandle(InversibleLock.class, "latch", CountDownLatch.class);
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new ExceptionInInitializerError(e);
+ }
+ }
private final StampedLock lock = new StampedLock();
+
+ @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD",
+ justification = "https://github.com/spotbugs/spotbugs/issues/2749")
private volatile CountDownLatch latch;
/**
// Write-locked. Read the corresponding latch and if present report an exception, which will propagate
// and force release of locks.
- final CountDownLatch local = latch;
+ final var local = latch;
if (local != null) {
throw new InversibleLockException(local);
}
}
public long writeLock() {
- final CountDownLatch local = new CountDownLatch(1);
- final boolean taken = LATCH_UPDATER.compareAndSet(this, null, local);
- Verify.verify(taken);
-
+ verify(LATCH.compareAndSet(this, null, new CountDownLatch(1)));
return lock.writeLock();
}
public void unlockWrite(final long stamp) {
- final CountDownLatch local = LATCH_UPDATER.getAndSet(this, null);
- Verify.verifyNotNull(local);
+ final var local = verifyNotNull((CountDownLatch) LATCH.getAndSet(this, null));
lock.unlockWrite(stamp);
local.countDown();
}
-
}
import static java.util.Objects.requireNonNull;
-import com.google.common.annotations.Beta;
+import java.io.Serial;
import java.util.concurrent.CountDownLatch;
/**
* Exception thrown from {@link InversibleLock#optimisticRead()} and can be used to wait for the racing write
* to complete using {@link #awaitResolution()}.
- *
- * @author Robert Varga
*/
-@Beta
public final class InversibleLockException extends RuntimeException {
+ @Serial
private static final long serialVersionUID = 1L;
private final transient CountDownLatch latch;
* @param now tick number corresponding to caller's present
*/
ProgressTracker(final ProgressTracker oldTracker, final long now) {
- this.defaultTicksPerTask = oldTracker.defaultTicksPerTask;
- this.tasksEncountered = this.tasksClosed = oldTracker.tasksClosed;
- this.lastClosed = oldTracker.lastClosed;
- this.nearestAllowed = oldTracker.nearestAllowed; // Call cancelDebt explicitly if needed.
- this.lastIdle = oldTracker.lastIdle;
- this.elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
+ defaultTicksPerTask = oldTracker.defaultTicksPerTask;
+ tasksEncountered = tasksClosed = oldTracker.tasksClosed;
+ lastClosed = oldTracker.lastClosed;
+ // Call cancelDebt explicitly if needed.
+ nearestAllowed = oldTracker.nearestAllowed;
+ lastIdle = oldTracker.lastIdle;
+ elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
if (!oldTracker.isIdle()) {
transitToIdle(now);
}
*
* @return number of tasks started but not finished yet
*/
- final long tasksOpen() { // TODO: Should we return int?
+ // TODO: Should we return int?
+ final long tasksOpen() {
// TODO: Should we check the return value is non-negative?
return tasksEncountered - tasksClosed;
}
LOG.debug("{}: persisting new identifier {}", persistenceId(), nextId);
context().saveSnapshot(nextId);
return new SavingClientActorBehavior(context(), nextId);
- } else if (recover instanceof SnapshotOffer) {
- lastId = (ClientIdentifier) ((SnapshotOffer)recover).snapshot();
+ } else if (recover instanceof SnapshotOffer snapshotOffer) {
+ lastId = (ClientIdentifier) snapshotOffer.snapshot();
LOG.debug("{}: recovered identifier {}", persistenceId(), lastId);
} else {
LOG.warn("{}: ignoring recovery message {}", persistenceId(), recover);
SavingClientActorBehavior(final InitialClientActorContext context, final ClientIdentifier nextId) {
super(context);
- this.myId = requireNonNull(nextId);
+ myId = requireNonNull(nextId);
}
@Override
AbstractClientActorBehavior<?> onReceiveCommand(final Object command) {
- if (command instanceof SaveSnapshotFailure) {
- LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) command).cause());
+ if (command instanceof SaveSnapshotFailure saveFailure) {
+ LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
return null;
- } else if (command instanceof SaveSnapshotSuccess) {
- LOG.debug("{}: got command: {}", persistenceId(), command);
- SaveSnapshotSuccess saved = (SaveSnapshotSuccess)command;
+ } else if (command instanceof SaveSnapshotSuccess saved) {
+ LOG.debug("{}: got command: {}", persistenceId(), saved);
context().deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
saved.metadata().timestamp() - 1, 0L, 0L));
return this;
- } else if (command instanceof DeleteSnapshotsSuccess) {
- LOG.debug("{}: got command: {}", persistenceId(), command);
- } else if (command instanceof DeleteSnapshotsFailure) {
+ } else if (command instanceof DeleteSnapshotsSuccess deleteSuccess) {
+ LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+ } else if (command instanceof DeleteSnapshotsFailure deleteFailure) {
// Not treating this as a fatal error.
- LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
- ((DeleteSnapshotsFailure) command).cause());
+ LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
} else {
LOG.debug("{}: stashing command {}", persistenceId(), command);
context().stash();
*
* <p>
* This class is not thread-safe, as it is expected to be guarded by {@link AbstractClientConnection}.
- *
- * @author Robert Varga
*/
-abstract class TransmitQueue {
+abstract sealed class TransmitQueue {
static final class Halted extends TransmitQueue {
// For ConnectingClientConnection.
Halted(final int targetDepth) {
private final Deque<TransmittedConnectionEntry> inflight = new ArrayDeque<>();
private final Deque<ConnectionEntry> pending = new ArrayDeque<>();
- private final AveragingProgressTracker tracker; // Cannot be just ProgressTracker as we are inheriting limits.
+ // Cannot be just ProgressTracker as we are inheriting limits.
+ private final AveragingProgressTracker tracker;
private ReconnectForwarder successor;
/**
return Optional.empty();
}
- final TransmittedConnectionEntry entry = maybeEntry.get();
+ final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
tracker.closeTask(now, entry.getEnqueuedTicks(), entry.getTxTicks(), envelope.getExecutionTimeNanos());
// We have freed up a slot, try to transmit something
return false;
}
- inflight.addLast(maybeTransmitted.get());
+ inflight.addLast(maybeTransmitted.orElseThrow());
return true;
}
}
// Check if the entry has (ever) been transmitted
- if (!(e instanceof TransmittedConnectionEntry)) {
+ if (!(e instanceof TransmittedConnectionEntry te)) {
return Optional.empty();
}
- final TransmittedConnectionEntry te = (TransmittedConnectionEntry) e;
-
// Now check session match
if (envelope.getSessionId() != te.getSessionId()) {
LOG.debug("Expecting session {}, ignoring response {}", te.getSessionId(), envelope);
/**
* Abstract base class for client actors and their components.
- *
- * @author Robert Varga
*/
public abstract class AbstractClientActorTest {
private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
backendProbe = new TestProbe(system);
contextProbe = new TestProbe(system);
package org.opendaylight.controller.cluster.access.client;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
public static ClientActorConfig newMockClientActorConfig() {
ClientActorConfig mockConfig = mock(ClientActorConfig.class);
- doReturn(2_000_000).when(mockConfig).getMaximumMessageSliceSize();
- doReturn(1_000_000_000).when(mockConfig).getFileBackedStreamingThreshold();
- doReturn(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS)
- .when(mockConfig).getBackendAlivenessTimerInterval();
+ lenient().doReturn(2_000_000).when(mockConfig).getMaximumMessageSliceSize();
+ lenient().doReturn(1_000_000_000).when(mockConfig).getFileBackedStreamingThreshold();
doReturn(AbstractClientConnection.DEFAULT_REQUEST_TIMEOUT_NANOS).when(mockConfig).getRequestTimeout();
- doReturn(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS)
- .when(mockConfig).getNoProgressTimeout();
+ lenient().doReturn(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS)
+ .when(mockConfig).getBackendAlivenessTimerInterval();
+ lenient().doReturn(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS)
+ .when(mockConfig).getNoProgressTimeout();
return mockConfig;
}
*/
package org.opendaylight.controller.cluster.access.client;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
import com.typesafe.config.ConfigFactory;
-import java.lang.reflect.Field;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Answers;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import scala.concurrent.duration.FiniteDuration;
-public class ActorBehaviorTest {
-
+@ExtendWith(MockitoExtension.class)
+class ActorBehaviorTest {
private static final String MEMBER_1_FRONTEND_TYPE_1 = "member-1-frontend-type-1";
private static final FiniteDuration TIMEOUT = FiniteDuration.create(5, TimeUnit.SECONDS);
+ @Mock
+ private InternalCommand<BackendInfo> cmd;
+ @Mock(answer = Answers.CALLS_REAL_METHODS)
+ private ClientActorBehavior<BackendInfo> initialBehavior;
+ @Mock
+ private AbstractClientActorContext ctx;
+
private ActorSystem system;
private TestProbe probe;
- private ClientActorBehavior<BackendInfo> initialBehavior;
private MockedSnapshotStore.SaveRequest saveRequest;
private FrontendIdentifier id;
private ActorRef mockedActor;
- @Before
- public void setUp() throws Exception {
- initialBehavior = createInitialBehaviorMock();
+ @BeforeEach
+ void beforeEach() throws Exception {
+ //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
+ //use reflection to work around this
+ final var context = AbstractClientActorBehavior.class.getDeclaredField("context");
+ context.setAccessible(true);
+ context.set(initialBehavior, ctx);
+ final var persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
+ persistenceId.setAccessible(true);
+ persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
+
system = ActorSystem.apply("system1");
final ActorRef storeRef = system.registerExtension(Persistence.lookup()).snapshotStoreFor(null,
ConfigFactory.empty());
saveRequest = handleRecovery(null);
}
- @After
- public void tearDown() {
+ @AfterEach
+ void afterEach() {
TestKit.shutdownActorSystem(system);
}
@Test
- public void testInitialBehavior() {
- final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
- when(cmd.execute(any())).thenReturn(initialBehavior);
+ void testInitialBehavior() {
+ doReturn(initialBehavior).when(cmd).execute(any());
mockedActor.tell(cmd, ActorRef.noSender());
verify(cmd, timeout(1000)).execute(initialBehavior);
}
@Test
- public void testCommandStashing() {
+ void testCommandStashing() {
system.stop(mockedActor);
mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
- final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
- when(cmd.execute(any())).thenReturn(initialBehavior);
+ doReturn(initialBehavior).when(cmd).execute(any());
//send messages before recovery is completed
mockedActor.tell(cmd, ActorRef.noSender());
mockedActor.tell(cmd, ActorRef.noSender());
}
@Test
- public void testRecoveryAfterRestart() {
+ void testRecoveryAfterRestart() {
system.stop(mockedActor);
mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
final MockedSnapshotStore.SaveRequest newSaveRequest =
handleRecovery(new SelectedSnapshot(saveRequest.getMetadata(), saveRequest.getSnapshot()));
- Assert.assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
+ assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
}
@Test
- public void testRecoveryAfterRestartFrontendIdMismatch() {
+ void testRecoveryAfterRestartFrontendIdMismatch() {
system.stop(mockedActor);
//start actor again
mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
}
@Test
- public void testRecoveryAfterRestartSaveSnapshotFail() {
+ void testRecoveryAfterRestartSaveSnapshotFail() {
system.stop(mockedActor);
mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
probe.watch(mockedActor);
}
@Test
- public void testRecoveryAfterRestartDeleteSnapshotsFail() {
+ void testRecoveryAfterRestartDeleteSnapshotsFail() {
system.stop(mockedActor);
mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
probe.watch(mockedActor);
probe.expectNoMessage();
}
- @SuppressWarnings("unchecked")
- private static ClientActorBehavior<BackendInfo> createInitialBehaviorMock() throws Exception {
- final ClientActorBehavior<BackendInfo> initialBehavior = mock(ClientActorBehavior.class);
- //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
- //use reflection to work around this
- final Field context = AbstractClientActorBehavior.class.getDeclaredField("context");
- context.setAccessible(true);
- final AbstractClientActorContext ctx = mock(AbstractClientActorContext.class);
- context.set(initialBehavior, ctx);
- final Field persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
- persistenceId.setAccessible(true);
- persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
- return initialBehavior;
- }
-
private MockedSnapshotStore.SaveRequest handleRecovery(final SelectedSnapshot savedState) {
probe.expectMsgClass(MockedSnapshotStore.LoadRequest.class);
//offer snapshot
}
private static class MockedActor extends AbstractClientActor {
-
private final ClientActorBehavior<?> initialBehavior;
private final ClientActorConfig mockConfig = AccessClientUtil.newMockClientActorConfig();
return mockConfig;
}
}
-
}
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import scala.concurrent.duration.FiniteDuration;
+@RunWith(MockitoJUnitRunner.class)
public class ClientActorContextTest {
private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
private static final FrontendType FRONTEND_TYPE =
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
probe = new TestProbe(system);
ctx = new ClientActorContext(probe.ref(), PERSISTENCE_ID, system,
import org.opendaylight.controller.cluster.messaging.MessageSlice;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public class ConnectedClientConnectionTest
extends AbstractClientConnectionTest<ConnectedClientConnection<BackendInfo>, BackendInfo> {
@Override
protected ConnectedClientConnection<BackendInfo> createConnection() {
- final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+ final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
backend.getName());
return new ConnectedClientConnection<>(connectingConn, backend);
new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_ID, 0L), 0L);
ModifyTransactionRequestBuilder reqBuilder =
new ModifyTransactionRequestBuilder(identifier, replyToProbe.ref());
- reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.empty(), Builders.containerBuilder()
- .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(
- QName.create("namespace", "localName"))).build()));
+ reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.of(),
+ ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+ .build()));
reqBuilder.setSequence(0L);
final Request<?, ?> request = reqBuilder.build();
connection.sendRequest(request, callback);
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
/**
* Test suite covering logic contained in {@link ConnectingClientConnection}. It assumes {@link ConnectionEntryTest}
* passes.
- *
- * @author Robert Varga
*/
+@RunWith(MockitoJUnitRunner.class)
public class ConnectingClientConnectionTest {
private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
private static final long serialVersionUID = 1L;
}
@Override
- protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
- final ABIVersion version) {
+ protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
return null;
}
}
@Override
- protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+ protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
return null;
}
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
doNothing().when(mockCallback).accept(any(MockFailure.class));
ticker = new FakeTicker();
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
/**
* Test suite covering logic contained in {@link ConnectionEntry}.
- *
- * @author Robert Varga
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ConnectionEntryTest {
private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
private static final long serialVersionUID = 1L;
}
@Override
- protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
- final ABIVersion version) {
+ protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
return null;
}
}
@Override
- protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+ protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
return null;
}
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
doNothing().when(mockCallback).accept(any(MockFailure.class));
ticker = new FakeTicker();
@Override
protected ReconnectingClientConnection<BackendInfo> createConnection() {
- final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+ final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
backend.getName());
final ConnectedClientConnection<BackendInfo> connectedConn =
@Override
protected TransmitQueue.Transmitting createQueue() {
doReturn(false).when(mockMessageSlicer).slice(any());
- backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.BORON, 3);
+ backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.current(), 3);
return new TransmitQueue.Transmitting(new TransmitQueue.Halted(0), 0, backendInfo, now(), mockMessageSlicer);
}
Optional<TransmittedConnectionEntry> transmitted = queue.transmit(entry, now);
assertTrue(transmitted.isPresent());
- assertEquals(request, transmitted.get().getRequest());
- assertEquals(callback, transmitted.get().getCallback());
+ assertEquals(request, transmitted.orElseThrow().getRequest());
+ assertEquals(callback, transmitted.orElseThrow().getCallback());
final RequestEnvelope requestEnvelope = probe.expectMsgClass(RequestEnvelope.class);
assertEquals(request, requestEnvelope.getMessage());
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
-
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>concepts</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-dom-api</artifactId>
- </dependency>
</dependencies>
<build>
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-
-/**
- * An extension to {@link DOMDataTreeProducer}, which allows users access
- * to information about the backing shard.
- *
- * @author Robert Varga
- */
-@Beta
-public interface CDSDataTreeProducer extends DOMDataTreeProducer {
- /**
- * Return a {@link CDSShardAccess} handle. This handle will remain valid
- * as long as this producer is operational. Returned handle can be accessed
- * independently from this producer and is not subject to the usual access
- * restrictions imposed on DOMDataTreeProducer state.
- *
- * @param subtree One of the subtrees to which are currently under control of this producer
- * @return A shard access handle.
- * @throws NullPointerException when subtree is null
- * @throws IllegalArgumentException if the specified subtree is not controlled by this producer
- * @throws IllegalStateException if this producer is no longer operational
- * @throws IllegalThreadStateException if the access rules to this producer
- * are violated, for example if this producer is bound and this thread
- * is currently not executing from a listener context.
- */
- @NonNull CDSShardAccess getShardAccess(@NonNull DOMDataTreeIdentifier subtree);
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import java.util.concurrent.CompletionStage;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Unprivileged access interface to shard information. Provides read-only access to operational details about a CDS
- * shard.
- *
- * @author Robert Varga
- */
-@Beta
-public interface CDSShardAccess {
- /**
- * Return the shard identifier.
- *
- * @return Shard identifier.
- * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which the associated
- * {@link CDSDataTreeProducer} is no longer valid.
- */
- @NonNull DOMDataTreeIdentifier getShardIdentifier();
-
- /**
- * Return the shard leader location relative to the local node.
- *
- * @return Shard leader location.
- * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which the associated
- * {@link CDSDataTreeProducer} is no longer valid.
- */
- @NonNull LeaderLocation getLeaderLocation();
-
- /**
- * Request the shard leader to be moved to the local node. The request will be evaluated against shard state and
- * satisfied if leader movement is possible. If current shard policy or state prevents the movement from happening,
- * the returned {@link CompletionStage} will report an exception.
- *
- * <p>
- * This is a one-time operation, which does not prevent further movement happening in future. Even if this request
- * succeeds, there is no guarantee that the leader will remain local in face of failures, shutdown or any future
- * movement requests from other nodes.
- *
- * <p>
- * Note that due to asynchronous nature of CDS, the leader may no longer be local by the time the returned
- * {@link CompletionStage} reports success.
- *
- * @return A {@link CompletionStage} representing the request.
- * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which the associated
- * {@link CDSDataTreeProducer} is no longer valid.
- */
- @NonNull CompletionStage<Void> makeLeaderLocal();
-
- /**
- * Register a listener to shard location changes. Each listener object can be registered at most once.
- *
- * @param listener Listener object
- * @return A {@link LeaderLocationListenerRegistration} for the listener.
- * @throws IllegalArgumentException if the specified listener is already registered.
- * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which the associated
- * {@link CDSDataTreeProducer} is no longer valid.
- * @throws NullPointerException if listener is null.
- */
- @NonNull <L extends LeaderLocationListener> LeaderLocationListenerRegistration<L> registerLeaderLocationListener(
- @NonNull L listener);
-}
*/
package org.opendaylight.controller.cluster.dom.api;
-import com.google.common.annotations.Beta;
-
/**
* Enumeration of possible shard leader locations relative to the local node.
- *
- * @author Robert Varga
*/
-@Beta
public enum LeaderLocation {
/**
* The leader is co-located on this node.
*/
package org.opendaylight.controller.cluster.dom.api;
-import com.google.common.annotations.Beta;
-import java.util.EventListener;
import org.eclipse.jdt.annotation.NonNull;
/**
* Listener for shard leader location changes.
- *
- * @author Robert Varga
*/
-@Beta
-public interface LeaderLocationListener extends EventListener {
+public interface LeaderLocationListener {
/**
* Invoked when shard leader location changes.
*
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * Registration of a {@link LeaderLocationListener}.
- *
- * @author Robert Varga
- *
- * @param <T> Listener type
- */
-@Beta
-public interface LeaderLocationListenerRegistration<T extends LeaderLocationListener> extends ListenerRegistration<T> {
-
-}
<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../../md-sal/parent</relativePath>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../parent</relativePath>
</parent>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
+ <artifactId>cds-mgmt-api</artifactId>
<packaging>bundle</packaging>
<dependencies>
<dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>ietf-topology</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
</dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Import-Package>
+ javax.management;resolution:=optional,
+ *
+ </Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
<scm>
<connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
<tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
</scm>
</project>
--- /dev/null
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+module org.opendaylight.controller.cluster.mgmt.api {
+ exports org.opendaylight.controller.cluster.mgmt.api;
+ // FIXME: 4.0.0: collapse these packages
+ exports org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+ exports org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+
+ // Annotation-only dependencies
+ requires static transitive java.management;
+ requires static transitive org.eclipse.jdt.annotation;
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+import javax.management.MXBean;
+
/**
* MXBean interface for retrieving write Tx commit statistics.
*
* @author Thomas Pantelis
*/
+@MXBean
public interface CommitStatsMXBean {
-
/**
* Returns the total number of commits that have occurred.
*
*/
package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+import javax.management.MXBean;
/**
* MXBean interface for data store configuration.
*
* @author Thomas Pantelis
*/
+@MXBean
public interface DatastoreConfigurationMXBean {
long getShardTransactionIdleTimeoutInSeconds();
int getShardSnapshotDataThresholdPercentage();
+ int getShardSnapshotDataThreshold();
+
long getShardSnapshotBatchCount();
long getShardTransactionCommitTimeoutInSeconds();
boolean getTransactionContextDebugEnabled();
- @Deprecated(forRemoval = true)
- int getMaxShardDataChangeExecutorPoolSize();
-
- @Deprecated(forRemoval = true)
- int getMaxShardDataChangeExecutorQueueSize();
-
- @Deprecated(forRemoval = true)
- int getMaxShardDataChangeListenerQueueSize();
-
- @Deprecated(forRemoval = true)
- int getMaxShardDataStoreExecutorQueueSize();
-
int getMaximumMessageSliceSize();
}
*/
package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+import javax.management.MXBean;
+
/**
* JMX bean for general datastore info.
*
* @author Thomas Pantelis
*/
+@MXBean
public interface DatastoreInfoMXBean {
+
double getTransactionCreationRateLimit();
+
+ /**
+ * Return the number of {@code AskTimeoutException}s encountered by the datastore.
+ *
+ * @return Number of exceptions encountered
+ */
+ long getAskTimeoutExceptionCount();
+
+ /**
+ * Reset the number of {@code AskTimeoutException}s encountered by the datastore.
+ */
+ void resetAskTimeoutExceptionCount();
}
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
import java.util.List;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
+import javax.management.MXBean;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
/**
* MXBean interface for reporting shard data tree change listener information.
*
* @author Thomas Pantelis
*/
+@MXBean
+@NonNullByDefault
public interface ShardDataTreeListenerInfoMXBean {
+
List<DataTreeListenerInfo> getDataTreeChangeListenerInfo();
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
import java.util.List;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
+import javax.management.MXBean;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
/**
* MXBean interface for shard stats.
*
* @author syedbahm
*/
+@MXBean
public interface ShardStatsMXBean {
String getShardName();
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.messages;
+package org.opendaylight.controller.cluster.mgmt.api;
import static java.util.Objects.requireNonNull;
-import java.beans.ConstructorProperties;
+import javax.management.ConstructorParameters;
+import org.eclipse.jdt.annotation.NonNullByDefault;
/**
- * Response to a {@link GetInfo} query from a data tree listener actor.
+ * Information about a registered listener.
*
* @author Thomas Pantelis
*/
-public class DataTreeListenerInfo {
+@NonNullByDefault
+public final class DataTreeListenerInfo {
private final String listener;
private final String registeredPath;
private final boolean isEnabled;
private final long notificationCount;
- @ConstructorProperties({"listener","registeredPath", "isEnabled", "notificationCount"})
+ @ConstructorParameters({"listener","registeredPath", "isEnabled", "notificationCount"})
public DataTreeListenerInfo(final String listener, final String registeredPath, final boolean isEnabled,
final long notificationCount) {
this.listener = requireNonNull(listener);
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.raft.client.messages;
+package org.opendaylight.controller.cluster.mgmt.api;
-import java.beans.ConstructorProperties;
+import static java.util.Objects.requireNonNull;
+
+import javax.management.ConstructorParameters;
+import org.eclipse.jdt.annotation.NonNullByDefault;
/**
* A bean class containing a snapshot of information for a follower returned from GetOnDemandRaftStats.
*
* @author Thomas Pantelis
*/
-public class FollowerInfo {
+@NonNullByDefault
+public final class FollowerInfo {
private final String id;
private final long nextIndex;
private final long matchIndex;
private final String timeSinceLastActivity;
private final boolean isVoting;
- @ConstructorProperties({"id","nextIndex", "matchIndex", "active", "timeSinceLastActivity", "voting"})
- public FollowerInfo(String id, long nextIndex, long matchIndex, boolean active, String timeSinceLastActivity,
- boolean voting) {
- this.id = id;
+ @ConstructorParameters({"id","nextIndex", "matchIndex", "active", "timeSinceLastActivity", "voting"})
+ public FollowerInfo(final String id, final long nextIndex, final long matchIndex, final boolean active,
+ final String timeSinceLastActivity, final boolean voting) {
+ this.id = requireNonNull(id);
this.nextIndex = nextIndex;
this.matchIndex = matchIndex;
this.isActive = active;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../parent</relativePath>
+ </parent>
+
+ <artifactId>eos-dom-akka</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-eos-common-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-eos-dom-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-codec-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>general-entity</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <scope>provided</scope>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor-testkit-typed_2.13</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.awaitility</groupId>
+ <artifactId>awaitility</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-codec</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-generator</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-runtime-api</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-runtime-spi</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-singleton-api</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-singleton-impl</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>ietf-topology</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.ActorSystem;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Scheduler;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.cluster.typed.Cluster;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
+import org.opendaylight.controller.eos.akka.bootstrap.EOSMain;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.Terminate;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipState;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipStateReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.UnregisterListener;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTree;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntities;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.RpcOutput;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * DOMEntityOwnershipService implementation backed by native Akka clustering constructs. We use distributed-data
+ * to track all registered candidates and cluster-singleton to maintain a single cluster-wide authority which selects
+ * the appropriate owners.
+ */
+@Singleton
+@Component(immediate = true, service = { DOMEntityOwnershipService.class, DataCenterControl.class })
+public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable {
+ private static final Logger LOG = LoggerFactory.getLogger(AkkaEntityOwnershipService.class);
+ private static final String DATACENTER_PREFIX = "dc";
+ private static final Duration DATACENTER_OP_TIMEOUT = Duration.ofSeconds(20);
+ private static final Duration QUERY_TIMEOUT = Duration.ofSeconds(10);
+
+ private final Set<DOMEntity> registeredEntities = ConcurrentHashMap.newKeySet();
+ private final String localCandidate;
+ private final Scheduler scheduler;
+ private final String datacenter;
+
+ private final ActorRef<BootstrapCommand> bootstrap;
+ private final RunningContext runningContext;
+ private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+ private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+ private final ActorRef<StateCheckerCommand> ownerStateChecker;
+ protected final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+ private final BindingInstanceIdentifierCodec iidCodec;
+
+ private Registration reg;
+
+ @VisibleForTesting
+ protected AkkaEntityOwnershipService(final ActorSystem actorSystem, final BindingCodecTree codecTree)
+ throws ExecutionException, InterruptedException {
+ final var typedActorSystem = Adapter.toTyped(actorSystem);
+ scheduler = typedActorSystem.scheduler();
+
+ final Cluster cluster = Cluster.get(typedActorSystem);
+ datacenter = cluster.selfMember().dataCenter();
+
+ localCandidate = cluster.selfMember().getRoles().stream()
+ .filter(role -> !role.contains(DATACENTER_PREFIX))
+ .findFirst()
+ .orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+
+ iidCodec = codecTree.getInstanceIdentifierCodec();
+ bootstrap = Adapter.spawn(actorSystem, Behaviors.setup(
+ context -> EOSMain.create(iidCodec)), "EOSBootstrap");
+
+ final CompletionStage<RunningContext> ask = AskPattern.ask(bootstrap,
+ GetRunningContext::new, Duration.ofSeconds(5), scheduler);
+ runningContext = ask.toCompletableFuture().get();
+
+ candidateRegistry = runningContext.getCandidateRegistry();
+ listenerRegistry = runningContext.getListenerRegistry();
+ ownerStateChecker = runningContext.getOwnerStateChecker();
+ ownerSupervisor = runningContext.getOwnerSupervisor();
+ }
+
+ @Inject
+ @Activate
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+ justification = "Non-final for testing 'this' reference is expected to be stable at registration time")
+ public AkkaEntityOwnershipService(@Reference final ActorSystemProvider actorProvider,
+ @Reference final RpcProviderService rpcProvider, @Reference final BindingCodecTree codecTree)
+ throws ExecutionException, InterruptedException {
+ this(actorProvider.getActorSystem(), codecTree);
+
+ reg = rpcProvider.registerRpcImplementations(
+ (GetEntity) this::getEntity,
+ (GetEntities) this::getEntities,
+ (GetEntityOwner) this::getEntityOwner);
+ }
+
+ @PreDestroy
+ @Deactivate
+ @Override
+ public void close() throws InterruptedException, ExecutionException {
+ if (reg != null) {
+ reg.close();
+ reg = null;
+ }
+ AskPattern.ask(bootstrap, Terminate::new, Duration.ofSeconds(5), scheduler).toCompletableFuture().get();
+ }
+
+ @Override
+ public Registration registerCandidate(final DOMEntity entity)
+ throws CandidateAlreadyRegisteredException {
+ if (!registeredEntities.add(entity)) {
+ throw new CandidateAlreadyRegisteredException(entity);
+ }
+
+ final RegisterCandidate msg = new RegisterCandidate(entity, localCandidate);
+ LOG.debug("Registering candidate with message: {}", msg);
+ candidateRegistry.tell(msg);
+
+ return new CandidateRegistration(entity, this);
+ }
+
+ @Override
+ public Registration registerListener(final String entityType, final DOMEntityOwnershipListener listener) {
+ LOG.debug("Registering listener {} for type {}", listener, entityType);
+ listenerRegistry.tell(new RegisterListener(entityType, listener));
+
+ return new ListenerRegistration(listener, entityType, this);
+ }
+
+ @Override
+ public Optional<EntityOwnershipState> getOwnershipState(final DOMEntity entity) {
+ LOG.debug("Retrieving ownership state for {}", entity);
+
+ final CompletionStage<GetOwnershipStateReply> result = AskPattern.ask(ownerStateChecker,
+ replyTo -> new GetOwnershipState(entity, replyTo),
+ Duration.ofSeconds(5), scheduler);
+
+ final GetOwnershipStateReply reply;
+ try {
+ reply = result.toCompletableFuture().get();
+ } catch (final InterruptedException | ExecutionException exception) {
+ LOG.warn("Failed to retrieve ownership state for {}", entity, exception);
+ return Optional.empty();
+ }
+
+ return Optional.ofNullable(reply.getOwnershipState());
+ }
+
+ @Override
+ public boolean isCandidateRegistered(final DOMEntity forEntity) {
+ return registeredEntities.contains(forEntity);
+ }
+
+ @Override
+ public ListenableFuture<Empty> activateDataCenter() {
+ LOG.debug("Activating datacenter: {}", datacenter);
+
+ return toListenableFuture("Activate",
+ AskPattern.ask(ownerSupervisor, ActivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler));
+ }
+
+ @Override
+ public ListenableFuture<Empty> deactivateDataCenter() {
+ LOG.debug("Deactivating datacenter: {}", datacenter);
+ return toListenableFuture("Deactivate",
+ AskPattern.ask(ownerSupervisor, DeactivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler));
+ }
+
+ @VisibleForTesting
+ final ListenableFuture<RpcResult<GetEntitiesOutput>> getEntities(final GetEntitiesInput input) {
+ return toRpcFuture(AskPattern.ask(ownerStateChecker, GetEntitiesRequest::new, QUERY_TIMEOUT, scheduler),
+ reply -> reply.toOutput(iidCodec));
+ }
+
+ @VisibleForTesting
+ final ListenableFuture<RpcResult<GetEntityOutput>> getEntity(final GetEntityInput input) {
+ return toRpcFuture(AskPattern.ask(ownerStateChecker,
+ (final ActorRef<GetEntityReply> replyTo) -> new GetEntityRequest(replyTo, input), QUERY_TIMEOUT, scheduler),
+ GetEntityReply::toOutput);
+ }
+
+ @VisibleForTesting
+ final ListenableFuture<RpcResult<GetEntityOwnerOutput>> getEntityOwner(final GetEntityOwnerInput input) {
+ return toRpcFuture(AskPattern.ask(ownerStateChecker,
+ (final ActorRef<GetEntityOwnerReply> replyTo) -> new GetEntityOwnerRequest(replyTo, input), QUERY_TIMEOUT,
+ scheduler), GetEntityOwnerReply::toOutput);
+ }
+
+ void unregisterCandidate(final DOMEntity entity) {
+ LOG.debug("Unregistering candidate for {}", entity);
+
+ if (registeredEntities.remove(entity)) {
+ candidateRegistry.tell(new UnregisterCandidate(entity, localCandidate));
+ }
+ }
+
+ void unregisterListener(final String entityType, final DOMEntityOwnershipListener listener) {
+ LOG.debug("Unregistering listener {} for type {}", listener, entityType);
+
+ listenerRegistry.tell(new UnregisterListener(entityType, listener));
+ }
+
+ @VisibleForTesting
+ RunningContext getRunningContext() {
+ return runningContext;
+ }
+
+ private static <R extends StateCheckerReply, O extends RpcOutput> ListenableFuture<RpcResult<O>> toRpcFuture(
+ final CompletionStage<R> stage, final Function<R, O> outputFunction) {
+
+ final SettableFuture<RpcResult<O>> future = SettableFuture.create();
+ stage.whenComplete((reply, failure) -> {
+ if (failure != null) {
+ future.setException(failure);
+ } else {
+ future.set(RpcResultBuilder.success(outputFunction.apply(reply)).build());
+ }
+ });
+ return future;
+ }
+
+ private static ListenableFuture<Empty> toListenableFuture(final String op, final CompletionStage<?> stage) {
+ final SettableFuture<Empty> future = SettableFuture.create();
+ stage.whenComplete((reply, failure) -> {
+ if (failure != null) {
+ LOG.warn("{} DataCenter failed", op, failure);
+ future.setException(failure);
+ } else {
+ LOG.debug("{} DataCenter successful", op);
+ future.set(Empty.value());
+ }
+ });
+ return future;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static java.util.Objects.requireNonNull;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+final class CandidateRegistration extends AbstractObjectRegistration<DOMEntity> {
+ private final AkkaEntityOwnershipService service;
+
+ CandidateRegistration(final DOMEntity instance, final AkkaEntityOwnershipService service) {
+ super(instance);
+ this.service = requireNonNull(service);
+ }
+
+ @Override
+ protected void removeRegistration() {
+ service.unregisterCandidate(getInstance());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+/**
+ * Service used to bring up/down the Entity Ownership Service service in individual datacenters.
+ * Active datacenter in native eos terms means that the candidates from this datacenter can become owners of entities.
+ * Additionally the singleton component makings ownership decisions, runs only in an active datacenter.
+ *
+ * <p>
+ * Caller must make sure that only one datacenter is active at a time, otherwise the singleton actors
+ * in each datacenter will interfere with each other. The methods provided byt this service can be called
+ * on any node from the datacenter to be activated. Datacenters only need to brought up when using non-default
+ * datacenter or multiple datacenters.
+ */
+public interface DataCenterControl {
+ /**
+ * Activates the Entity Ownership Service in the datacenter that this method is called.
+ *
+ * @return Completion future
+ */
+ @NonNull ListenableFuture<Empty> activateDataCenter();
+
+ /**
+ * Deactivates the Entity Ownership Service in the datacenter that this method is called.
+ *
+ * @return Completion future
+ */
+ @NonNull ListenableFuture<Empty> deactivateDataCenter();
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+final class ListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener> {
+ private final AkkaEntityOwnershipService service;
+ private final @NonNull String entityType;
+
+ ListenerRegistration(final DOMEntityOwnershipListener listener, final String entityType,
+ final AkkaEntityOwnershipService service) {
+ super(listener);
+ this.entityType = requireNonNull(entityType);
+ this.service = requireNonNull(service);
+ }
+
+ public String entityType() {
+ return entityType;
+ }
+
+ @Override
+ protected void removeRegistration() {
+ service.unregisterListener(entityType, getInstance());
+ }
+
+ @Override
+ protected MoreObjects.ToStringHelper addToStringAttributes(final MoreObjects.ToStringHelper toStringHelper) {
+ return toStringHelper.add("entityType", entityType);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.SupervisorStrategy;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.ClusterSingleton;
+import akka.cluster.typed.SingletonActor;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.Terminate;
+import org.opendaylight.controller.eos.akka.owner.checker.OwnerStateChecker;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.IdleSupervisor;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistryInit;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+public final class EOSMain extends AbstractBehavior<BootstrapCommand> {
+ private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+ private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+ private final ActorRef<StateCheckerCommand> ownerStateChecker;
+
+ private EOSMain(final ActorContext<BootstrapCommand> context, final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+
+ final String role = Cluster.get(context.getSystem()).selfMember().getRoles().iterator().next();
+
+ listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry");
+
+ final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem());
+ // start the initial sync behavior that switches to the regular one after syncing
+ ownerSupervisor = clusterSingleton.init(
+ SingletonActor.of(Behaviors.supervise(IdleSupervisor.create(iidCodec))
+ .onFailure(SupervisorStrategy.restart()), "OwnerSupervisor"));
+ candidateRegistry = context.spawn(CandidateRegistryInit.create(ownerSupervisor), "CandidateRegistry");
+
+ ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, iidCodec),
+ "OwnerStateChecker");
+ }
+
+ public static Behavior<BootstrapCommand> create(final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(context -> new EOSMain(context, iidCodec));
+ }
+
+ @Override
+ public Receive<BootstrapCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(GetRunningContext.class, this::onGetRunningContext)
+ .onMessage(Terminate.class, this::onTerminate)
+ .build();
+ }
+
+ private Behavior<BootstrapCommand> onGetRunningContext(final GetRunningContext request) {
+ request.getReplyTo().tell(
+ new RunningContext(listenerRegistry, candidateRegistry, ownerStateChecker, ownerSupervisor));
+ return this;
+ }
+
+ private Behavior<BootstrapCommand> onTerminate(final Terminate request) {
+ request.getReplyTo().tell(Empty.value());
+ return Behaviors.stopped();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+public abstract class BootstrapCommand {
+ BootstrapCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+
+public final class GetRunningContext extends BootstrapCommand {
+ private final ActorRef<RunningContext> replyTo;
+
+ public GetRunningContext(final ActorRef<RunningContext> replyTo) {
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public ActorRef<RunningContext> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+
+public final class RunningContext extends BootstrapCommand {
+ private final @NonNull ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+ private final @NonNull ActorRef<CandidateRegistryCommand> candidateRegistry;
+ private final @NonNull ActorRef<StateCheckerCommand> ownerStateChecker;
+ private final @NonNull ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+ public RunningContext(final ActorRef<TypeListenerRegistryCommand> listenerRegistry,
+ final ActorRef<CandidateRegistryCommand> candidateRegistry,
+ final ActorRef<StateCheckerCommand> ownerStateChecker,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+ this.listenerRegistry = requireNonNull(listenerRegistry);
+ this.candidateRegistry = requireNonNull(candidateRegistry);
+ this.ownerStateChecker = requireNonNull(ownerStateChecker);
+ this.ownerSupervisor = requireNonNull(ownerSupervisor);
+ }
+
+ public @NonNull ActorRef<TypeListenerRegistryCommand> getListenerRegistry() {
+ return listenerRegistry;
+ }
+
+ public @NonNull ActorRef<CandidateRegistryCommand> getCandidateRegistry() {
+ return candidateRegistry;
+ }
+
+ public @NonNull ActorRef<StateCheckerCommand> getOwnerStateChecker() {
+ return ownerStateChecker;
+ }
+
+ public @NonNull ActorRef<OwnerSupervisorCommand> getOwnerSupervisor() {
+ return ownerSupervisor;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+public final class Terminate extends BootstrapCommand {
+ private final @NonNull ActorRef<Empty> replyTo;
+
+ public Terminate(final ActorRef<Empty> replyTo) {
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @NonNull ActorRef<Empty> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import org.opendaylight.controller.eos.akka.owner.checker.command.AbstractEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetCandidates;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetCandidatesForEntity;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnerForEntity;
+import org.opendaylight.controller.eos.akka.owner.checker.command.OwnerDataResponse;
+import org.opendaylight.controller.eos.akka.owner.checker.command.SingleEntityOwnerDataResponse;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Short-lived actor that is spawned purely for execution of rpcs from the entity-owners model.
+ */
+public final class EntityRpcHandler extends AbstractBehavior<StateCheckerCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(EntityRpcHandler.class);
+ private static final Duration ASK_TIMEOUT = Duration.ofSeconds(5);
+
+ private final ReplicatorMessageAdapter<StateCheckerCommand, LWWRegister<String>> ownerReplicator;
+ private final ReplicatorMessageAdapter<StateCheckerCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+ private final ActorRef<Replicator.Command> replicator;
+
+ private final BindingInstanceIdentifierCodec iidCodec;
+
+ private final Map<DOMEntity, Set<String>> currentCandidates = new HashMap<>();
+ private final Map<DOMEntity, String> currentOwners = new HashMap<>();
+ private final Map<String, DOMEntity> entityLookup = new HashMap<>();
+ private int toSync = -1;
+
+ public EntityRpcHandler(final ActorContext<StateCheckerCommand> context,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+
+ replicator = DistributedData.get(context.getSystem()).replicator();
+ ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, ASK_TIMEOUT);
+ candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, ASK_TIMEOUT);
+ this.ownerSupervisor = ownerSupervisor;
+
+ this.iidCodec = iidCodec;
+ }
+
+ public static Behavior<StateCheckerCommand> create(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(ctx -> new EntityRpcHandler(ctx, ownerSupervisor, iidCodec));
+ }
+
+ @Override
+ public Receive<StateCheckerCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(GetEntitiesRequest.class, this::onGetEntities)
+ .onMessage(GetEntityRequest.class, this::onGetEntity)
+ .onMessage(GetEntityOwnerRequest.class, this::onGetEntityOwner)
+ .onMessage(GetCandidates.class, this::onCandidatesReceived)
+ .onMessage(GetCandidatesForEntity.class, this::onCandidatesForEntityReceived)
+ .onMessage(OwnerDataResponse.class, this::onOwnerDataReceived)
+ .onMessage(SingleEntityOwnerDataResponse.class, this::onSingleOwnerReceived)
+ .onMessage(GetOwnerForEntity.class, this::onReplyWithOwner)
+ .build();
+ }
+
+ private Behavior<StateCheckerCommand> onGetEntities(final GetEntitiesRequest request) {
+ LOG.debug("{} : Executing get-entities rpc.", getContext().getSelf());
+ final CompletionStage<GetEntitiesBackendReply> result = AskPattern.askWithStatus(
+ ownerSupervisor,
+ GetEntitiesBackendRequest::new,
+ ASK_TIMEOUT,
+ getContext().getSystem().scheduler()
+ );
+
+ result.whenComplete((response, throwable) -> {
+ if (response != null) {
+ request.getReplyTo().tell(new GetEntitiesReply(response));
+ } else {
+ // retry backed with distributed-data
+ LOG.debug("{} : Get-entities failed with owner supervisor, falling back to distributed-data.",
+ getContext().getSelf(), throwable);
+ getCandidates(request.getReplyTo());
+ }
+ });
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> onGetEntity(final GetEntityRequest request) {
+ LOG.debug("{} : Executing get-entity rpc.", getContext().getSelf());
+ final CompletionStage<GetEntityBackendReply> result = AskPattern.askWithStatus(
+ ownerSupervisor,
+ replyTo -> new GetEntityBackendRequest(replyTo, request.getEntity()),
+ ASK_TIMEOUT,
+ getContext().getSystem().scheduler()
+ );
+
+ result.whenComplete((response, throwable) -> {
+ if (response != null) {
+ request.getReplyTo().tell(new GetEntityReply(response));
+ } else {
+ // retry backed with distributed-data
+ LOG.debug("{} : Get-entity failed with owner supervisor, falling back to distributed-data.",
+ getContext().getSelf(), throwable);
+ getCandidatesForEntity(extractEntity(request), request.getReplyTo());
+ }
+ });
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> onGetEntityOwner(final GetEntityOwnerRequest request) {
+ LOG.debug("{} : Executing get-entity-owner rpc.", getContext().getSelf());
+ final CompletionStage<GetEntityOwnerBackendReply> result = AskPattern.askWithStatus(
+ ownerSupervisor,
+ replyTo -> new GetEntityOwnerBackendRequest(replyTo, request.getEntity()),
+ ASK_TIMEOUT,
+ getContext().getSystem().scheduler()
+ );
+
+ result.whenComplete((response, throwable) -> {
+ if (response != null) {
+ request.getReplyTo().tell(new GetEntityOwnerReply(response.getOwner()));
+ } else {
+ // retry backed with distributed-data
+ LOG.debug("{} : Get-entity-owner failed with owner supervisor, falling back to distributed-data.",
+ getContext().getSelf(), throwable);
+ getOwnerForEntity(extractEntity(request), request.getReplyTo());
+ }
+ });
+ return this;
+ }
+
+ private void getCandidates(final ActorRef<GetEntitiesReply> replyTo) {
+ candidateReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+ replicatorResponse -> new GetCandidates(replicatorResponse, replyTo));
+ }
+
+ private void getCandidatesForEntity(final DOMEntity entity, final ActorRef<GetEntityReply> replyTo) {
+ candidateReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+ replicatorResponse -> new GetCandidatesForEntity(replicatorResponse, entity, replyTo));
+ }
+
+ private void getOwnerForEntity(final DOMEntity entity, final ActorRef<GetEntityOwnerReply> replyTo) {
+ ownerReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(
+ new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), askReplyTo),
+ replicatorReponse -> new GetOwnerForEntity(replicatorReponse, entity, replyTo));
+ }
+
+ private Behavior<StateCheckerCommand> onReplyWithOwner(final GetOwnerForEntity message) {
+ final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ message.getReplyTo().tell(new GetEntityOwnerReply(
+ ((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue()));
+ } else {
+ LOG.debug("Unable to retrieve owner for entity: {}, response: {}", message.getEntity(), response);
+ message.getReplyTo().tell(new GetEntityOwnerReply(""));
+ }
+
+ return Behaviors.stopped();
+ }
+
+ private Behavior<StateCheckerCommand> onCandidatesReceived(final GetCandidates message) {
+ final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = message.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ return extractCandidates((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response,
+ message.getReplyTo());
+ }
+
+ LOG.debug("Unable to retrieve candidates from distributed-data. Response: {}", response);
+ message.getReplyTo().tell(new GetEntitiesReply(Collections.emptyMap(), Collections.emptyMap()));
+ return Behaviors.stopped();
+ }
+
+ private Behavior<StateCheckerCommand> extractCandidates(
+ final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response,
+ final ActorRef<GetEntitiesReply> replyTo) {
+ final ORMap<DOMEntity, ORSet<String>> candidates = response.get(CandidateRegistry.KEY);
+ candidates.getEntries().forEach((key, value) -> currentCandidates.put(key, new HashSet<>(value.getElements())));
+
+ toSync = candidates.keys().size();
+ for (final DOMEntity entity : candidates.keys().getElements()) {
+ entityLookup.put(entity.toString(), entity);
+
+ ownerReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(
+ new LWWRegisterKey<>(entity.toString()),
+ Replicator.readLocal(),
+ askReplyTo),
+ replicatorResponse -> new OwnerDataResponse(replicatorResponse, replyTo));
+ }
+
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> onOwnerDataReceived(final OwnerDataResponse message) {
+ final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ handleOwnerRsp((Replicator.GetSuccess<LWWRegister<String>>) response);
+ } else if (response instanceof Replicator.NotFound) {
+ handleNotFoundOwnerRsp((Replicator.NotFound<LWWRegister<String>>) response);
+ } else {
+ LOG.debug("Owner retrieval failed, response: {}", response);
+ }
+
+ // count the responses, on last respond to rpc and shutdown
+ toSync--;
+ if (toSync == 0) {
+ final GetEntitiesReply getEntitiesReply = new GetEntitiesReply(currentCandidates, currentOwners);
+ message.getReplyTo().tell(getEntitiesReply);
+ return Behaviors.stopped();
+ }
+
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> onCandidatesForEntityReceived(final GetCandidatesForEntity message) {
+ LOG.debug("Received CandidatesForEntity: {}", message);
+ final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = message.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ return extractCandidatesForEntity((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response,
+ message.getEntity(), message.getReplyTo());
+ } else {
+ LOG.debug("Unable to retrieve candidates for entity: {}. Response:: {}", message.getEntity(), response);
+ message.getReplyTo().tell(new GetEntityReply(null, Collections.emptySet()));
+ return this;
+ }
+ }
+
+ private Behavior<StateCheckerCommand> extractCandidatesForEntity(
+ final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response, final DOMEntity entity,
+ final ActorRef<GetEntityReply> replyTo) {
+ final Map<DOMEntity, ORSet<String>> entries = response.get(CandidateRegistry.KEY).getEntries();
+ currentCandidates.put(entity, entries.get(entity).getElements());
+
+ entityLookup.put(entity.toString(), entity);
+ ownerReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(
+ new LWWRegisterKey<>(entity.toString()),
+ Replicator.readLocal(),
+ askReplyTo),
+ replicatorResponse -> new SingleEntityOwnerDataResponse(replicatorResponse, entity, replyTo));
+
+ return this;
+ }
+
+ private void handleOwnerRsp(final Replicator.GetSuccess<LWWRegister<String>> rsp) {
+ final DOMEntity entity = entityLookup.get(rsp.key().id());
+ final String owner = rsp.get(rsp.key()).getValue();
+
+ currentOwners.put(entity, owner);
+ }
+
+ private static void handleNotFoundOwnerRsp(final Replicator.NotFound<LWWRegister<String>> rsp) {
+ LOG.debug("Owner not found. {}", rsp);
+ }
+
+ private Behavior<StateCheckerCommand> onSingleOwnerReceived(final SingleEntityOwnerDataResponse message) {
+ LOG.debug("Received owner for single entity: {}", message);
+ final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+ final GetEntityReply reply;
+ if (response instanceof Replicator.GetSuccess) {
+ reply = new GetEntityReply(((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue(),
+ currentCandidates.get(message.getEntity()));
+ } else {
+ reply = new GetEntityReply(null, currentCandidates.get(message.getEntity()));
+ }
+
+ message.getReplyTo().tell(reply);
+ return Behaviors.stopped();
+ }
+
+ private DOMEntity extractEntity(final AbstractEntityRequest<?> request) {
+ final var name = request.getName();
+ final var iid = name.getInstanceIdentifier();
+ if (iid != null) {
+ return new DOMEntity(request.getType().getValue(), iidCodec.fromBinding(iid));
+ }
+ final var str = verifyNotNull(name.getString(), "Unhandled entity name %s", name);
+ return new DOMEntity(request.getType().getValue(), str);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.Replicator.Get;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetFailure;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetSuccess;
+import akka.cluster.ddata.typed.javadsl.Replicator.NotFound;
+import akka.cluster.ddata.typed.javadsl.Replicator.ReadMajority;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipState;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipStateReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.InternalGetReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class OwnerStateChecker extends AbstractBehavior<StateCheckerCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(OwnerStateChecker.class);
+ private static final Duration GET_OWNERSHIP_TIMEOUT = Duration.ofSeconds(5);
+ private static final Duration UNEXPECTED_ASK_TIMEOUT = Duration.ofSeconds(5);
+
+ private final ReplicatorMessageAdapter<StateCheckerCommand, LWWRegister<String>> ownerReplicator;
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+ private final BindingInstanceIdentifierCodec iidCodec;
+ private final ActorRef<Replicator.Command> replicator;
+ private final String localMember;
+
+ private OwnerStateChecker(final ActorContext<StateCheckerCommand> context,
+ final String localMember,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+ this.localMember = requireNonNull(localMember);
+ this.ownerSupervisor = requireNonNull(ownerSupervisor);
+ this.iidCodec = requireNonNull(iidCodec);
+ replicator = DistributedData.get(context.getSystem()).replicator();
+ ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, UNEXPECTED_ASK_TIMEOUT);
+ }
+
+ public static Behavior<StateCheckerCommand> create(final String localMember,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(ctx -> new OwnerStateChecker(ctx, localMember, ownerSupervisor, iidCodec));
+ }
+
+ @Override
+ public Receive<StateCheckerCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(GetOwnershipState.class, this::onGetOwnershipState)
+ .onMessage(InternalGetReply.class, this::respondWithState)
+ .onMessage(GetEntitiesRequest.class, this::executeEntityRpc)
+ .onMessage(GetEntityRequest.class, this::executeEntityRpc)
+ .onMessage(GetEntityOwnerRequest.class, this::executeEntityRpc)
+ .build();
+ }
+
+ private Behavior<StateCheckerCommand> onGetOwnershipState(final GetOwnershipState message) {
+ ownerReplicator.askGet(
+ askReplyTo -> new Get<>(
+ new LWWRegisterKey<>(message.getEntity().toString()),
+ new ReadMajority(GET_OWNERSHIP_TIMEOUT),
+ askReplyTo),
+ reply -> new InternalGetReply(reply, message.getEntity(), message.getReplyTo()));
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> respondWithState(final InternalGetReply reply) {
+ final GetResponse<LWWRegister<String>> response = reply.getResponse();
+ if (response instanceof NotFound) {
+ LOG.debug("Data for owner not found, most likely no owner has beed picked for entity: {}",
+ reply.getEntity());
+ reply.getReplyTo().tell(new GetOwnershipStateReply(null));
+ } else if (response instanceof GetFailure) {
+ LOG.warn("Failure retrieving data for entity: {}", reply.getEntity());
+ reply.getReplyTo().tell(new GetOwnershipStateReply(null));
+ } else if (response instanceof GetSuccess) {
+ final String owner = ((GetSuccess<LWWRegister<String>>) response).get(response.key()).getValue();
+ LOG.debug("Data for owner received. {}, owner: {}", response, owner);
+
+ final boolean isOwner = localMember.equals(owner);
+ final boolean hasOwner = !owner.isEmpty();
+
+ reply.getReplyTo().tell(new GetOwnershipStateReply(EntityOwnershipState.from(isOwner, hasOwner)));
+ }
+ return this;
+ }
+
+ private Behavior<StateCheckerCommand> executeEntityRpc(final StateCheckerRequest request) {
+ final ActorRef<StateCheckerCommand> rpcHandler =
+ getContext().spawnAnonymous(EntityRpcHandler.create(ownerSupervisor, iidCodec));
+
+ LOG.debug("Executing entity rpc: {} in actor: {}", request, rpcHandler);
+ rpcHandler.tell(request);
+ return this;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesBuilder;
+
+public abstract class AbstractEntityRequest<T extends StateCheckerReply> extends StateCheckerRequest<T> {
+ private static final long serialVersionUID = 1L;
+
+ private final @NonNull EntityType type;
+ private final @NonNull EntityName name;
+
+ AbstractEntityRequest(final ActorRef<T> replyTo, final EntityId entity) {
+ super(replyTo);
+ type = entity.requireType();
+ name = entity.requireName();
+ }
+
+ public final @NonNull EntityId getEntity() {
+ return new EntitiesBuilder().setType(type).setName(name).build();
+ }
+
+ public final @NonNull EntityType getType() {
+ return type;
+ }
+
+ public final @NonNull EntityName getName() {
+ return name;
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("type", type).add("name", name).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetCandidates extends StateCheckerCommand {
+ private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+ private final @NonNull ActorRef<GetEntitiesReply> replyTo;
+
+ public GetCandidates(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+ final ActorRef<GetEntitiesReply> replyTo) {
+ this.response = response;
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+ public @NonNull ActorRef<GetEntitiesReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetCandidatesForEntity extends StateCheckerCommand {
+ private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+ private final @NonNull DOMEntity entity;
+ private final @NonNull ActorRef<GetEntityReply> replyTo;
+
+ public GetCandidatesForEntity(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+ final DOMEntity entity, final ActorRef<GetEntityReply> replyTo) {
+ this.response = response;
+ this.entity = requireNonNull(entity);
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+ public @NonNull DOMEntity getEntity() {
+ return entity;
+ }
+
+ public @NonNull ActorRef<GetEntityReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static com.google.common.base.Verify.verify;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSetMultimap;
+import com.google.common.collect.Iterables;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsal.core.general.entity.rev150930.Entity;
+import org.opendaylight.yangtools.yang.binding.util.BindingMap;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+
+public final class GetEntitiesReply extends StateCheckerReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ImmutableSetMultimap<DOMEntity, String> candidates;
+ private final ImmutableMap<DOMEntity, String> owners;
+
+ public GetEntitiesReply(final GetEntitiesBackendReply response) {
+ this.owners = response.getOwners();
+ this.candidates = response.getCandidates();
+ }
+
+ public GetEntitiesReply(final Map<DOMEntity, Set<String>> candidates, final Map<DOMEntity, String> owners) {
+ final ImmutableSetMultimap.Builder<DOMEntity, String> builder = ImmutableSetMultimap.builder();
+ for (Map.Entry<DOMEntity, Set<String>> entry : candidates.entrySet()) {
+ builder.putAll(entry.getKey(), entry.getValue());
+ }
+ this.candidates = builder.build();
+ this.owners = ImmutableMap.copyOf(owners);
+ }
+
+ public @NonNull GetEntitiesOutput toOutput(final BindingInstanceIdentifierCodec iidCodec) {
+ final Set<DOMEntity> entities = new HashSet<>();
+ entities.addAll(owners.keySet());
+ entities.addAll(candidates.keySet());
+
+ return new GetEntitiesOutputBuilder()
+ .setEntities(entities.stream()
+ .map(entity -> {
+ final EntitiesBuilder eb = new EntitiesBuilder()
+ .setType(new EntityType(entity.getType()))
+ .setName(extractName(entity, iidCodec))
+ .setCandidateNodes(candidates.get(entity).stream()
+ .map(NodeName::new).collect(Collectors.toUnmodifiableList()));
+
+ final String owner = owners.get(entity);
+ if (owner != null) {
+ eb.setOwnerNode(new NodeName(owner));
+ }
+ return eb.build();
+ })
+ .collect(BindingMap.toMap()))
+ .build();
+ }
+
+ /**
+ * if the entity is general entity then shorthand the name to only the last path argument, otherwise return
+ * full YIID path encoded as string.
+ *
+ * @param entity Entity to extract the name from
+ * @param iidCodec codec to encode entity name back to InstanceIdentifier if needed
+ * @return Extracted name
+ */
+ private static EntityName extractName(final DOMEntity entity, final BindingInstanceIdentifierCodec iidCodec) {
+ final var id = entity.getIdentifier();
+ if (id.isEmpty() || !id.getPathArguments().get(0).getNodeType().equals(Entity.QNAME)) {
+ return new EntityName(iidCodec.toBinding(id));
+ }
+
+ final PathArgument last = id.getLastPathArgument();
+ verify(last instanceof NodeIdentifierWithPredicates, "Unexpected last argument %s", last);
+ final Object value = Iterables.getOnlyElement(((NodeIdentifierWithPredicates) last).values());
+ verify(value instanceof String, "Unexpected predicate value %s", value);
+ return new EntityName((String) value);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+
+public final class GetEntitiesRequest extends StateCheckerRequest<GetEntitiesReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntitiesRequest(final ActorRef<GetEntitiesReply> replyTo) {
+ super(replyTo);
+ }
+
+ @Override
+ public String toString() {
+ return "GetEntitiesRequest{} " + super.toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+
+public final class GetEntityOwnerReply extends StateCheckerReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String owner;
+
+ public GetEntityOwnerReply(final @Nullable String owner) {
+ this.owner = owner;
+ }
+
+ public @NonNull GetEntityOwnerOutput toOutput() {
+ final GetEntityOwnerOutputBuilder builder = new GetEntityOwnerOutputBuilder();
+ if (owner != null) {
+ builder.setOwnerNode(new NodeName(owner));
+ }
+ return builder.build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityOwnerRequest extends AbstractEntityRequest<GetEntityOwnerReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntityOwnerRequest(final ActorRef<GetEntityOwnerReply> replyTo, final EntityId entity) {
+ super(replyTo, entity);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+
+public final class GetEntityReply extends StateCheckerReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ImmutableSet<String> candidates;
+ private final String owner;
+
+ public GetEntityReply(final GetEntityBackendReply backendReply) {
+ candidates = backendReply.getCandidates();
+ owner = backendReply.getOwner();
+ }
+
+ public GetEntityReply(final @Nullable String owner, final @Nullable Set<String> candidates) {
+ this.owner = owner;
+ this.candidates = candidates == null ? ImmutableSet.of() : ImmutableSet.copyOf(candidates);
+ }
+
+ public @NonNull GetEntityOutput toOutput() {
+ final GetEntityOutputBuilder builder = new GetEntityOutputBuilder();
+ if (owner != null) {
+ builder.setOwnerNode(new NodeName(owner));
+ }
+ return builder
+ .setCandidateNodes(candidates.stream().map(NodeName::new).collect(Collectors.toUnmodifiableList()))
+ .build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityRequest extends AbstractEntityRequest<GetEntityReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntityRequest(final ActorRef<GetEntityReply> replyTo, final EntityId entity) {
+ super(replyTo, entity);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class GetOwnerForEntity extends StateCheckerCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+ private final DOMEntity entity;
+ private final ActorRef<GetEntityOwnerReply> replyTo;
+
+ public GetOwnerForEntity(final @NonNull GetResponse<LWWRegister<String>> response,
+ final DOMEntity entity, final ActorRef<GetEntityOwnerReply> replyTo) {
+ this.response = response;
+ this.entity = entity;
+ this.replyTo = replyTo;
+ }
+
+ public GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+
+ public DOMEntity getEntity() {
+ return entity;
+ }
+
+ public ActorRef<GetEntityOwnerReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetOwnershipState extends StateCheckerCommand {
+ private final @NonNull DOMEntity entity;
+ private final @NonNull ActorRef<GetOwnershipStateReply> replyTo;
+
+ public GetOwnershipState(final DOMEntity entity, final ActorRef<GetOwnershipStateReply> replyTo) {
+ this.entity = requireNonNull(entity);
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @NonNull DOMEntity getEntity() {
+ return entity;
+ }
+
+ public @NonNull ActorRef<GetOwnershipStateReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+
+public final class GetOwnershipStateReply extends StateCheckerReply {
+ private final @Nullable EntityOwnershipState ownershipState;
+
+ public GetOwnershipStateReply(final EntityOwnershipState ownershipState) {
+ this.ownershipState = ownershipState;
+ }
+
+ public @Nullable EntityOwnershipState getOwnershipState() {
+ return ownershipState;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InternalGetReply extends StateCheckerCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+ private final @NonNull ActorRef<GetOwnershipStateReply> replyTo;
+ private final @NonNull DOMEntity entity;
+
+ public InternalGetReply(final GetResponse<LWWRegister<String>> response, final DOMEntity entity,
+ final ActorRef<GetOwnershipStateReply> replyTo) {
+ this.response = requireNonNull(response);
+ this.entity = requireNonNull(entity);
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+
+ public @NonNull DOMEntity getEntity() {
+ return entity;
+ }
+
+ public @NonNull ActorRef<GetOwnershipStateReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public class OwnerDataResponse extends StateCheckerCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+ private final ActorRef<GetEntitiesReply> replyTo;
+
+ public OwnerDataResponse(final GetResponse<LWWRegister<String>> response,
+ final ActorRef<GetEntitiesReply> replyTo) {
+ this.response = requireNonNull(response);
+ this.replyTo = replyTo;
+ }
+
+ public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+
+ public ActorRef<GetEntitiesReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class SingleEntityOwnerDataResponse extends StateCheckerCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+ private final DOMEntity entity;
+ private final ActorRef<GetEntityReply> replyTo;
+
+ public SingleEntityOwnerDataResponse(final @NonNull GetResponse<LWWRegister<String>> response,
+ final DOMEntity entity,
+ final ActorRef<GetEntityReply> replyTo) {
+ this.response = requireNonNull(response);
+ this.entity = requireNonNull(entity);
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+
+ public DOMEntity getEntity() {
+ return entity;
+ }
+
+ public ActorRef<GetEntityReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+public abstract class StateCheckerCommand {
+ StateCheckerCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+public abstract class StateCheckerReply {
+ StateCheckerReply() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class StateCheckerRequest<T extends StateCheckerReply> extends StateCheckerCommand
+ implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final @NonNull ActorRef<T> replyTo;
+
+ StateCheckerRequest(final ActorRef<T> replyTo) {
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public final @NonNull ActorRef<T> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+
+abstract class AbstractSupervisor extends AbstractBehavior<OwnerSupervisorCommand> {
+
+ final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+ justification = "getContext() is non-final")
+ AbstractSupervisor(final ActorContext<OwnerSupervisorCommand> context) {
+ super(context);
+
+ final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+ candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+ }
+
+ Behavior<OwnerSupervisorCommand> onClearCandidatesForMember(final ClearCandidatesForMember command) {
+ getLogger().debug("Clearing candidates for member: {}", command.getCandidate());
+
+ candidateReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY,
+ new Replicator.ReadMajority(Duration.ofSeconds(15)), askReplyTo),
+ response -> new ClearCandidates(response, command));
+
+ return this;
+ }
+
+ Behavior<OwnerSupervisorCommand> finishClearCandidates(final ClearCandidates command) {
+ if (command.getResponse() instanceof Replicator.GetSuccess) {
+ getLogger().debug("Retrieved candidate data, clearing candidates for {}",
+ command.getOriginalMessage().getCandidate());
+
+ getContext().spawnAnonymous(CandidateCleaner.create()).tell(command);
+ } else {
+ getLogger().debug("Unable to retrieve candidate data for {}, no candidates present sending empty reply",
+ command.getOriginalMessage().getCandidate());
+ command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+ }
+
+ return this;
+ }
+
+ abstract Logger getLogger();
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import java.util.Map;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesUpdateResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Actor that can be spawned by all the supervisor implementations that executes clearing of candidates once
+ * candidate retrieval succeeds. Once candidates for the member are cleared(or immediately if none need to be cleared),
+ * the actor stops itself.
+ */
+public final class CandidateCleaner extends AbstractBehavior<OwnerSupervisorCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(CandidateCleaner.class);
+
+ private final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+ private final SelfUniqueAddress node;
+
+ private int remaining = 0;
+
+ private CandidateCleaner(final ActorContext<OwnerSupervisorCommand> context) {
+ super(context);
+
+ final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+ candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+ node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+
+ }
+
+ public static Behavior<OwnerSupervisorCommand> create() {
+ return Behaviors.setup(CandidateCleaner::new);
+ }
+
+ @Override
+ public Receive<OwnerSupervisorCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(ClearCandidates.class, this::onClearCandidates)
+ .onMessage(ClearCandidatesUpdateResponse.class, this::onClearCandidatesUpdateResponse)
+ .build();
+ }
+
+ private Behavior<OwnerSupervisorCommand> onClearCandidates(final ClearCandidates command) {
+ LOG.debug("Clearing candidates for member: {}", command.getOriginalMessage().getCandidate());
+
+ final ORMap<DOMEntity, ORSet<String>> candidates =
+ ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) command.getResponse())
+ .get(CandidateRegistry.KEY);
+
+ for (final Map.Entry<DOMEntity, ORSet<String>> entry : candidates.getEntries().entrySet()) {
+ if (entry.getValue().contains(command.getOriginalMessage().getCandidate())) {
+ LOG.debug("Removing {} from {}", command.getOriginalMessage().getCandidate(), entry.getKey());
+
+ remaining++;
+ candidateReplicator.askUpdate(
+ askReplyTo -> new Replicator.Update<>(
+ CandidateRegistry.KEY,
+ ORMap.empty(),
+ new Replicator.WriteMajority(Duration.ofSeconds(10)),
+ askReplyTo,
+ map -> map.update(node, entry.getKey(), ORSet.empty(),
+ value -> value.remove(node, command.getOriginalMessage().getCandidate()))),
+ updateResponse -> new ClearCandidatesUpdateResponse(updateResponse,
+ command.getOriginalMessage().getReplyTo()));
+ }
+ }
+
+ if (remaining == 0) {
+ LOG.debug("Did not clear any candidates for {}", command.getOriginalMessage().getCandidate());
+ command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+ return Behaviors.stopped();
+ }
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onClearCandidatesUpdateResponse(
+ final ClearCandidatesUpdateResponse command) {
+ remaining--;
+ if (remaining == 0) {
+ LOG.debug("Last update response for candidate removal received, replying to: {}", command.getReplyTo());
+ command.getReplyTo().tell(new ClearCandidatesResponse());
+ return Behaviors.stopped();
+ } else {
+ LOG.debug("Have still {} outstanding requests after {}", remaining, command.getResponse());
+ }
+ return this;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.Member;
+import akka.cluster.typed.Cluster;
+import akka.pattern.StatusReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorRequest;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Initial Supervisor behavior that stays idle and only switches itself to the active behavior when its running
+ * in the primary datacenter, or is activated on demand. Once the supervisor instance is no longer needed in the
+ * secondary datacenter it needs to be deactivated manually.
+ */
+public final class IdleSupervisor extends AbstractSupervisor {
+ private static final Logger LOG = LoggerFactory.getLogger(IdleSupervisor.class);
+
+ private static final String DATACENTER_PREFIX = "dc-";
+ private static final String DEFAULT_DATACENTER = "dc-default";
+
+ private final BindingInstanceIdentifierCodec iidCodec;
+
+ private IdleSupervisor(final ActorContext<OwnerSupervisorCommand> context,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+ this.iidCodec = requireNonNull(iidCodec);
+ final Cluster cluster = Cluster.get(context.getSystem());
+
+ final String datacenterRole = extractDatacenterRole(cluster.selfMember());
+ if (datacenterRole.equals(DEFAULT_DATACENTER)) {
+ LOG.debug("No datacenter configured, activating default data center");
+ context.getSelf().tell(new ActivateDataCenter(null));
+ }
+
+ LOG.debug("Idle supervisor started on {}.", cluster.selfMember());
+ }
+
+ public static Behavior<OwnerSupervisorCommand> create(final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(context -> new IdleSupervisor(context, iidCodec));
+ }
+
+ @Override
+ public Receive<OwnerSupervisorCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(ActivateDataCenter.class, this::onActivateDataCenter)
+ .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+ .onMessage(ClearCandidates.class, this::finishClearCandidates)
+ .build();
+ }
+
+ private Behavior<OwnerSupervisorCommand> onFailEntityRpc(final OwnerSupervisorRequest message) {
+ LOG.debug("Failing rpc request. {}", message);
+ message.getReplyTo().tell(StatusReply.error("OwnerSupervisor is inactive so it"
+ + " cannot handle entity rpc requests."));
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onActivateDataCenter(final ActivateDataCenter message) {
+ LOG.debug("Received ActivateDataCenter command switching to syncer behavior,");
+ return OwnerSyncer.create(message.getReplyTo(), iidCodec);
+ }
+
+ private static String extractDatacenterRole(final Member selfMember) {
+ return selfMember.getRoles().stream()
+ .filter(role -> role.startsWith(DATACENTER_PREFIX))
+ .findFirst()
+ .orElseThrow(() -> new IllegalArgumentException(selfMember + " does not have a valid role"));
+ }
+
+ @Override
+ Logger getLogger() {
+ return LOG;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ClusterEvent;
+import akka.cluster.ClusterEvent.CurrentClusterState;
+import akka.cluster.Member;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.Subscribe;
+import akka.pattern.StatusReply;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.BiPredicate;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.AbstractEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.CandidatesChanged;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterDeactivated;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberDownEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUpEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerChanged;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.JavaConverters;
+
+/**
+ * Responsible for tracking candidates and assigning ownership of entities. This behavior is subscribed to the candidate
+ * registry in distributed-data and picks entity owners based on the current cluster state and registered candidates.
+ * On cluster up/down etc. events the owners are reassigned if possible.
+ */
+public final class OwnerSupervisor extends AbstractSupervisor {
+
+ private static final Logger LOG = LoggerFactory.getLogger(OwnerSupervisor.class);
+ private static final String DATACENTER_PREFIX = "dc-";
+
+ private final ReplicatorMessageAdapter<OwnerSupervisorCommand, LWWRegister<String>> ownerReplicator;
+
+ // Our own clock implementation so we do not have to rely on synchronized clocks. This basically functions as an
+ // increasing counter which is fine for our needs as we only ever have a single writer since t supervisor is
+ // running in a cluster-singleton
+ private static final LWWRegister.Clock<String> CLOCK = (currentTimestamp, value) -> currentTimestamp + 1;
+
+ private final Cluster cluster;
+ private final SelfUniqueAddress node;
+ private final String dataCenter;
+
+ private final Set<String> activeMembers;
+
+ // currently registered candidates
+ private final Map<DOMEntity, Set<String>> currentCandidates;
+ // current owners
+ private final Map<DOMEntity, String> currentOwners;
+ // reverse lookup of owner to entity
+ private final Multimap<String, DOMEntity> ownerToEntity = HashMultimap.create();
+
+ // only reassign owner for those entities that lost this candidate or is not reachable
+ private final BiPredicate<DOMEntity, String> reassignPredicate = (entity, candidate) ->
+ !isActiveCandidate(candidate) || !isCandidateFor(entity, candidate);
+
+ private final BindingInstanceIdentifierCodec iidCodec;
+
+ private OwnerSupervisor(final ActorContext<OwnerSupervisorCommand> context,
+ final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+ this.iidCodec = requireNonNull(iidCodec);
+
+ final DistributedData distributedData = DistributedData.get(context.getSystem());
+ final ActorRef<Replicator.Command> replicator = distributedData.replicator();
+
+ cluster = Cluster.get(context.getSystem());
+ ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+ dataCenter = extractDatacenterRole(cluster.selfMember());
+
+ node = distributedData.selfUniqueAddress();
+ activeMembers = getActiveMembers();
+
+ this.currentCandidates = currentCandidates;
+ this.currentOwners = currentOwners;
+
+ for (final Map.Entry<DOMEntity, String> entry : currentOwners.entrySet()) {
+ ownerToEntity.put(entry.getValue(), entry.getKey());
+ }
+
+ // check whether we have any unreachable/missing owners
+ reassignUnreachableOwners();
+ assignMissingOwners();
+
+ final ActorRef<ClusterEvent.MemberEvent> memberEventAdapter =
+ context.messageAdapter(ClusterEvent.MemberEvent.class, event -> {
+ if (event instanceof ClusterEvent.MemberUp) {
+ return new MemberUpEvent(event.member().address(), event.member().getRoles());
+ } else {
+ return new MemberDownEvent(event.member().address(), event.member().getRoles());
+ }
+ });
+ cluster.subscriptions().tell(Subscribe.create(memberEventAdapter, ClusterEvent.MemberEvent.class));
+
+ final ActorRef<ClusterEvent.ReachabilityEvent> reachabilityEventAdapter =
+ context.messageAdapter(ClusterEvent.ReachabilityEvent.class, event -> {
+ if (event instanceof ClusterEvent.ReachableMember) {
+ return new MemberReachableEvent(event.member().address(), event.member().getRoles());
+ } else {
+ return new MemberUnreachableEvent(event.member().address(), event.member().getRoles());
+ }
+ });
+ cluster.subscriptions().tell(Subscribe.create(reachabilityEventAdapter, ClusterEvent.ReachabilityEvent.class));
+
+ candidateReplicator.subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
+
+ LOG.debug("Owner Supervisor started");
+ }
+
+ public static Behavior<OwnerSupervisorCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners, final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(ctx -> new OwnerSupervisor(ctx, currentCandidates, currentOwners, iidCodec));
+ }
+
+ @Override
+ public Receive<OwnerSupervisorCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(CandidatesChanged.class, this::onCandidatesChanged)
+ .onMessage(DeactivateDataCenter.class, this::onDeactivateDatacenter)
+ .onMessage(OwnerChanged.class, this::onOwnerChanged)
+ .onMessage(MemberUpEvent.class, this::onPeerUp)
+ .onMessage(MemberDownEvent.class, this::onPeerDown)
+ .onMessage(MemberReachableEvent.class, this::onPeerReachable)
+ .onMessage(MemberUnreachableEvent.class, this::onPeerUnreachable)
+ .onMessage(GetEntitiesBackendRequest.class, this::onGetEntities)
+ .onMessage(GetEntityBackendRequest.class, this::onGetEntity)
+ .onMessage(GetEntityOwnerBackendRequest.class, this::onGetEntityOwner)
+ .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+ .onMessage(ClearCandidates.class, this::finishClearCandidates)
+ .build();
+ }
+
+ private Behavior<OwnerSupervisorCommand> onDeactivateDatacenter(final DeactivateDataCenter command) {
+ LOG.debug("Deactivating Owner Supervisor on {}", cluster.selfMember());
+ command.getReplyTo().tell(DataCenterDeactivated.INSTANCE);
+ return IdleSupervisor.create(iidCodec);
+ }
+
+ private Behavior<OwnerSupervisorCommand> onOwnerChanged(final OwnerChanged command) {
+ LOG.debug("Owner has changed for {}", command.getResponse().key());
+ return this;
+ }
+
+ private void reassignUnreachableOwners() {
+ final Set<String> ownersToReassign = new HashSet<>();
+ for (final String owner : ownerToEntity.keys()) {
+ if (!isActiveCandidate(owner)) {
+ ownersToReassign.add(owner);
+ }
+ }
+
+ for (final String owner : ownersToReassign) {
+ reassignCandidatesFor(owner, ImmutableList.copyOf(ownerToEntity.get(owner)), reassignPredicate);
+ }
+ }
+
+ private void assignMissingOwners() {
+ for (final Map.Entry<DOMEntity, Set<String>> entry : currentCandidates.entrySet()) {
+ if (!currentOwners.containsKey(entry.getKey())) {
+ assignOwnerFor(entry.getKey());
+ }
+ }
+ }
+
+ private Behavior<OwnerSupervisorCommand> onCandidatesChanged(final CandidatesChanged message) {
+ LOG.debug("onCandidatesChanged {}", message.getResponse());
+ if (message.getResponse() instanceof Replicator.Changed) {
+ final Replicator.Changed<ORMap<DOMEntity, ORSet<String>>> changed =
+ (Replicator.Changed<ORMap<DOMEntity, ORSet<String>>>) message.getResponse();
+ processCandidateChanges(changed.get(CandidateRegistry.KEY));
+ }
+ return this;
+ }
+
+ private void processCandidateChanges(final ORMap<DOMEntity, ORSet<String>> candidates) {
+ final Map<DOMEntity, ORSet<String>> entries = candidates.getEntries();
+ for (final Map.Entry<DOMEntity, ORSet<String>> entry : entries.entrySet()) {
+ processCandidatesFor(entry.getKey(), entry.getValue());
+ }
+ }
+
+ private void processCandidatesFor(final DOMEntity entity, final ORSet<String> receivedCandidates) {
+ LOG.debug("Processing candidates for : {}, new value: {}", entity, receivedCandidates.elements());
+
+ final Set<String> candidates = JavaConverters.asJava(receivedCandidates.elements());
+ // only insert candidates if there are any to insert, otherwise we would generate unnecessary notification with
+ // no owner
+ if (!currentCandidates.containsKey(entity) && !candidates.isEmpty()) {
+ LOG.debug("Candidates missing for entity: {} adding all candidates", entity);
+ currentCandidates.put(entity, new HashSet<>(candidates));
+
+ LOG.debug("Current state for {} : {}", entity, currentCandidates.get(entity).toString());
+ assignOwnerFor(entity);
+
+ return;
+ }
+
+ final Set<String> currentlyPresent = currentCandidates.getOrDefault(entity, Set.of());
+ final Set<String> difference = ImmutableSet.copyOf(Sets.symmetricDifference(currentlyPresent, candidates));
+
+ LOG.debug("currently present candidates: {}", currentlyPresent);
+ LOG.debug("difference: {}", difference);
+
+ final List<String> ownersToReassign = new ArrayList<>();
+
+ // first add/remove candidates from entities
+ for (final String toCheck : difference) {
+ if (!currentlyPresent.contains(toCheck)) {
+ // add new candidate
+ LOG.debug("Adding new candidate for entity: {} : {}", entity, toCheck);
+ currentCandidates.get(entity).add(toCheck);
+
+ final String currentOwner = currentOwners.get(entity);
+
+ if (currentOwner == null || !activeMembers.contains(currentOwner)) {
+ // might as well assign right away when we don't have an owner or its unreachable
+ assignOwnerFor(entity);
+ }
+
+ LOG.debug("Current state for entity: {} : {}", entity, currentCandidates.get(entity).toString());
+ continue;
+ }
+
+ if (!candidates.contains(toCheck)) {
+ // remove candidate
+ LOG.debug("Removing candidate from entity: {} - {}", entity, toCheck);
+ currentCandidates.get(entity).remove(toCheck);
+ if (ownerToEntity.containsKey(toCheck)) {
+ ownersToReassign.add(toCheck);
+ }
+ }
+ }
+
+ // then reassign those that need new owners
+ for (final String toReassign : ownersToReassign) {
+ reassignCandidatesFor(toReassign, ImmutableList.copyOf(ownerToEntity.get(toReassign)),
+ reassignPredicate);
+ }
+
+ if (currentCandidates.get(entity) == null) {
+ LOG.debug("Last candidate removed for {}", entity);
+ } else {
+ LOG.debug("Current state for entity: {} : {}", entity, currentCandidates.get(entity).toString());
+ }
+ }
+
+ private void reassignCandidatesFor(final String oldOwner, final Collection<DOMEntity> entities,
+ final BiPredicate<DOMEntity, String> predicate) {
+ LOG.debug("Reassigning owners for {}", entities);
+ for (final DOMEntity entity : entities) {
+ if (predicate.test(entity, oldOwner)) {
+
+ if (!isActiveCandidate(oldOwner) && isCandidateFor(entity, oldOwner) && hasSingleCandidate(entity)) {
+ // only skip new owner assignment, only if unreachable, still is a candidate and is the ONLY
+ // candidate
+ LOG.debug("{} is the only candidate for {}. Skipping reassignment.", oldOwner, entity);
+ continue;
+ }
+ ownerToEntity.remove(oldOwner, entity);
+ assignOwnerFor(entity);
+ }
+ }
+ }
+
+ private boolean isActiveCandidate(final String candidate) {
+ return activeMembers.contains(candidate);
+ }
+
+ private boolean isCandidateFor(final DOMEntity entity, final String candidate) {
+ return currentCandidates.getOrDefault(entity, Set.of()).contains(candidate);
+ }
+
+ private boolean hasSingleCandidate(final DOMEntity entity) {
+ return currentCandidates.getOrDefault(entity, Set.of()).size() == 1;
+ }
+
+ private void assignOwnerFor(final DOMEntity entity) {
+ final Set<String> candidatesForEntity = currentCandidates.get(entity);
+ if (candidatesForEntity.isEmpty()) {
+ LOG.debug("No candidates present for entity: {}", entity);
+ removeOwner(entity);
+ return;
+ }
+
+ String pickedCandidate = null;
+ for (final String candidate : candidatesForEntity) {
+ if (activeMembers.contains(candidate)) {
+ pickedCandidate = candidate;
+ break;
+ }
+ }
+ if (pickedCandidate == null) {
+ LOG.debug("No candidate is reachable for {}, activeMembers: {}, currentCandidates: {}",
+ entity, activeMembers, currentCandidates.get(entity));
+ // no candidate is reachable so only remove owner if necessary
+ removeOwner(entity);
+ return;
+ }
+ ownerToEntity.put(pickedCandidate, entity);
+
+ LOG.debug("Entity {} new owner: {}", entity, pickedCandidate);
+ currentOwners.put(entity, pickedCandidate);
+ writeNewOwner(entity, pickedCandidate);
+ }
+
+ private void removeOwner(final DOMEntity entity) {
+ if (currentOwners.containsKey(entity)) {
+ // assign empty owner to dd, as we cannot delete data for a key since that would prevent
+ // writes for the same key
+ currentOwners.remove(entity);
+
+ writeNewOwner(entity, "");
+ }
+ }
+
+ private void writeNewOwner(final DOMEntity entity, final String candidate) {
+ ownerReplicator.askUpdate(
+ askReplyTo -> new Replicator.Update<>(
+ new LWWRegisterKey<>(entity.toString()),
+ new LWWRegister<>(node.uniqueAddress(), candidate, 0),
+ Replicator.writeLocal(),
+ askReplyTo,
+ register -> register.withValue(node, candidate, CLOCK)),
+ OwnerChanged::new);
+ }
+
+ private Behavior<OwnerSupervisorCommand> onPeerUp(final MemberUpEvent event) {
+ LOG.debug("Received MemberUp : {}", event);
+
+ handleReachableEvent(event.getRoles());
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onPeerReachable(final MemberReachableEvent event) {
+ LOG.debug("Received MemberReachable : {}", event);
+
+ handleReachableEvent(event.getRoles());
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onGetEntities(final GetEntitiesBackendRequest request) {
+ request.getReplyTo().tell(StatusReply.success(new GetEntitiesBackendReply(currentOwners, currentCandidates)));
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onGetEntity(final GetEntityBackendRequest request) {
+ final DOMEntity entity = extractEntity(request);
+ request.getReplyTo().tell(StatusReply.success(
+ new GetEntityBackendReply(currentOwners.get(entity), currentCandidates.get(entity))));
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onGetEntityOwner(final GetEntityOwnerBackendRequest request) {
+ request.getReplyTo().tell(
+ StatusReply.success(new GetEntityOwnerBackendReply(currentOwners.get(extractEntity(request)))));
+ return this;
+ }
+
+ private void handleReachableEvent(final Set<String> roles) {
+ if (roles.contains(dataCenter)) {
+ activeMembers.add(extractRole(roles));
+ assignMissingOwners();
+ } else {
+ LOG.debug("Received reachable event from a foreign datacenter, Ignoring... Roles: {}", roles);
+ }
+ }
+
+ private Behavior<OwnerSupervisorCommand> onPeerDown(final MemberDownEvent event) {
+ LOG.debug("Received MemberDown : {}", event);
+
+ handleUnreachableEvent(event.getRoles());
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onPeerUnreachable(final MemberUnreachableEvent event) {
+ LOG.debug("Received MemberUnreachable : {}", event);
+
+ handleUnreachableEvent(event.getRoles());
+ return this;
+ }
+
+ private void handleUnreachableEvent(final Set<String> roles) {
+ if (roles.contains(dataCenter)) {
+ activeMembers.remove(extractRole(roles));
+ reassignUnreachableOwners();
+ } else {
+ LOG.debug("Received unreachable event from a foreign datacenter, Ignoring... Roles: {}", roles);
+ }
+ }
+
+ private Set<String> getActiveMembers() {
+ final CurrentClusterState clusterState = cluster.state();
+ final Set<String> unreachableRoles = clusterState.getUnreachable().stream()
+ .map(OwnerSupervisor::extractRole)
+ .collect(Collectors.toSet());
+
+ return StreamSupport.stream(clusterState.getMembers().spliterator(), false)
+ // We are evaluating the set of roles for each member
+ .map(Member::getRoles)
+ // Filter out any members which do not share our dataCenter
+ .filter(roles -> roles.contains(dataCenter))
+ // Find first legal role
+ .map(OwnerSupervisor::extractRole)
+ // filter out unreachable roles
+ .filter(role -> !unreachableRoles.contains(role))
+ .collect(Collectors.toSet());
+ }
+
+ private DOMEntity extractEntity(final AbstractEntityRequest<?> request) {
+ final var name = request.getName();
+ final var iid = name.getInstanceIdentifier();
+ if (iid != null) {
+ return new DOMEntity(request.getType().getValue(), iidCodec.fromBinding(iid));
+ }
+ final var str = verifyNotNull(name.getString(), "Unhandled entity name %s", name);
+ return new DOMEntity(request.getType().getValue(), str);
+ }
+
+ private static String extractRole(final Member member) {
+ return extractRole(member.getRoles());
+ }
+
+ private static String extractRole(final Set<String> roles) {
+ return roles.stream().filter(role -> !role.startsWith(DATACENTER_PREFIX))
+ .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+ }
+
+ private static String extractDatacenterRole(final Member member) {
+ return member.getRoles().stream().filter(role -> role.startsWith(DATACENTER_PREFIX))
+ .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+ }
+
+ @Override
+ Logger getLogger() {
+ return LOG;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import akka.pattern.StatusReply;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterActivated;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialCandidateSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialOwnerSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorRequest;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Behavior that retrieves current candidates/owners from distributed-data and switches to OwnerSupervisor when the
+ * sync has finished.
+ */
+public final class OwnerSyncer extends AbstractSupervisor {
+ private static final Logger LOG = LoggerFactory.getLogger(OwnerSyncer.class);
+
+ private final ReplicatorMessageAdapter<OwnerSupervisorCommand, LWWRegister<String>> ownerReplicator;
+ private final Map<DOMEntity, Set<String>> currentCandidates = new HashMap<>();
+ private final Map<DOMEntity, String> currentOwners = new HashMap<>();
+
+ // String representation of Entity to DOMEntity
+ private final Map<String, DOMEntity> entityLookup = new HashMap<>();
+ private final BindingInstanceIdentifierCodec iidCodec;
+
+ private int toSync = -1;
+
+ private OwnerSyncer(final ActorContext<OwnerSupervisorCommand> context,
+ final @Nullable ActorRef<OwnerSupervisorReply> notifyDatacenterStarted,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ super(context);
+ this.iidCodec = requireNonNull(iidCodec);
+ LOG.debug("Starting candidate and owner sync");
+
+ final ActorRef<Replicator.Command> replicator = DistributedData.get(context.getSystem()).replicator();
+
+ ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+
+ candidateReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+ InitialCandidateSync::new);
+
+ if (notifyDatacenterStarted != null) {
+ notifyDatacenterStarted.tell(DataCenterActivated.INSTANCE);
+ }
+ }
+
+ public static Behavior<OwnerSupervisorCommand> create(final ActorRef<OwnerSupervisorReply> notifyDatacenterStarted,
+ final BindingInstanceIdentifierCodec iidCodec) {
+ return Behaviors.setup(ctx -> new OwnerSyncer(ctx, notifyDatacenterStarted, iidCodec));
+ }
+
+ @Override
+ public Receive<OwnerSupervisorCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(InitialCandidateSync.class, this::onInitialCandidateSync)
+ .onMessage(InitialOwnerSync.class, this::onInitialOwnerSync)
+ .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+ .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+ .onMessage(ClearCandidates.class, this::finishClearCandidates)
+ .build();
+ }
+
+ private Behavior<OwnerSupervisorCommand> onFailEntityRpc(final OwnerSupervisorRequest message) {
+ LOG.debug("Failing rpc request. {}", message);
+ message.getReplyTo().tell(StatusReply.error(
+ "OwnerSupervisor is inactive so it cannot handle entity rpc requests."));
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onInitialCandidateSync(final InitialCandidateSync rsp) {
+ final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = rsp.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ return doInitialSync((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response);
+ } else if (response instanceof Replicator.NotFound) {
+ LOG.debug("No candidates found switching to supervisor");
+ return switchToSupervisor();
+ } else {
+ LOG.debug("Initial candidate sync failed, switching to supervisor. Sync reply: {}", response);
+ return switchToSupervisor();
+ }
+ }
+
+ private Behavior<OwnerSupervisorCommand> doInitialSync(
+ final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response) {
+
+ final ORMap<DOMEntity, ORSet<String>> candidates = response.get(CandidateRegistry.KEY);
+ candidates.getEntries().entrySet().forEach(entry -> {
+ currentCandidates.put(entry.getKey(), new HashSet<>(entry.getValue().getElements()));
+ });
+
+ toSync = candidates.keys().size();
+ for (final DOMEntity entity : candidates.keys().getElements()) {
+ entityLookup.put(entity.toString(), entity);
+
+ ownerReplicator.askGet(
+ askReplyTo -> new Replicator.Get<>(
+ new LWWRegisterKey<>(entity.toString()),
+ Replicator.readLocal(),
+ askReplyTo),
+ InitialOwnerSync::new);
+ }
+
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> onInitialOwnerSync(final InitialOwnerSync rsp) {
+ final Replicator.GetResponse<LWWRegister<String>> response = rsp.getResponse();
+ if (response instanceof Replicator.GetSuccess) {
+ handleOwnerRsp((Replicator.GetSuccess<LWWRegister<String>>) response);
+ } else if (response instanceof Replicator.NotFound) {
+ handleNotFoundOwnerRsp((Replicator.NotFound<LWWRegister<String>>) response);
+ } else {
+ LOG.debug("Initial sync failed response: {}", response);
+ }
+
+ // count the responses, on last switch behaviors
+ toSync--;
+ if (toSync == 0) {
+ return switchToSupervisor();
+ }
+
+ return this;
+ }
+
+ private Behavior<OwnerSupervisorCommand> switchToSupervisor() {
+ LOG.debug("Initial sync done, switching to supervisor. candidates: {}, owners: {}",
+ currentCandidates, currentOwners);
+ return Behaviors.setup(ctx -> OwnerSupervisor.create(currentCandidates, currentOwners, iidCodec));
+ }
+
+ private void handleOwnerRsp(final Replicator.GetSuccess<LWWRegister<String>> rsp) {
+ final DOMEntity entity = entityLookup.get(rsp.key().id());
+ final String owner = rsp.get(rsp.key()).getValue();
+
+ currentOwners.put(entity, owner);
+ }
+
+ private static void handleNotFoundOwnerRsp(final Replicator.NotFound<LWWRegister<String>> rsp) {
+ LOG.debug("Owner not found. {}", rsp);
+ }
+
+ @Override
+ Logger getLogger() {
+ return LOG;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+
+public abstract class AbstractEntityRequest<T extends OwnerSupervisorReply> extends OwnerSupervisorRequest<T> {
+ private static final long serialVersionUID = 1L;
+
+ private final @NonNull EntityType type;
+ private final @NonNull EntityName name;
+
+ AbstractEntityRequest(final ActorRef<StatusReply<T>> replyTo, final EntityId entity) {
+ super(replyTo);
+ this.type = entity.requireType();
+ this.name = entity.requireName();
+ }
+
+ public final @NonNull EntityType getType() {
+ return type;
+ }
+
+ public final @NonNull EntityName getName() {
+ return name;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class ActivateDataCenter extends OwnerSupervisorCommand implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ActorRef<OwnerSupervisorReply> replyTo;
+
+ public ActivateDataCenter(final @Nullable ActorRef<OwnerSupervisorReply> replyTo) {
+ this.replyTo = replyTo;
+ }
+
+ public ActorRef<OwnerSupervisorReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class CandidatesChanged extends OwnerSupervisorCommand {
+ private final @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+ public CandidatesChanged(final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> subscribeResponse) {
+ this.response = requireNonNull(subscribeResponse);
+ }
+
+ public @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("response", response).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ClearCandidates extends OwnerSupervisorCommand {
+
+ private final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+ private final ClearCandidatesForMember originalMessage;
+
+ public ClearCandidates(final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+ final ClearCandidatesForMember originalMessage) {
+ this.response = response;
+ this.originalMessage = originalMessage;
+ }
+
+ public Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+ public ClearCandidatesForMember getOriginalMessage() {
+ return originalMessage;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+
+/**
+ * Request sent from Candidate registration actors to clear the candidate from all entities. Issued at start to clear
+ * candidates from previous iteration of a node. Owner supervisor responds to this request to notify the registration
+ * actor it can start up and process candidate requests.
+ */
+public class ClearCandidatesForMember extends OwnerSupervisorCommand implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ActorRef<ClearCandidatesResponse> replyTo;
+ private final String candidate;
+
+ public ClearCandidatesForMember(final ActorRef<ClearCandidatesResponse> replyTo, final String candidate) {
+ this.replyTo = replyTo;
+ this.candidate = candidate;
+ }
+
+ public ActorRef<ClearCandidatesResponse> getReplyTo() {
+ return replyTo;
+ }
+
+ public String getCandidate() {
+ return candidate;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+/**
+ * Response sent from OwnerSupervisor to the ClearCandidatesForMember request, notifying the caller that removal has
+ * finished.
+ */
+public class ClearCandidatesResponse implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ClearCandidatesUpdateResponse extends OwnerSupervisorCommand {
+ private final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response;
+ private final ActorRef<ClearCandidatesResponse> replyTo;
+
+ public ClearCandidatesUpdateResponse(final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response,
+ final ActorRef<ClearCandidatesResponse> replyTo) {
+ this.response = response;
+ this.replyTo = replyTo;
+ }
+
+ public Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+
+ public ActorRef<ClearCandidatesResponse> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+public final class DataCenterActivated extends OwnerSupervisorReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+ public static final DataCenterActivated INSTANCE = new DataCenterActivated();
+
+ private DataCenterActivated() {
+ // NOOP
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+public final class DataCenterDeactivated extends OwnerSupervisorReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+ public static final DataCenterDeactivated INSTANCE = new DataCenterDeactivated();
+
+ private DataCenterDeactivated() {
+ // NOOP
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class DeactivateDataCenter extends OwnerSupervisorCommand implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ActorRef<OwnerSupervisorReply> replyTo;
+
+ public DeactivateDataCenter(final @Nullable ActorRef<OwnerSupervisorReply> replyTo) {
+ this.replyTo = replyTo;
+ }
+
+ public ActorRef<OwnerSupervisorReply> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSetMultimap;
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetEntitiesBackendReply extends OwnerSupervisorReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ImmutableSetMultimap<DOMEntity, String> candidates;
+ private final ImmutableMap<DOMEntity, String> owners;
+
+ public GetEntitiesBackendReply(final Map<DOMEntity, String> owners, final Map<DOMEntity, Set<String>> candidates) {
+ final ImmutableSetMultimap.Builder<DOMEntity, String> builder = ImmutableSetMultimap.builder();
+ for (Map.Entry<DOMEntity, Set<String>> entry : candidates.entrySet()) {
+ builder.putAll(entry.getKey(), entry.getValue());
+ }
+ this.candidates = builder.build();
+ this.owners = ImmutableMap.copyOf(owners);
+ }
+
+ public ImmutableSetMultimap<DOMEntity, String> getCandidates() {
+ return candidates;
+ }
+
+ public ImmutableMap<DOMEntity, String> getOwners() {
+ return owners;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+
+public final class GetEntitiesBackendRequest extends OwnerSupervisorRequest<GetEntitiesBackendReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntitiesBackendRequest(final ActorRef<StatusReply<GetEntitiesBackendReply>> replyTo) {
+ super(replyTo);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import java.util.Set;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class GetEntityBackendReply extends OwnerSupervisorReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final ImmutableSet<String> candidates;
+ private final String owner;
+
+ public GetEntityBackendReply(final @Nullable String owner, final @Nullable Set<String> candidates) {
+ this.owner = owner;
+ this.candidates = candidates == null ? ImmutableSet.of() : ImmutableSet.copyOf(candidates);
+ }
+
+ public ImmutableSet<String> getCandidates() {
+ return candidates;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityBackendRequest extends AbstractEntityRequest<GetEntityBackendReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntityBackendRequest(final ActorRef<StatusReply<GetEntityBackendReply>> replyTo, final EntityId entity) {
+ super(replyTo, entity);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class GetEntityOwnerBackendReply extends OwnerSupervisorReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String owner;
+
+ public GetEntityOwnerBackendReply(final @Nullable String owner) {
+ this.owner = owner;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityOwnerBackendRequest extends AbstractEntityRequest<GetEntityOwnerBackendReply> {
+ private static final long serialVersionUID = 1L;
+
+ public GetEntityOwnerBackendRequest(final ActorRef<StatusReply<GetEntityOwnerBackendReply>> replyTo,
+ final EntityId entity) {
+ super(replyTo, entity);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InitialCandidateSync extends OwnerSupervisorCommand {
+ private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+ public InitialCandidateSync(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response) {
+ this.response = response;
+ }
+
+ public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class InitialOwnerSync extends OwnerSupervisorCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+
+ public InitialOwnerSync(final GetResponse<LWWRegister<String>> response) {
+ this.response = requireNonNull(response);
+ }
+
+ public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Address;
+import com.google.common.base.MoreObjects;
+import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class InternalClusterEvent extends OwnerSupervisorCommand {
+ private final @NonNull Set<String> roles;
+ private final @NonNull Address address;
+
+ InternalClusterEvent(final Address address, final Set<String> roles) {
+ this.address = requireNonNull(address);
+ this.roles = Set.copyOf(roles);
+ }
+
+ public final @NonNull Address getAddress() {
+ return address;
+ }
+
+ public final @NonNull Set<String> getRoles() {
+ return roles;
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("address", address).add("roles", roles).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberDownEvent extends InternalClusterEvent {
+ public MemberDownEvent(final Address address, final Set<String> roles) {
+ super(address, roles);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberReachableEvent extends InternalClusterEvent {
+ public MemberReachableEvent(final Address address, final Set<String> roles) {
+ super(address, roles);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberUnreachableEvent extends InternalClusterEvent {
+ public MemberUnreachableEvent(final Address address, final Set<String> roles) {
+ super(address, roles);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberUpEvent extends InternalClusterEvent {
+ public MemberUpEvent(final Address address, final Set<String> roles) {
+ super(address, roles);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.UpdateResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class OwnerChanged extends OwnerSupervisorCommand {
+ private final @NonNull UpdateResponse<LWWRegister<String>> rsp;
+
+ public OwnerChanged(final UpdateResponse<LWWRegister<String>> rsp) {
+ this.rsp = requireNonNull(rsp);
+ }
+
+ public @NonNull UpdateResponse<LWWRegister<String>> getResponse() {
+ return rsp;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+public abstract class OwnerSupervisorCommand {
+ OwnerSupervisorCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+public abstract class OwnerSupervisorReply {
+ OwnerSupervisorReply() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class OwnerSupervisorRequest<T extends OwnerSupervisorReply> extends OwnerSupervisorCommand
+ implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final @NonNull ActorRef<StatusReply<T>> replyTo;
+
+ OwnerSupervisorRequest(final ActorRef<StatusReply<T>> replyTo) {
+ this.replyTo = requireNonNull(replyTo);
+ }
+
+ public final @NonNull ActorRef<StatusReply<T>> getReplyTo() {
+ return replyTo;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate;
+
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.Cluster;
+import akka.cluster.ddata.Key;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORMapKey;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.util.Set;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Actor responsible for handling registrations of candidates into distributed-data.
+ */
+public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryCommand> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(CandidateRegistry.class);
+
+ private static final String DATACENTER_PREFIX = "dc-";
+
+ public static final Key<ORMap<DOMEntity, ORSet<String>>> KEY = new ORMapKey<>("candidateRegistry");
+
+ private final ReplicatorMessageAdapter<CandidateRegistryCommand, ORMap<DOMEntity, ORSet<String>>> replicatorAdapter;
+ private final SelfUniqueAddress node;
+ private final String selfRole;
+
+ private CandidateRegistry(final ActorContext<CandidateRegistryCommand> context,
+ final ReplicatorMessageAdapter<CandidateRegistryCommand,
+ ORMap<DOMEntity, ORSet<String>>> replicatorAdapter) {
+ super(context);
+ this.replicatorAdapter = replicatorAdapter;
+
+ this.node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+ this.selfRole = extractRole(Cluster.get(context.getSystem()).selfMember().getRoles());
+
+ LOG.debug("{} : Candidate registry started", selfRole);
+ }
+
+ public static Behavior<CandidateRegistryCommand> create() {
+ return Behaviors.setup(ctx ->
+ DistributedData.withReplicatorMessageAdapter(
+ (ReplicatorMessageAdapter<CandidateRegistryCommand,
+ ORMap<DOMEntity,ORSet<String>>> replicatorAdapter) ->
+ new CandidateRegistry(ctx, replicatorAdapter)));
+ }
+
+ @Override
+ public Receive<CandidateRegistryCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(RegisterCandidate.class, this::onRegisterCandidate)
+ .onMessage(UnregisterCandidate.class, this::onUnregisterCandidate)
+ .onMessage(InternalUpdateResponse.class, this::onInternalUpdateResponse)
+ .build();
+ }
+
+ private Behavior<CandidateRegistryCommand> onRegisterCandidate(final RegisterCandidate registerCandidate) {
+ LOG.debug("{} - Registering candidate({}) for entity: {}", selfRole,
+ registerCandidate.getCandidate(), registerCandidate.getEntity());
+ replicatorAdapter.askUpdate(
+ askReplyTo -> new Replicator.Update<>(
+ KEY,
+ ORMap.empty(),
+ Replicator.writeLocal(),
+ askReplyTo,
+ map -> map.update(node, registerCandidate.getEntity(), ORSet.empty(),
+ value -> value.add(node, registerCandidate.getCandidate()))),
+ InternalUpdateResponse::new);
+ return this;
+ }
+
+ private Behavior<CandidateRegistryCommand> onUnregisterCandidate(final UnregisterCandidate unregisterCandidate) {
+ LOG.debug("{} - Removing candidate({}) from entity: {}", selfRole,
+ unregisterCandidate.getCandidate(), unregisterCandidate.getEntity());
+ replicatorAdapter.askUpdate(
+ askReplyTo -> new Replicator.Update<>(
+ KEY,
+ ORMap.empty(),
+ Replicator.writeLocal(),
+ askReplyTo,
+ map -> map.update(node, unregisterCandidate.getEntity(), ORSet.empty(),
+ value -> value.remove(node, unregisterCandidate.getCandidate()))),
+ InternalUpdateResponse::new);
+ return this;
+ }
+
+ private Behavior<CandidateRegistryCommand> onInternalUpdateResponse(final InternalUpdateResponse updateResponse) {
+ LOG.debug("{} : Received update response: {}", selfRole, updateResponse.getRsp());
+ return this;
+ }
+
+ private static String extractRole(final Set<String> roles) {
+ return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
+ .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.actor.typed.javadsl.StashBuffer;
+import akka.cluster.Cluster;
+import java.time.Duration;
+import java.util.Set;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFailed;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFinished;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RemovePreviousCandidates;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CandidateRegistryInit extends AbstractBehavior<CandidateRegistryCommand> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(CandidateRegistryInit.class);
+
+ private static final String DATACENTER_PREFIX = "dc-";
+
+ private final StashBuffer<CandidateRegistryCommand> stash;
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+ private final String selfRole;
+
+ public CandidateRegistryInit(final ActorContext<CandidateRegistryCommand> ctx,
+ final StashBuffer<CandidateRegistryCommand> stash,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+ super(ctx);
+ this.stash = stash;
+ this.ownerSupervisor = ownerSupervisor;
+ this.selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles());
+
+ ctx.getSelf().tell(new RemovePreviousCandidates());
+
+ LOG.debug("{} : CandidateRegistry syncing behavior started.", selfRole);
+ }
+
+ public static Behavior<CandidateRegistryCommand> create(final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+ return Behaviors.withStash(100,
+ stash ->
+ Behaviors.setup(ctx -> new CandidateRegistryInit(ctx, stash, ownerSupervisor)));
+ }
+
+ @Override
+ public Receive<CandidateRegistryCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(RemovePreviousCandidates.class, this::onRemoveCandidates)
+ .onMessage(CandidateRemovalFinished.class, command -> switchToCandidateRegistry())
+ .onMessage(CandidateRemovalFailed.class, this::candidateRemovalFailed)
+ .onMessage(RegisterCandidate.class, this::stashCommand)
+ .onMessage(UnregisterCandidate.class, this::stashCommand)
+ .build();
+ }
+
+ private Behavior<CandidateRegistryCommand> candidateRemovalFailed(final CandidateRemovalFailed command) {
+ LOG.warn("{} : Initial removal of candidates from previous iteration failed. Rescheduling.", selfRole,
+ command.getThrowable());
+ getContext().getSelf().tell(new RemovePreviousCandidates());
+ return this;
+ }
+
+ private Behavior<CandidateRegistryCommand> onRemoveCandidates(final RemovePreviousCandidates command) {
+ LOG.debug("Sending RemovePreviousCandidates.");
+ getContext().ask(ClearCandidatesResponse.class,
+ ownerSupervisor, Duration.ofSeconds(5),
+ ref -> new ClearCandidatesForMember(ref, selfRole),
+ (response, throwable) -> {
+ if (response != null) {
+ return new CandidateRemovalFinished();
+ } else {
+ return new CandidateRemovalFailed(throwable);
+ }
+ });
+
+ return this;
+ }
+
+ private Behavior<CandidateRegistryCommand> stashCommand(final CandidateRegistryCommand command) {
+ LOG.debug("Stashing {}", command);
+ stash.stash(command);
+ return this;
+ }
+
+ private Behavior<CandidateRegistryCommand> switchToCandidateRegistry() {
+ LOG.debug("{} : Clearing of candidates from previous instance done, switching to CandidateRegistry.", selfRole);
+ return stash.unstashAll(CandidateRegistry.create());
+ }
+
+ private static String extractRole(final Set<String> roles) {
+ return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
+ .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public abstract class AbstractCandidateCommand extends CandidateRegistryCommand {
+ private final @NonNull DOMEntity entity;
+ private final @NonNull String candidate;
+
+ AbstractCandidateCommand(final DOMEntity entity, final String candidate) {
+ this.entity = requireNonNull(entity);
+ this.candidate = requireNonNull(candidate);
+ }
+
+ public final @NonNull DOMEntity getEntity() {
+ return entity;
+ }
+
+ public final @NonNull String getCandidate() {
+ return candidate;
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("entity", entity).add("candidate", candidate).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public abstract class CandidateRegistryCommand {
+ CandidateRegistryCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public class CandidateRemovalFailed extends CandidateRegistryCommand {
+
+ private final Throwable throwable;
+
+ public CandidateRemovalFailed(final Throwable throwable) {
+ this.throwable = throwable;
+ }
+
+ public Throwable getThrowable() {
+ return throwable;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public class CandidateRemovalFinished extends CandidateRegistryCommand {
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.UpdateResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InternalUpdateResponse extends CandidateRegistryCommand {
+ private final @NonNull UpdateResponse<ORMap<DOMEntity, ORSet<String>>> rsp;
+
+ public InternalUpdateResponse(final UpdateResponse<ORMap<DOMEntity, ORSet<String>>> rsp) {
+ this.rsp = requireNonNull(rsp);
+ }
+
+ public @NonNull UpdateResponse<ORMap<DOMEntity, ORSet<String>>> getRsp() {
+ return rsp;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Sent to Candidate registry to register the candidate for a given entity.
+ */
+public final class RegisterCandidate extends AbstractCandidateCommand {
+ public RegisterCandidate(final DOMEntity entity, final String candidate) {
+ super(entity, candidate);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+/**
+ * Message sent to candidate registry initial behavior by self to trigger and retrigger(in case of failures) removal
+ * of candidates registered by the previous iteration of this node.
+ */
+public class RemovePreviousCandidates extends CandidateRegistryCommand {
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Sent to CandidateRegistry to unregister the candidate for a given entity.
+ */
+public final class UnregisterCandidate extends AbstractCandidateCommand {
+ public UnregisterCandidate(final DOMEntity entity, final String candidate) {
+ super(entity, candidate);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.InitialOwnerSync;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.ListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.OwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Keeps track of owners for a single entity, which is mapped to a single LWWRegister in distributed-data.
+ * Notifies the listener responsible for tracking the whole entity-type of changes.
+ */
+public class SingleEntityListenerActor extends AbstractBehavior<ListenerCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(SingleEntityListenerActor.class);
+
+ private final String localMember;
+ private final DOMEntity entity;
+ private final ActorRef<TypeListenerCommand> toNotify;
+ private final ReplicatorMessageAdapter<ListenerCommand, LWWRegister<String>> ownerReplicator;
+
+ private String currentOwner = "";
+
+ public SingleEntityListenerActor(final ActorContext<ListenerCommand> context, final String localMember,
+ final DOMEntity entity, final ActorRef<TypeListenerCommand> toNotify) {
+ super(context);
+ this.localMember = localMember;
+ this.entity = entity;
+ this.toNotify = toNotify;
+
+ final ActorRef<Replicator.Command> replicator = DistributedData.get(context.getSystem()).replicator();
+ ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+
+ ownerReplicator.askGet(
+ replyTo -> new Replicator.Get<>(new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), replyTo),
+ InitialOwnerSync::new);
+ LOG.debug("OwnerListenerActor for {} started", entity.toString());
+ }
+
+ public static Behavior<ListenerCommand> create(final String localMember, final DOMEntity entity,
+ final ActorRef<TypeListenerCommand> toNotify) {
+ return Behaviors.setup(ctx -> new SingleEntityListenerActor(ctx, localMember, entity, toNotify));
+ }
+
+ @Override
+ public Receive<ListenerCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(OwnerChanged.class, this::onOwnerChanged)
+ .onMessage(InitialOwnerSync.class, this::onInitialOwnerSync)
+ .build();
+ }
+
+ private Behavior<ListenerCommand> onInitialOwnerSync(final InitialOwnerSync ownerSync) {
+ final Replicator.GetResponse<LWWRegister<String>> response = ownerSync.getResponse();
+ LOG.debug("Received initial sync response for: {}, response: {}", entity, response);
+
+ // only trigger initial notification when there is no owner present as we wont get a subscription callback
+ // when distributed-data does not have any data for a key
+ if (response instanceof Replicator.NotFound) {
+
+ // no data is present, trigger initial notification with no owner
+ triggerNoOwnerNotification();
+ } else if (response instanceof Replicator.GetSuccess) {
+
+ // when we get a success just let subscribe callback handle the initial notification
+ LOG.debug("Owner present for entity: {} at the time of initial sync.", entity);
+ } else {
+ LOG.warn("Get has failed for entity: {}", response);
+ }
+
+ // make sure to subscribe AFTER initial notification
+ ownerReplicator.subscribe(new LWWRegisterKey<>(entity.toString()), OwnerChanged::new);
+
+ return this;
+ }
+
+ private void triggerNoOwnerNotification() {
+ LOG.debug("Triggering initial notification without an owner for: {}", entity);
+ toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.REMOTE_OWNERSHIP_LOST_NO_OWNER, false));
+ }
+
+ private Behavior<ListenerCommand> onOwnerChanged(final OwnerChanged ownerChanged) {
+
+ final Replicator.SubscribeResponse<LWWRegister<String>> response = ownerChanged.getResponse();
+ if (response instanceof Replicator.Changed) {
+
+ final Replicator.Changed<LWWRegister<String>> registerChanged =
+ (Replicator.Changed<LWWRegister<String>>) response;
+ LOG.debug("Owner changed for: {}, prevOwner: {}, newOwner: {}",
+ entity, currentOwner, registerChanged.get(registerChanged.key()).getValue());
+ handleOwnerChange(registerChanged);
+ } else if (response instanceof Replicator.Deleted) {
+ handleOwnerLost((Replicator.Deleted<LWWRegister<String>>) response);
+ }
+
+ return this;
+ }
+
+ private void handleOwnerChange(final Replicator.Changed<LWWRegister<String>> changed) {
+ final String newOwner = changed.get(changed.key()).getValue();
+
+ final boolean wasOwner = currentOwner.equals(localMember);
+ final boolean isOwner = newOwner.equals(localMember);
+ final boolean hasOwner = !newOwner.equals("");
+
+ LOG.debug("Owner changed for entity:{}, currentOwner: {}, wasOwner: {}, isOwner: {}, hasOwner:{}",
+ entity, currentOwner, wasOwner, isOwner, hasOwner);
+
+ currentOwner = newOwner;
+
+ toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, isOwner, hasOwner),
+ false));
+ }
+
+ private void handleOwnerLost(final Replicator.Deleted<LWWRegister<String>> changed) {
+ final boolean wasOwner = currentOwner.equals(localMember);
+
+ LOG.debug("Owner lost for entity:{}, currentOwner: {}, wasOwner: {}", entity, currentOwner, wasOwner);
+
+ currentOwner = "";
+ toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, false, false), false));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class InitialOwnerSync extends ListenerCommand {
+ private final @NonNull GetResponse<LWWRegister<String>> response;
+
+ public InitialOwnerSync(final GetResponse<LWWRegister<String>> response) {
+ this.response = requireNonNull(response);
+ }
+
+ public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+public abstract class ListenerCommand {
+ ListenerCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Notification from distributed-data sent to the SingleEntityListenerActor when owner changes for the tracked entity.
+ */
+public final class OwnerChanged extends ListenerCommand {
+ private final @NonNull SubscribeResponse<LWWRegister<String>> response;
+
+ public OwnerChanged(final SubscribeResponse<LWWRegister<String>> response) {
+ this.response = requireNonNull(response);
+ }
+
+ public @NonNull SubscribeResponse<LWWRegister<String>> getResponse() {
+ return response;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator.Changed;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.SingleEntityListenerActor;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.ListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.CandidatesChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TerminateListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class EntityTypeListenerActor extends AbstractBehavior<TypeListenerCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(EntityTypeListenerActor.class);
+
+ private final Map<DOMEntity, ActorRef<ListenerCommand>> activeListeners = new HashMap<>();
+ private final String localMember;
+ private final String entityType;
+ private final DOMEntityOwnershipListener listener;
+
+ public EntityTypeListenerActor(final ActorContext<TypeListenerCommand> context, final String localMember,
+ final String entityType, final DOMEntityOwnershipListener listener) {
+ super(context);
+ this.localMember = localMember;
+ this.entityType = entityType;
+ this.listener = listener;
+
+ new ReplicatorMessageAdapter<TypeListenerCommand, ORMap<DOMEntity, ORSet<String>>>(context,
+ DistributedData.get(context.getSystem()).replicator(), Duration.ofSeconds(5))
+ .subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
+ }
+
+ public static Behavior<TypeListenerCommand> create(final String localMember, final String entityType,
+ final DOMEntityOwnershipListener listener) {
+ return Behaviors.setup(ctx -> new EntityTypeListenerActor(ctx, localMember, entityType, listener));
+ }
+
+ @Override
+ public Receive<TypeListenerCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(CandidatesChanged.class, this::onCandidatesChanged)
+ .onMessage(EntityOwnerChanged.class, this::onOwnerChanged)
+ .onMessage(TerminateListener.class, this::onTerminate)
+ .build();
+ }
+
+ private Behavior<TypeListenerCommand> onCandidatesChanged(final CandidatesChanged notification) {
+ final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response = notification.getResponse();
+ if (response instanceof Changed) {
+ processCandidates(((Changed<ORMap<DOMEntity, ORSet<String>>>) response).get(response.key()).getEntries());
+ } else {
+ LOG.warn("Unexpected notification from replicator: {}", response);
+ }
+ return this;
+ }
+
+ private void processCandidates(final Map<DOMEntity, ORSet<String>> entries) {
+ final Map<DOMEntity, ORSet<String>> filteredCandidates = entries.entrySet().stream()
+ .filter(entry -> entry.getKey().getType().equals(entityType))
+ .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
+ LOG.debug("Entity-type: {} current candidates: {}", entityType, filteredCandidates);
+
+ final Set<DOMEntity> removed =
+ ImmutableSet.copyOf(Sets.difference(activeListeners.keySet(), filteredCandidates.keySet()));
+ if (!removed.isEmpty()) {
+ LOG.debug("Stopping listeners for {}", removed);
+ // kill actors for the removed
+ removed.forEach(removedEntity -> getContext().stop(activeListeners.remove(removedEntity)));
+ }
+
+ for (final Entry<DOMEntity, ORSet<String>> entry : filteredCandidates.entrySet()) {
+ activeListeners.computeIfAbsent(entry.getKey(), key -> {
+ // spawn actor for this entity
+ LOG.debug("Starting listener for {}", key);
+ return getContext().spawn(SingleEntityListenerActor.create(localMember, key, getContext().getSelf()),
+ "SingleEntityListener-" + encodeEntityToActorName(key));
+ });
+ }
+ }
+
+ private Behavior<TypeListenerCommand> onOwnerChanged(final EntityOwnerChanged rsp) {
+ LOG.debug("{} : Entity-type: {} listener, owner change: {}", localMember, entityType, rsp);
+ listener.ownershipChanged(rsp.entity(), rsp.change(), false);
+ return this;
+ }
+
+ private Behavior<TypeListenerCommand> onTerminate(final TerminateListener command) {
+ LOG.debug("Terminating listener for type: {}, listener: {}", entityType, listener);
+ return Behaviors.stopped();
+ }
+
+ private static String encodeEntityToActorName(final DOMEntity entity) {
+ return "type=" + entity.getType() + ",entity="
+ + entity.getIdentifier().getLastPathArgument().getNodeType().getLocalName() + "-" + UUID.randomUUID();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TerminateListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.UnregisterListener;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class EntityTypeListenerRegistry extends AbstractBehavior<TypeListenerRegistryCommand> {
+ private static final Logger LOG = LoggerFactory.getLogger(EntityTypeListenerRegistry.class);
+
+ private final Map<DOMEntityOwnershipListener, ActorRef<TypeListenerCommand>> spawnedListenerActors =
+ new HashMap<>();
+ private final String localMember;
+
+ public EntityTypeListenerRegistry(final ActorContext<TypeListenerRegistryCommand> context,
+ final String localMember) {
+ super(context);
+ this.localMember = requireNonNull(localMember);
+ }
+
+ public static Behavior<TypeListenerRegistryCommand> create(final String role) {
+ return Behaviors.setup(ctx -> new EntityTypeListenerRegistry(ctx, role));
+ }
+
+ @Override
+ public Receive<TypeListenerRegistryCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(RegisterListener.class, this::onRegisterListener)
+ .onMessage(UnregisterListener.class, this::onUnregisterListener)
+ .build();
+ }
+
+ private Behavior<TypeListenerRegistryCommand> onRegisterListener(final RegisterListener command) {
+ LOG.debug("Spawning entity type listener actor for: {}", command.getEntityType());
+
+ final ActorRef<TypeListenerCommand> listenerActor =
+ getContext().spawn(EntityTypeListenerActor.create(localMember,
+ command.getEntityType(), command.getDelegateListener()),
+ "TypeListener:" + encodeEntityToActorName(command.getEntityType()));
+ spawnedListenerActors.put(command.getDelegateListener(), listenerActor);
+ return this;
+ }
+
+ private Behavior<TypeListenerRegistryCommand> onUnregisterListener(final UnregisterListener command) {
+ LOG.debug("Stopping entity type listener actor for: {}", command.getEntityType());
+
+ final ActorRef<TypeListenerCommand> actor = spawnedListenerActors.remove(command.getDelegateListener());
+ if (actor != null) {
+ actor.tell(TerminateListener.INSTANCE);
+ }
+ return this;
+ }
+
+ private static String encodeEntityToActorName(final String entityType) {
+ return "type=" + entityType + "-" + UUID.randomUUID();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Adapted notification from distributed-data sent to EntityTypeListenerActor when candidates change.
+ */
+public final class CandidatesChanged extends TypeListenerCommand {
+ private final @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+ public CandidatesChanged(final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response) {
+ this.response = requireNonNull(response);
+ }
+
+ public @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+ return response;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("response", response).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerActor;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Notification sent to {@link EntityTypeListenerActor} when there is an owner change for an Entity of a given type.
+ */
+@NonNullByDefault
+public final class EntityOwnerChanged extends TypeListenerCommand {
+ private final DOMEntity entity;
+ private final EntityOwnershipStateChange change;
+ private final boolean inJeopardy;
+
+ public EntityOwnerChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+ final boolean inJeopardy) {
+ this.entity = requireNonNull(entity);
+ this.change = requireNonNull(change);
+ this.inJeopardy = requireNonNull(inJeopardy);
+ }
+
+ public DOMEntity entity() {
+ return entity;
+ }
+
+ public EntityOwnershipStateChange change() {
+ return change;
+ }
+
+ public boolean inJeopardy() {
+ return inJeopardy;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("entity", entity)
+ .add("change", change)
+ .add("inJeopardy", inJeopardy)
+ .toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+/**
+ * Register a DOMEntityOwnershipListener for a given entity-type.
+ */
+public final class RegisterListener extends TypeListenerRegistryCommand {
+ public RegisterListener(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+ super(entityType, delegateListener);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+/**
+ * Sent to the listener actor to stop it on demand ie during listener unregistration.
+ */
+public final class TerminateListener extends TypeListenerCommand {
+
+ public static final TerminateListener INSTANCE = new TerminateListener();
+
+ private TerminateListener() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+public abstract class TypeListenerCommand {
+ TypeListenerCommand() {
+ // Hidden on purpose
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+public abstract class TypeListenerRegistryCommand {
+ private final @NonNull String entityType;
+ private final @NonNull DOMEntityOwnershipListener delegateListener;
+
+ TypeListenerRegistryCommand(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+ this.entityType = requireNonNull(entityType);
+ this.delegateListener = requireNonNull(delegateListener);
+ }
+
+ public final @NonNull String getEntityType() {
+ return entityType;
+ }
+
+ public final @NonNull DOMEntityOwnershipListener getDelegateListener() {
+ return delegateListener;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+/**
+ * Unregister a listener from the EntityTypeListenerRegistry.
+ */
+public final class UnregisterListener extends TypeListenerRegistryCommand {
+ public UnregisterListener(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+ super(entityType, delegateListener);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev;
+
+public final class EntityNameBuilder {
+ private EntityNameBuilder() {
+ // Hidden on purpose
+ }
+
+ public static EntityName getDefaultInstance(final String defaultValue) {
+ throw new UnsupportedOperationException("Not yet implemented");
+ }
+}
--- /dev/null
+module odl-entity-owners {
+ namespace urn:opendaylight:params:xml:ns:yang:controller:entity-owners;
+ prefix entity-owners;
+
+ organization 'OpenDaylight Project';
+ description "An initial cut at modeling entity ownership status information
+ in a way which is not dependent on the datastore.
+
+ This model is considered experimental and
+ implementation-specific. It can change incompatibly between
+ OpenDaylight releases.";
+
+ typedef entity-type {
+ type string {
+ length 1..max;
+ // FIXME: it would be nice to have a pattern here, or even better
+ // if we turn this into an extensible enum (i.e. identityref)
+ }
+ }
+
+ typedef entity-name {
+ type union {
+ type instance-identifier;
+ type string {
+ length 1..max;
+ }
+ }
+ }
+
+ typedef node-name {
+ type string {
+ length 1..max;
+ }
+ }
+
+ grouping entity-id {
+ leaf type {
+ type entity-type;
+ mandatory true;
+ }
+ leaf name {
+ type entity-name;
+ mandatory true;
+ }
+ }
+
+ grouping owner {
+ leaf owner-node {
+ type node-name;
+ }
+ }
+
+ grouping candidates {
+ leaf-list candidate-nodes {
+ type node-name;
+ ordered-by user;
+ min-elements 1;
+ }
+ }
+
+ grouping details {
+ uses owner;
+ uses candidates;
+ }
+
+ rpc get-entities {
+ output {
+ list entities {
+ key 'type name';
+ uses entity-id;
+ uses details;
+ }
+ }
+ }
+
+ rpc get-entity {
+ input {
+ uses entity-id;
+ }
+
+ output {
+ uses details;
+ }
+ }
+
+ rpc get-entity-owner {
+ input {
+ uses entity-id;
+ }
+
+ output {
+ uses owner;
+ }
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+import org.opendaylight.controller.eos.akka.bootstrap.EOSMain;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
+import org.opendaylight.mdsal.binding.generator.impl.DefaultBindingRuntimeGenerator;
+import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeGenerator;
+import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractNativeEosTest {
+
+ public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+ public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+ protected static final String DEFAULT_DATACENTER = "dc-default";
+
+ protected static final List<String> TWO_NODE_SEED_NODES =
+ List.of("akka://ClusterSystem@127.0.0.1:2550",
+ "akka://ClusterSystem@127.0.0.1:2551");
+
+ protected static final List<String> THREE_NODE_SEED_NODES =
+ List.of("akka://ClusterSystem@127.0.0.1:2550",
+ "akka://ClusterSystem@127.0.0.1:2551",
+ "akka://ClusterSystem@127.0.0.1:2552");
+
+ protected static final List<String> DATACENTER_SEED_NODES =
+ List.of("akka://ClusterSystem@127.0.0.1:2550",
+ "akka://ClusterSystem@127.0.0.1:2551",
+ "akka://ClusterSystem@127.0.0.1:2552",
+ "akka://ClusterSystem@127.0.0.1:2553");
+
+ private static final BindingRuntimeGenerator BINDING_RUNTIME_GENERATOR = new DefaultBindingRuntimeGenerator();
+
+ protected static BindingCodecContext CODEC_CONTEXT
+ = new BindingCodecContext(BindingRuntimeHelpers.createRuntimeContext());
+
+ private static final String REMOTE_PROTOCOL = "akka";
+ private static final String PORT_PARAM = "akka.remote.artery.canonical.port";
+ private static final String ROLE_PARAM = "akka.cluster.roles";
+ private static final String SEED_NODES_PARAM = "akka.cluster.seed-nodes";
+ private static final String DATA_CENTER_PARAM = "akka.cluster.multi-data-center.self-data-center";
+
+ protected static MockNativeEntityOwnershipService startupNativeService(final int port, final List<String> roles,
+ final List<String> seedNodes)
+ throws ExecutionException, InterruptedException {
+ final Map<String, Object> overrides = new HashMap<>();
+ overrides.put(PORT_PARAM, port);
+ overrides.put(ROLE_PARAM, roles);
+ if (!seedNodes.isEmpty()) {
+ overrides.put(SEED_NODES_PARAM, seedNodes);
+ }
+
+ final Config config = ConfigFactory.parseMap(overrides)
+ .withFallback(ConfigFactory.load());
+
+ // Create a classic Akka system since thats what we will have in osgi
+ final akka.actor.ActorSystem system = akka.actor.ActorSystem.create("ClusterSystem", config);
+
+ return new MockNativeEntityOwnershipService(system);
+ }
+
+ protected static ClusterNode startupRemote(final int port, final List<String> roles)
+ throws ExecutionException, InterruptedException {
+ return startup(port, roles, THREE_NODE_SEED_NODES);
+ }
+
+ protected static ClusterNode startupRemote(final int port, final List<String> roles, final List<String> seedNodes)
+ throws ExecutionException, InterruptedException {
+ return startup(port, roles, seedNodes);
+ }
+
+ protected static ClusterNode startup(final int port, final List<String> roles)
+ throws ExecutionException, InterruptedException {
+ return startup(port, roles, List.of());
+ }
+
+ protected static ClusterNode startup(final int port, final List<String> roles, final List<String> seedNodes)
+ throws ExecutionException, InterruptedException {
+
+ return startup(port, roles, seedNodes, AbstractNativeEosTest::rootBehavior);
+ }
+
+ protected static ClusterNode startup(final int port, final List<String> roles, final List<String> seedNodes,
+ final Supplier<Behavior<BootstrapCommand>> bootstrap)
+ throws ExecutionException, InterruptedException {
+ // Override the configuration
+ final Map<String, Object> overrides = new HashMap<>(4);
+ overrides.put(PORT_PARAM, port);
+ overrides.put(ROLE_PARAM, roles);
+ if (!seedNodes.isEmpty()) {
+ overrides.put(SEED_NODES_PARAM, seedNodes);
+ }
+
+ final Config config = ConfigFactory.parseMap(overrides).withFallback(ConfigFactory.load());
+
+ // Create a classic Akka system since thats what we will have in osgi
+ final akka.actor.ActorSystem system = akka.actor.ActorSystem.create("ClusterSystem", config);
+ final ActorRef<BootstrapCommand> eosBootstrap =
+ Adapter.spawn(system, bootstrap.get(), "EOSBootstrap");
+
+ final CompletionStage<RunningContext> ask = AskPattern.ask(eosBootstrap,
+ GetRunningContext::new,
+ Duration.ofSeconds(5),
+ Adapter.toTyped(system.scheduler()));
+ final RunningContext runningContext = ask.toCompletableFuture().get();
+
+ return new ClusterNode(port, roles, system, eosBootstrap, runningContext.getListenerRegistry(),
+ runningContext.getCandidateRegistry(), runningContext.getOwnerSupervisor());
+ }
+
+ protected static ClusterNode startupWithDatacenter(final int port, final List<String> roles,
+ final List<String> seedNodes, final String dataCenter)
+ throws ExecutionException, InterruptedException {
+ final akka.actor.ActorSystem system = startupActorSystem(port, roles, seedNodes, dataCenter);
+ final ActorRef<BootstrapCommand> eosBootstrap =
+ Adapter.spawn(system, EOSMain.create(CODEC_CONTEXT.getInstanceIdentifierCodec()), "EOSBootstrap");
+
+ final CompletionStage<RunningContext> ask = AskPattern.ask(eosBootstrap,
+ GetRunningContext::new,
+ Duration.ofSeconds(5),
+ Adapter.toTyped(system.scheduler()));
+ final RunningContext runningContext = ask.toCompletableFuture().get();
+
+ return new ClusterNode(port, roles, system, eosBootstrap, runningContext.getListenerRegistry(),
+ runningContext.getCandidateRegistry(), runningContext.getOwnerSupervisor());
+ }
+
+ protected static akka.actor.ActorSystem startupActorSystem(final int port, final List<String> roles,
+ final List<String> seedNodes) {
+ final Map<String, Object> overrides = new HashMap<>();
+ overrides.put(PORT_PARAM, port);
+ overrides.put(ROLE_PARAM, roles);
+ if (!seedNodes.isEmpty()) {
+ overrides.put(SEED_NODES_PARAM, seedNodes);
+ }
+
+ final Config config = ConfigFactory.parseMap(overrides)
+ .withFallback(ConfigFactory.load());
+
+ // Create a classic Akka system since thats what we will have in osgi
+ return akka.actor.ActorSystem.create("ClusterSystem", config);
+ }
+
+ protected static akka.actor.ActorSystem startupActorSystem(final int port, final List<String> roles,
+ final List<String> seedNodes, final String dataCenter) {
+ final Map<String, Object> overrides = new HashMap<>();
+ overrides.put(PORT_PARAM, port);
+ overrides.put(ROLE_PARAM, roles);
+ if (!seedNodes.isEmpty()) {
+ overrides.put(SEED_NODES_PARAM, seedNodes);
+ }
+ overrides.put(DATA_CENTER_PARAM, dataCenter);
+
+ final Config config = ConfigFactory.parseMap(overrides)
+ .withFallback(ConfigFactory.load());
+
+ // Create a classic Akka system since thats what we will have in osgi
+ return akka.actor.ActorSystem.create("ClusterSystem", config);
+ }
+
+ private static Behavior<BootstrapCommand> rootBehavior() {
+ return Behaviors.setup(context -> EOSMain.create(CODEC_CONTEXT.getInstanceIdentifierCodec()));
+ }
+
+ protected static void registerCandidates(final ClusterNode node, final DOMEntity entity, final String... members) {
+ final ActorRef<CandidateRegistryCommand> candidateRegistry = node.getCandidateRegistry();
+ registerCandidates(candidateRegistry, entity, members);
+ }
+
+ protected static void registerCandidates(final ActorRef<CandidateRegistryCommand> candidateRegistry,
+ final DOMEntity entity, final String... members) {
+ for (final String member : members) {
+ candidateRegistry.tell(new RegisterCandidate(entity, member));
+ }
+ }
+
+ protected static void unregisterCandidates(final ClusterNode node, final DOMEntity entity,
+ final String... members) {
+ final ActorRef<CandidateRegistryCommand> candidateRegistry = node.getCandidateRegistry();
+ for (final String member : members) {
+ candidateRegistry.tell(new UnregisterCandidate(entity, member));
+ }
+ }
+
+ protected static MockEntityOwnershipListener registerListener(final ClusterNode node, final DOMEntity entity) {
+ final ActorRef<TypeListenerRegistryCommand> listenerRegistry = node.getListenerRegistry();
+ final MockEntityOwnershipListener listener = new MockEntityOwnershipListener(node.getRoles().get(0));
+ listenerRegistry.tell(new RegisterListener(entity.getType(), listener));
+
+ return listener;
+ }
+
+ protected static void reachableMember(final ClusterNode node, final String... role) {
+ reachableMember(node.getOwnerSupervisor(), role);
+ }
+
+ protected static void reachableMember(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final String... role) {
+ ownerSupervisor.tell(new MemberReachableEvent(
+ new Address(REMOTE_PROTOCOL, "ClusterSystem@127.0.0.1:2550"), Set.of(role)));
+ }
+
+ protected static void unreachableMember(final ClusterNode node, final String... role) {
+ unreachableMember(node.getOwnerSupervisor(), role);
+ }
+
+ protected static void unreachableMember(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+ final String... role) {
+ ownerSupervisor.tell(new MemberUnreachableEvent(
+ new Address(REMOTE_PROTOCOL, "ClusterSystem@127.0.0.1:2550"), Set.of(role)));
+ }
+
+ protected static void waitUntillOwnerPresent(final ClusterNode clusterNode, final DOMEntity entity) {
+ await().atMost(Duration.ofSeconds(15)).until(() -> {
+ final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
+ final CompletionStage<Replicator.GetResponse<LWWRegister<String>>> ask =
+ AskPattern.ask(distributedData.replicator(),
+ replyTo -> new Replicator.Get<>(
+ new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), replyTo),
+ Duration.ofSeconds(5),
+ clusterNode.getActorSystem().scheduler());
+
+ final Replicator.GetResponse<LWWRegister<String>> response =
+ ask.toCompletableFuture().get(5, TimeUnit.SECONDS);
+
+ if (response instanceof Replicator.GetSuccess) {
+ final String owner = ((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue();
+ return !owner.isEmpty();
+ }
+
+ return false;
+ });
+ }
+
+ protected static void waitUntillCandidatePresent(final ClusterNode clusterNode, final DOMEntity entity,
+ final String candidate) {
+ await().atMost(Duration.ofSeconds(15)).until(() -> {
+ final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
+
+ final CompletionStage<Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>>> ask =
+ AskPattern.ask(distributedData.replicator(),
+ replyTo -> new Replicator.Get<>(
+ CandidateRegistry.KEY, Replicator.readLocal(), replyTo),
+ Duration.ofSeconds(5),
+ clusterNode.getActorSystem().scheduler());
+
+ final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response =
+ ask.toCompletableFuture().get(5, TimeUnit.SECONDS);
+
+ if (response instanceof Replicator.GetSuccess) {
+ final Map<DOMEntity, ORSet<String>> entries =
+ ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response).dataValue().getEntries();
+
+ return entries.get(entity).contains(candidate);
+
+ }
+ return false;
+ });
+ }
+
+ protected static CompletableFuture<OwnerSupervisorReply> activateDatacenter(final ClusterNode clusterNode) {
+ final CompletionStage<OwnerSupervisorReply> ask =
+ AskPattern.ask(clusterNode.getOwnerSupervisor(),
+ ActivateDataCenter::new,
+ Duration.ofSeconds(20),
+ clusterNode.actorSystem.scheduler());
+ return ask.toCompletableFuture();
+ }
+
+ protected static CompletableFuture<OwnerSupervisorReply> deactivateDatacenter(final ClusterNode clusterNode) {
+ final CompletionStage<OwnerSupervisorReply> ask =
+ AskPattern.ask(clusterNode.getOwnerSupervisor(),
+ DeactivateDataCenter::new,
+ Duration.ofSeconds(20),
+ clusterNode.actorSystem.scheduler());
+ return ask.toCompletableFuture();
+ }
+
+ protected static void verifyListenerState(final MockEntityOwnershipListener listener, final DOMEntity entity,
+ final boolean hasOwner, final boolean isOwner, final boolean wasOwner) {
+ await().until(() -> !listener.getChanges().isEmpty());
+
+ await().atMost(Duration.ofSeconds(10)).untilAsserted(() -> {
+ final var changes = listener.getChanges();
+ final var domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1);
+ assertEquals(entity, domEntityOwnershipChange.entity());
+
+ assertEquals(hasOwner, domEntityOwnershipChange.change().hasOwner());
+ assertEquals(isOwner, domEntityOwnershipChange.change().isOwner());
+ assertEquals(wasOwner, domEntityOwnershipChange.change().wasOwner());
+ });
+ }
+
+ protected static void verifyNoNotifications(final MockEntityOwnershipListener listener) {
+ verifyNoNotifications(listener, 2);
+ }
+
+ protected static void verifyNoNotifications(final MockEntityOwnershipListener listener, final long delaySeconds) {
+ await().pollDelay(delaySeconds, TimeUnit.SECONDS).until(() -> listener.getChanges().isEmpty());
+ }
+
+ protected static void verifyNoAdditionalNotifications(
+ final MockEntityOwnershipListener listener, final long delaySeconds) {
+ listener.resetListener();
+ verifyNoNotifications(listener, delaySeconds);
+ }
+
+ protected static final class ClusterNode {
+ private final int port;
+ private final List<String> roles;
+ private final akka.actor.typed.ActorSystem<Void> actorSystem;
+ private final ActorRef<BootstrapCommand> eosBootstrap;
+ private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+ private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+ private ClusterNode(final int port,
+ final List<String> roles,
+ final ActorSystem actorSystem,
+ final ActorRef<BootstrapCommand> eosBootstrap,
+ final ActorRef<TypeListenerRegistryCommand> listenerRegistry,
+ final ActorRef<CandidateRegistryCommand> candidateRegistry,
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+ this.port = port;
+ this.roles = roles;
+ this.actorSystem = Adapter.toTyped(actorSystem);
+ this.eosBootstrap = eosBootstrap;
+ this.listenerRegistry = listenerRegistry;
+ this.candidateRegistry = candidateRegistry;
+ this.ownerSupervisor = ownerSupervisor;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public akka.actor.typed.ActorSystem<Void> getActorSystem() {
+ return actorSystem;
+ }
+
+ public ActorRef<BootstrapCommand> getEosBootstrap() {
+ return eosBootstrap;
+ }
+
+ public ActorRef<TypeListenerRegistryCommand> getListenerRegistry() {
+ return listenerRegistry;
+ }
+
+ public ActorRef<CandidateRegistryCommand> getCandidateRegistry() {
+ return candidateRegistry;
+ }
+
+ public ActorRef<OwnerSupervisorCommand> getOwnerSupervisor() {
+ return ownerSupervisor;
+ }
+
+ public List<String> getRoles() {
+ return roles;
+ }
+ }
+
+ protected static final class MockEntityOwnershipListener implements DOMEntityOwnershipListener {
+ private final List<EntityOwnerChanged> changes = new ArrayList<>();
+ private final String member;
+ private final Logger log;
+
+ public MockEntityOwnershipListener(final String member) {
+ log = LoggerFactory.getLogger("EOS-listener-" + member);
+ this.member = member;
+ }
+
+ @Override
+ public void ownershipChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+ final boolean inJeopardy) {
+ final var changed = new EntityOwnerChanged(entity, change, inJeopardy);
+ log.info("{} Received ownershipCHanged: {}", member, changed);
+ log.info("{} changes: {}", member, changes.size());
+ changes.add(changed);
+ }
+
+ public List<EntityOwnerChanged> getChanges() {
+ return changes;
+ }
+
+ public void resetListener() {
+ changes.clear();
+ }
+ }
+
+ protected static final class MockNativeEntityOwnershipService extends AkkaEntityOwnershipService {
+ private final ActorSystem classicActorSystem;
+
+ protected MockNativeEntityOwnershipService(final ActorSystem classicActorSystem)
+ throws ExecutionException, InterruptedException {
+ super(classicActorSystem, CODEC_CONTEXT);
+ this.classicActorSystem = classicActorSystem;
+ }
+
+ protected void reachableMember(final String... role) {
+ AbstractNativeEosTest.reachableMember(ownerSupervisor, role);
+ }
+
+ public void unreachableMember(final String... role) {
+ AbstractNativeEosTest.unreachableMember(ownerSupervisor, role);
+ }
+
+ public ActorSystem getActorSystem() {
+ return classicActorSystem;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import akka.actor.ActorSystem;
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
+import org.awaitility.Durations;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+
+public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
+ static final String ENTITY_TYPE = "test";
+ static final String ENTITY_TYPE2 = "test2";
+ static final QName QNAME = QName.create("test", "2015-08-11", "foo");
+
+ private ActorSystem system;
+ private akka.actor.typed.ActorSystem<Void> typedSystem;
+ private AkkaEntityOwnershipService service;
+ private ActorRef<Replicator.Command> replicator;
+
+ @Before
+ public void setUp() throws Exception {
+ system = ActorSystem.create("ClusterSystem", ConfigFactory.load());
+ typedSystem = Adapter.toTyped(system);
+ replicator = DistributedData.get(typedSystem).replicator();
+
+ service = new AkkaEntityOwnershipService(system, CODEC_CONTEXT);
+ }
+
+ @After
+ public void tearDown() throws InterruptedException, ExecutionException {
+ service.close();
+ ActorTestKit.shutdown(Adapter.toTyped(system), Duration.ofSeconds(20));
+ }
+
+ @Test
+ public void testRegisterCandidate() throws Exception {
+ final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+ final Registration reg = service.registerCandidate(entity);
+ assertNotNull(reg);
+
+ verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+ try {
+ service.registerCandidate(entity);
+ fail("Expected CandidateAlreadyRegisteredException");
+ } catch (final CandidateAlreadyRegisteredException e) {
+ // expected
+ assertEquals("getEntity", entity, e.getEntity());
+ }
+
+ final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId);
+ final Registration reg2 = service.registerCandidate(entity2);
+
+ assertNotNull(reg2);
+ verifyEntityCandidateRegistered(ENTITY_TYPE2, entityId, "member-1");
+ }
+
+ @Test
+ public void testUnregisterCandidate() throws Exception {
+ final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+ final Registration reg = service.registerCandidate(entity);
+ assertNotNull(reg);
+
+ verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+ reg.close();
+ verifyEntityCandidateMissing(ENTITY_TYPE, entityId, "member-1");
+
+ service.registerCandidate(entity);
+ verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+ }
+
+ @Test
+ public void testListenerRegistration() throws Exception {
+
+ final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+ final MockEntityOwnershipListener listener = new MockEntityOwnershipListener("member-1");
+
+ final Registration reg = service.registerListener(entity.getType(), listener);
+
+ assertNotNull("EntityOwnershipListenerRegistration null", reg);
+
+ final Registration candidate = service.registerCandidate(entity);
+
+ verifyListenerState(listener, entity, true, true, false);
+ final int changes = listener.getChanges().size();
+
+ reg.close();
+ candidate.close();
+
+ verifyEntityCandidateMissing(ENTITY_TYPE, entityId, "member-1");
+
+ service.registerCandidate(entity);
+ // check listener not called when listener registration closed
+ await().pollDelay(Durations.TWO_SECONDS).until(() -> listener.getChanges().size() == changes);
+ }
+
+ @Test
+ public void testGetOwnershipState() throws Exception {
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, "one");
+
+ final Registration registration = service.registerCandidate(entity);
+ verifyGetOwnershipState(service, entity, EntityOwnershipState.IS_OWNER);
+
+ final RunningContext runningContext = service.getRunningContext();
+ registerCandidates(runningContext.getCandidateRegistry(), entity, "member-2");
+
+ final ActorRef<OwnerSupervisorCommand> ownerSupervisor = runningContext.getOwnerSupervisor();
+ reachableMember(ownerSupervisor, "member-2", DEFAULT_DATACENTER);
+ unreachableMember(ownerSupervisor, "member-1", DEFAULT_DATACENTER);
+ verifyGetOwnershipState(service, entity, EntityOwnershipState.OWNED_BY_OTHER);
+
+ final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, "two");
+ final Optional<EntityOwnershipState> state = service.getOwnershipState(entity2);
+ assertFalse(state.isPresent());
+
+ unreachableMember(ownerSupervisor, "member-2", DEFAULT_DATACENTER);
+ verifyGetOwnershipState(service, entity, EntityOwnershipState.NO_OWNER);
+ }
+
+ @Test
+ public void testIsCandidateRegistered() throws Exception {
+ final DOMEntity test = new DOMEntity("test-type", "test");
+
+ assertFalse(service.isCandidateRegistered(test));
+
+ service.registerCandidate(test);
+
+ assertTrue(service.isCandidateRegistered(test));
+ }
+
+ @Test
+ public void testEntityRetrievalWithYiid() throws Exception {
+ final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
+ new NodeIdentifier(Topology.QNAME),
+ NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
+ new NodeIdentifier(Node.QNAME),
+ NodeIdentifierWithPredicates.of(Node.QNAME, QName.create(Node.QNAME, "node-id"), "test://test-node"));
+
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+ final Registration reg = service.registerCandidate(entity);
+
+ assertNotNull(reg);
+ verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+ var result = service.getEntity(new GetEntityInputBuilder()
+ .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build())
+ .get()
+ .getResult();
+
+ assertEquals(result.getOwnerNode().getValue(), "member-1");
+ assertEquals(result.getCandidateNodes().get(0).getValue(), "member-1");
+
+ // we should not be able to retrieve the entity when using string
+ final String entityPathEncoded =
+ "/network-topology:network-topology/topology[topology-id='test']/node[node-id='test://test-node']";
+
+ result = service.getEntity(new GetEntityInputBuilder()
+ .setName(new EntityName(entityPathEncoded))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build())
+ .get()
+ .getResult();
+
+ assertNull(result.getOwnerNode());
+ assertEquals(List.of(), result.getCandidateNodes());
+
+ final var getEntitiesResult = service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+ final var entities = getEntitiesResult.nonnullEntities();
+ assertEquals(1, entities.size());
+ assertTrue(entities.get(new EntitiesKey(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+ new EntityType(ENTITY_TYPE))).getCandidateNodes().contains(new NodeName("member-1")));
+ assertTrue(entities.get(new EntitiesKey(
+ new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+ new EntityType(ENTITY_TYPE)))
+ .getOwnerNode().getValue().equals("member-1"));
+
+ final var getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder()
+ .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build()).get().getResult();
+
+ assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1");
+ }
+
+ private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity,
+ final EntityOwnershipState expState) {
+ await().atMost(Duration.ofSeconds(5)).untilAsserted(() -> {
+ assertEquals(Optional.of(expState), service.getOwnershipState(entity));
+ });
+ }
+
+ private void verifyEntityCandidateRegistered(final String entityType,
+ final YangInstanceIdentifier entityId,
+ final String candidateName) {
+ await().atMost(Duration.ofSeconds(5))
+ .untilAsserted(() -> doVerifyEntityCandidateRegistered(entityType, entityId, candidateName));
+ }
+
+ private void doVerifyEntityCandidateRegistered(final String entityType,
+ final YangInstanceIdentifier entityId,
+ final String candidateName)
+ throws ExecutionException, InterruptedException {
+ final Map<DOMEntity, ORSet<String>> entries = getCandidateData();
+ final DOMEntity entity = new DOMEntity(entityType, entityId);
+ assertTrue(entries.containsKey(entity));
+ assertTrue(entries.get(entity).getElements().contains(candidateName));
+ }
+
+ private void verifyEntityCandidateMissing(final String entityType,
+ final YangInstanceIdentifier entityId,
+ final String candidateName) {
+ await().atMost(Duration.ofSeconds(5))
+ .untilAsserted(() -> doVerifyEntityCandidateMissing(entityType, entityId, candidateName));
+ }
+
+ private void doVerifyEntityCandidateMissing(final String entityType,
+ final YangInstanceIdentifier entityId,
+ final String candidateName)
+ throws ExecutionException, InterruptedException {
+ final Map<DOMEntity, ORSet<String>> entries = getCandidateData();
+ final DOMEntity entity = new DOMEntity(entityType, entityId);
+ assertTrue(entries.containsKey(entity));
+ assertFalse(entries.get(entity).getElements().contains(candidateName));
+ }
+
+ private Map<DOMEntity, ORSet<String>> getCandidateData() throws ExecutionException, InterruptedException {
+ final CompletionStage<Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>>> ask =
+ AskPattern.ask(replicator, replyTo ->
+ new Replicator.Get<>(
+ CandidateRegistry.KEY,
+ Replicator.readLocal(),
+ replyTo),
+ Duration.ofSeconds(5),
+ typedSystem.scheduler());
+
+ final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = ask.toCompletableFuture().get();
+ assertTrue(response instanceof Replicator.GetSuccess);
+
+ final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> success =
+ (Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response;
+
+ return success.get(CandidateRegistry.KEY).getEntries();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class DataCentersTest extends AbstractNativeEosTest {
+
+ private ClusterNode node1 = null;
+ private ClusterNode node2 = null;
+ private ClusterNode node3 = null;
+ private ClusterNode node4 = null;
+ public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+ public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+ @Before
+ public void setUp() throws Exception {
+ node1 = startupWithDatacenter(2550, Collections.singletonList("member-1"), DATACENTER_SEED_NODES, "dc-primary");
+ node2 = startupWithDatacenter(2551, Collections.singletonList("member-2"), DATACENTER_SEED_NODES, "dc-primary");
+ node3 = startupWithDatacenter(2552, Collections.singletonList("member-3"), DATACENTER_SEED_NODES, "dc-backup");
+ node4 = startupWithDatacenter(2553, Collections.singletonList("member-4"), DATACENTER_SEED_NODES, "dc-backup");
+
+ // need to wait until all nodes are ready
+ final Cluster cluster = Cluster.get(node4.getActorSystem());
+ Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = new ArrayList<>();
+ cluster.state().getMembers().forEach(members::add);
+ if (members.size() != 4) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+
+ @Test
+ public void testDatacenterActivation() throws Exception {
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node3, ENTITY_1, "member-3");
+
+ activateDatacenter(node1).get();
+
+ waitUntillOwnerPresent(node1, ENTITY_1);
+ final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+ final MockEntityOwnershipListener listener2 = registerListener(node3, ENTITY_1);
+ verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+ unregisterCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(listener1, ENTITY_1, false, false, true);
+ verifyListenerState(listener2, ENTITY_1, false, false, false);
+
+ deactivateDatacenter(node1).get();
+ activateDatacenter(node4).get();
+
+ verifyListenerState(listener1, ENTITY_1, true, false, false);
+ verifyListenerState(listener2, ENTITY_1, true, true, false);
+
+ registerCandidates(node4, ENTITY_1, "member-4");
+ unregisterCandidates(node3, ENTITY_1, "member-3");
+
+ // checking index after notif so current + 1
+ verifyListenerState(listener1, ENTITY_1, true, false, false);
+ verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+ deactivateDatacenter(node3).get();
+ activateDatacenter(node2).get();
+ }
+
+ @Test
+ public void testDataCenterShutdown() throws Exception {
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node3, ENTITY_1, "member-3");
+ registerCandidates(node4, ENTITY_1, "member-4");
+
+ waitUntillCandidatePresent(node1, ENTITY_1, "member-1");
+ waitUntillCandidatePresent(node1, ENTITY_1, "member-3");
+ waitUntillCandidatePresent(node1, ENTITY_1, "member-4");
+
+ activateDatacenter(node1).get();
+
+ waitUntillOwnerPresent(node4, ENTITY_1);
+ final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+ final MockEntityOwnershipListener listener2 = registerListener(node3, ENTITY_1);
+ verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+ unregisterCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(listener1, ENTITY_1, false, false, true);
+ verifyListenerState(listener2, ENTITY_1, false, false, false);
+
+ ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+
+ activateDatacenter(node3).get();
+ verifyListenerState(listener2, ENTITY_1, true, true, false);
+
+ waitUntillOwnerPresent(node3, ENTITY_1);
+ unregisterCandidates(node3, ENTITY_1, "member-3");
+ verifyListenerState(listener2, ENTITY_1, true, false, true);
+ }
+
+ @After
+ public void tearDown() {
+ ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node4.getActorSystem(), Duration.ofSeconds(20));
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import akka.actor.ActorSystem;
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.javadsl.Adapter;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+
+public class EntityRpcHandlerTest extends AbstractNativeEosTest {
+ static final String ENTITY_TYPE = "test";
+
+ private ActorSystem system1;
+ private ActorSystem system2;
+
+ private AkkaEntityOwnershipService service1;
+ private AkkaEntityOwnershipService service2;
+
+ @Before
+ public void setUp() throws Exception {
+ system1 = startupActorSystem(2550, List.of("member-1"), TWO_NODE_SEED_NODES);
+ system2 = startupActorSystem(2551, List.of("member-2"), TWO_NODE_SEED_NODES, "dc-backup");
+
+ service1 = new AkkaEntityOwnershipService(system1, CODEC_CONTEXT);
+ service2 = new AkkaEntityOwnershipService(system2, CODEC_CONTEXT);
+
+ // need to wait until all nodes are ready
+ final var cluster = Cluster.get(Adapter.toTyped(system2));
+ Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = new ArrayList<>();
+ cluster.state().getMembers().forEach(members::add);
+ if (members.size() != 2) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+
+ @After
+ public void tearDown() throws InterruptedException, ExecutionException {
+ service1.close();
+ service2.close();
+ ActorTestKit.shutdown(Adapter.toTyped(system1), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(Adapter.toTyped(system2), Duration.ofSeconds(20));
+ }
+
+ /*
+ * Tests entity rpcs handled both by the owner supervisor(service1) and with an idle supervisor(falling
+ * back to distributed-data in an inactive datacenter). This covers both the available cases, datacenters and case
+ * in which the node with active akka-singleton is shutdown and another one takes over.
+ */
+ @Test
+ public void testEntityRetrievalWithUnavailableSupervisor() throws Exception {
+ final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
+ new NodeIdentifier(Topology.QNAME),
+ NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
+ new NodeIdentifier(Node.QNAME),
+ NodeIdentifierWithPredicates.of(Node.QNAME, QName.create(Node.QNAME, "node-id"), "test://test-node"));
+
+ final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+ final Registration reg = service1.registerCandidate(entity);
+
+ await().untilAsserted(() -> {
+ final var getEntityResult = service1.getEntity(new GetEntityInputBuilder()
+ .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build()).get();
+
+ assertEquals(getEntityResult.getResult().getOwnerNode().getValue(), "member-1");
+ assertEquals(getEntityResult.getResult().getCandidateNodes().get(0).getValue(), "member-1");
+ });
+
+ // keep this under ask timeout to make sure the singleton actor in the inactive datacenter responds with failure
+ // immediately, so that the rpc actor retries with distributed-data asap
+ await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+ final var getEntitiesResult = service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+ final var entities = getEntitiesResult.nonnullEntities();
+ assertEquals(1, entities.size());
+ assertTrue(entities.get(new EntitiesKey(
+ new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+ new EntityType(ENTITY_TYPE)))
+ .getCandidateNodes().contains(new NodeName("member-1")));
+ assertTrue(entities.get(new EntitiesKey(
+ new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+ new EntityType(ENTITY_TYPE)))
+ .getOwnerNode().getValue().equals("member-1"));
+ });
+
+ await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+ final var getEntityResult = service2.getEntity(new GetEntityInputBuilder()
+ .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build()).get().getResult();
+
+ assertEquals(getEntityResult.getOwnerNode().getValue(), "member-1");
+ assertEquals(getEntityResult.getCandidateNodes().get(0).getValue(), "member-1");
+ });
+
+ await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+ final var getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder()
+ .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+ .setType(new EntityType(ENTITY_TYPE))
+ .build()).get().getResult();
+
+ assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1");
+ });
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import java.time.Duration;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class SingleNodeTest extends AbstractNativeEosTest {
+
+ public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+ public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+ private ClusterNode clusterNode;
+
+ @Before
+ public void setUp() throws Exception {
+ clusterNode = startup(2550, List.of("member-1"));
+ }
+
+ @After
+ public void tearDown() {
+ ActorTestKit.shutdown(clusterNode.getActorSystem(), Duration.ofSeconds(20));
+ }
+
+ @Test
+ public void testNotificationPriorToCandidateRegistration() {
+ final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+ verifyNoNotifications(listener);
+
+ registerCandidates(clusterNode, ENTITY_1, "member-1");
+ verifyListenerState(listener, ENTITY_1, true, true, false);
+ }
+
+ @Test
+ public void testListenerPriorToAddingCandidates() {
+ final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+
+ registerCandidates(clusterNode, ENTITY_1, "member-1");
+ waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
+ reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+ reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+ registerCandidates(clusterNode, ENTITY_1, "member-2", "member-3");
+ verifyListenerState(listener, ENTITY_1, true, true, false);
+ verifyNoAdditionalNotifications(listener, 5);
+
+ unregisterCandidates(clusterNode, ENTITY_1, "member-1");
+ verifyListenerState(listener, ENTITY_1, true, false, true);
+ }
+
+ @Test
+ public void testListenerRegistrationAfterCandidates() {
+ registerCandidates(clusterNode, ENTITY_1, "member-1", "member-2", "member-3");
+ waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
+ reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+ reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+ final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+ verifyListenerState(listener, ENTITY_1, true, true, false);
+ verifyNoAdditionalNotifications(listener, 5);
+
+ unregisterCandidates(clusterNode, ENTITY_1, "member-1", "member-2");
+ verifyListenerState(listener, ENTITY_1, true, false, true);
+ }
+
+ @Test
+ public void testMultipleEntities() {
+ registerCandidates(clusterNode, ENTITY_1, "member-1", "member-2", "member-3");
+ waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
+ reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+ reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+ final MockEntityOwnershipListener listener1 = registerListener(clusterNode, ENTITY_1);
+ final MockEntityOwnershipListener listener2 = registerListener(clusterNode, ENTITY_2);
+
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+ verifyNoNotifications(listener2);
+
+ unregisterCandidates(clusterNode, ENTITY_1, "member-1");
+ verifyListenerState(listener1, ENTITY_1, true, false, true);
+ verifyNoNotifications(listener2);
+
+ registerCandidates(clusterNode, ENTITY_2, "member-2");
+ verifyListenerState(listener1, ENTITY_1, true, false, true);
+ verifyListenerState(listener2, ENTITY_2, true, false, false);
+
+ unregisterCandidates(clusterNode, ENTITY_2, "member-2");
+
+ verifyListenerState(listener1, ENTITY_1, true, false, true);
+ verifyListenerState(listener2, ENTITY_2, false, false, false);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import com.google.common.collect.ImmutableList;
+import java.time.Duration;
+import java.util.List;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ThreeNodeBaseTest extends AbstractNativeEosTest {
+ public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+ public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+ private ClusterNode node1;
+ private ClusterNode node2;
+ private ClusterNode node3;
+
+ @Before
+ public void setUp() throws Exception {
+ node1 = startupRemote(2550, List.of("member-1"));
+ node2 = startupRemote(2551, List.of("member-2"));
+ node3 = startupRemote(2552, List.of("member-3"));
+
+ // need to wait until all nodes are ready
+ final Cluster cluster = Cluster.get(node3.getActorSystem());
+ // need a longer timeout with classic remoting, artery.tcp doesnt need to wait as long for init
+ Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+ if (members.size() != 3) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+
+ @After
+ public void tearDown() {
+ // same issue with classic remoting as in setup
+ ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+ }
+
+ @Test
+ public void testInitialNotificationsWithoutOwner() throws Exception {
+ final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+ verifyNoNotifications(listener1);
+
+ final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+ verifyNoNotifications(listener2);
+
+ final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+ verifyNoNotifications(listener3);
+ }
+
+ @Test
+ public void testInitialNotificationsWithOwner() {
+ registerCandidates(node1, ENTITY_1, "member-1");
+ // make sure we register other candidates after the first is seen everywhere to prevent different results due
+ // to timing
+ waitUntillOwnerPresent(node3, ENTITY_1);
+
+ registerCandidates(node2, ENTITY_1, "member-2");
+ registerCandidates(node3, ENTITY_1, "member-3");
+
+ final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+ final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+ verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+ final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+ verifyListenerState(listener3, ENTITY_1, true, false, false);
+ }
+
+ @Test
+ public void testMultipleEntities() {
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node2, ENTITY_1, "member-2");
+ registerCandidates(node3, ENTITY_1, "member-3");
+
+ waitUntillOwnerPresent(node3, ENTITY_1);
+
+ registerCandidates(node2, ENTITY_2, "member-2");
+ waitUntillOwnerPresent(node2, ENTITY_2);
+ registerCandidates(node1, ENTITY_2, "member-1");
+
+ final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+ final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+ final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+ final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+ final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+ final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+
+ verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+ verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+ verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+ unregisterCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, true);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+ unregisterCandidates(node2, ENTITY_1, "member-2");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, true, false);
+
+ unregisterCandidates(node3, ENTITY_1, "member-3");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, false, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, false, false, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, false, false, true);
+
+ // check second listener hasnt moved
+ verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+ verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+ verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+ registerCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import com.google.common.collect.ImmutableList;
+import java.time.Duration;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ThreeNodeReachabilityTest extends AbstractNativeEosTest {
+ public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+ public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+ private ClusterNode node1 = null;
+ private ClusterNode node2 = null;
+ private ClusterNode node3 = null;
+
+ @Before
+ public void setUp() throws Exception {
+ node1 = startupRemote(2550, List.of("member-1"), TWO_NODE_SEED_NODES);
+ node2 = startupRemote(2551, List.of("member-2"), TWO_NODE_SEED_NODES);
+
+ // need to wait until all nodes are ready
+ final Cluster cluster = Cluster.get(node2.getActorSystem());
+ await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+ if (members.size() != 2) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+
+ @After
+ public void tearDown() {
+ ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+
+ if (node3 != null) {
+ ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+ }
+ }
+
+ @Test
+ public void testNodeLateStart() throws Exception {
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node2, ENTITY_1, "member-2");
+
+ registerCandidates(node2, ENTITY_2, "member-2");
+ waitUntillOwnerPresent(node2, ENTITY_2);
+ registerCandidates(node1, ENTITY_2, "member-1");
+
+ final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+ final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+
+ final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+ final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+
+ verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+ verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+
+ unregisterCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, true);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+
+ unregisterCandidates(node2, ENTITY_1, "member-2");
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, false, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, false, false, true);
+
+ startNode3();
+
+ final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+ verifyListenerState(firstEntityListener3, ENTITY_1, false, false, false);
+
+ final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+ verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+ registerCandidates(node3, ENTITY_1, "member-3");
+ waitUntillOwnerPresent(node3, ENTITY_1);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, true, false);
+ }
+
+ @Test
+ public void testReachabilityChangesDuringRuntime() throws Exception {
+ startNode3();
+
+ registerCandidates(node2, ENTITY_1, "member-2");
+ // we want singleton on node1 but owner on node2
+ waitUntillOwnerPresent(node2, ENTITY_1);
+
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node3, ENTITY_1, "member-3");
+
+ registerCandidates(node2, ENTITY_2, "member-2");
+ waitUntillOwnerPresent(node2, ENTITY_2);
+ registerCandidates(node1, ENTITY_2, "member-1");
+
+ final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+ final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+ final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+ final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+ final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+ final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+
+ verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+ verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+ verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+ unreachableMember(node1, "member-2", DEFAULT_DATACENTER);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+ verifyListenerState(secondEntityListener1, ENTITY_2, true, true, false);
+ verifyListenerState(secondEntityListener2, ENTITY_2, true, false, true);
+ verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+ unreachableMember(node1, "member-3", DEFAULT_DATACENTER);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+ unregisterCandidates(node1, ENTITY_1, "member-1", DEFAULT_DATACENTER);
+ unregisterCandidates(node1, ENTITY_2, "member-1", DEFAULT_DATACENTER);
+
+ verifyListenerState(firstEntityListener1, ENTITY_1, false, false, true);
+ verifyListenerState(firstEntityListener2, ENTITY_1, false, false, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, false, false, false);
+
+ verifyListenerState(secondEntityListener1, ENTITY_2, false, false, true);
+ verifyListenerState(secondEntityListener2, ENTITY_2, false, false, false);
+ verifyListenerState(secondEntityListener3, ENTITY_2, false, false, false);
+
+ reachableMember(node1, "member-2", DEFAULT_DATACENTER);
+ verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+ verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+ verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+ }
+
+ @Test
+ public void testSingletonMoving() throws Exception {
+ final MockEntityOwnershipListener listener1 = registerListener(node2, ENTITY_1);
+ final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_2);
+ verifyNoNotifications(listener1);
+ verifyNoNotifications(listener2);
+
+ registerCandidates(node1, ENTITY_1, "member-1");
+ registerCandidates(node2, ENTITY_1, "member-2");
+
+ registerCandidates(node2, ENTITY_2, "member-2");
+ waitUntillOwnerPresent(node2, ENTITY_2);
+ registerCandidates(node1, ENTITY_2, "member-1");
+ // end up with node1 - member-1, node2 - member-2 owners
+ verifyListenerState(listener1, ENTITY_1, true, false, false);
+ verifyListenerState(listener2, ENTITY_2, true, true, false);
+
+ ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+ verifyListenerState(listener2, ENTITY_2, true, true, false);
+
+ startNode3(2);
+
+ final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_2);
+ verifyListenerState(listener3, ENTITY_2, true, false, false);
+
+ node1 = startupRemote(2550, List.of("member-1"));
+
+ final Cluster cluster = Cluster.get(node2.getActorSystem());
+ await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+ if (members.size() != 3) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+
+ final MockEntityOwnershipListener node1Listener = registerListener(node1, ENTITY_1);
+ verifyListenerState(node1Listener, ENTITY_1, true, false, false);
+ }
+
+ @Test
+ public void testOwnerNotReassignedWhenOnlyCandidate() throws Exception {
+ startNode3();
+ final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+ final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+ verifyNoNotifications(listener1);
+ verifyNoNotifications(listener2);
+
+ registerCandidates(node3, ENTITY_1, "member-3");
+ waitUntillOwnerPresent(node1, ENTITY_1);
+
+ MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+ verifyListenerState(listener1, ENTITY_1, true, false, false);
+ verifyListenerState(listener3, ENTITY_1, true, true, false);
+
+ ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+
+ verifyListenerState(listener1, ENTITY_1, true, false, false);
+ verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+ startNode3();
+ verifyListenerState(listener1, ENTITY_1, false, false, false);
+
+ listener3 = registerListener(node3, ENTITY_1);
+ verifyListenerState(listener3, ENTITY_1, false, false, false);
+
+ registerCandidates(node1, ENTITY_1, "member-1");
+
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+ verifyListenerState(listener3, ENTITY_1, true, false, false);
+
+ }
+
+ private void startNode3() throws Exception {
+ startNode3(3);
+ }
+
+ private void startNode3(final int membersPresent) throws Exception {
+ node3 = startupRemote(2552, List.of("member-3"), THREE_NODE_SEED_NODES);
+
+ // need to wait until all nodes are ready
+ final Cluster cluster = Cluster.get(node2.getActorSystem());
+ await().atMost(Duration.ofSeconds(30)).until(() -> {
+ final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+ if (members.size() != membersPresent) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.ClusterSingleton;
+import akka.cluster.typed.SingletonActor;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.AbstractNativeEosTest;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.checker.OwnerStateChecker;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialCandidateSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class OwnerSupervisorTest extends AbstractNativeEosTest {
+
+ @Test
+ public void testCandidatePickingWhenUnreachableCandidates() throws Exception {
+
+ final ClusterNode node = startup(2550, Collections.singletonList("member-1"));
+ try {
+ reachableMember(node, "member-2", DEFAULT_DATACENTER);
+ reachableMember(node, "member-3", DEFAULT_DATACENTER);
+ registerCandidates(node, ENTITY_1, "member-1", "member-2", "member-3");
+
+ final MockEntityOwnershipListener listener = registerListener(node, ENTITY_1);
+ verifyListenerState(listener, ENTITY_1,true, true, false);
+
+ unreachableMember(node, "member-1", DEFAULT_DATACENTER);
+ verifyListenerState(listener, ENTITY_1, true, false, true);
+
+ unreachableMember(node, "member-2", DEFAULT_DATACENTER);
+ verifyListenerState(listener, ENTITY_1, true, false, false);
+
+ unreachableMember(node, "member-3", DEFAULT_DATACENTER);
+ verifyListenerState(listener, ENTITY_1, false, false, false);
+
+ reachableMember(node, "member-2", DEFAULT_DATACENTER);
+ verifyListenerState(listener, ENTITY_1, true, false, false);
+
+ // no notification here as member-2 is already the owner
+ reachableMember(node, "member-1", DEFAULT_DATACENTER);
+
+ unreachableMember(node, "member-2", DEFAULT_DATACENTER);
+ verifyListenerState(listener, ENTITY_1,true, true, false);
+ } finally {
+ ActorTestKit.shutdown(node.getActorSystem());
+ }
+ }
+
+ @Test
+ public void testSupervisorInitWithMissingOwners() throws Exception {
+ final Map<DOMEntity, Set<String>> candidates = new HashMap<>();
+ candidates.put(ENTITY_1, Set.of("member-1"));
+ candidates.put(ENTITY_2, Set.of("member-2"));
+
+ final ClusterNode node = startup(2550, Collections.singletonList("member-1"), Collections.emptyList(),
+ () -> mockedBootstrap(candidates, new HashMap<>()));
+
+ try {
+ waitUntillOwnerPresent(node, ENTITY_1);
+
+ // also do a proper register so the listener from the type lister actor are spawned
+ registerCandidates(node, ENTITY_1, "member-1");
+ registerCandidates(node, ENTITY_2, "member-2");
+
+ final MockEntityOwnershipListener listener1 = registerListener(node, ENTITY_1);
+ final MockEntityOwnershipListener listener2 = registerListener(node, ENTITY_2);
+
+ // first entity should have correctly assigned owner as its reachable
+ verifyListenerState(listener1, ENTITY_1, true, true, false);
+ // this one could not be assigned during init as we dont have member-2 thats reachable
+ verifyListenerState(listener2, ENTITY_2, false, false, false);
+
+ reachableMember(node, "member-2", DEFAULT_DATACENTER);
+ verifyListenerState(listener2, ENTITY_2, true, false, false);
+ } finally {
+ ActorTestKit.shutdown(node.getActorSystem());
+ }
+ }
+
+ private static Behavior<BootstrapCommand> mockedBootstrap(final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners) {
+ return Behaviors.setup(context -> MockBootstrap.create(currentCandidates, currentOwners));
+ }
+
+ /**
+ * Initial behavior that skips initial sync and instead initializes OwnerSupervisor with provided values.
+ */
+ private static final class MockSyncer extends AbstractBehavior<OwnerSupervisorCommand> {
+
+ private final Map<DOMEntity, Set<String>> currentCandidates;
+ private final Map<DOMEntity, String> currentOwners;
+
+ private MockSyncer(final ActorContext<OwnerSupervisorCommand> context,
+ final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners) {
+ super(context);
+ this.currentCandidates = currentCandidates;
+ this.currentOwners = currentOwners;
+
+ context.getSelf().tell(new InitialCandidateSync(null));
+ }
+
+ public static Behavior<OwnerSupervisorCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners) {
+ return Behaviors.setup(ctx -> new MockSyncer(ctx, currentCandidates, currentOwners));
+ }
+
+ @Override
+ public Receive<OwnerSupervisorCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(InitialCandidateSync.class, this::switchToSupervisor)
+ .build();
+ }
+
+ private Behavior<OwnerSupervisorCommand> switchToSupervisor(final InitialCandidateSync message) {
+ return OwnerSupervisor.create(currentCandidates, currentOwners, CODEC_CONTEXT.getInstanceIdentifierCodec());
+ }
+ }
+
+ /**
+ * Bootstrap with OwnerSyncer replaced with the testing syncer behavior.
+ */
+ private static final class MockBootstrap extends AbstractBehavior<BootstrapCommand> {
+
+ private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+ private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+ private final ActorRef<StateCheckerCommand> ownerStateChecker;
+ private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+ private MockBootstrap(final ActorContext<BootstrapCommand> context,
+ final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners) {
+ super(context);
+
+ final Cluster cluster = Cluster.get(context.getSystem());
+ final String role = cluster.selfMember().getRoles().iterator().next();
+
+ listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry");
+ candidateRegistry = context.spawn(CandidateRegistry.create(), "CandidateRegistry");
+
+ final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem());
+ // start the initial sync behavior that switches to the regular one after syncing
+ ownerSupervisor = clusterSingleton.init(SingletonActor.of(
+ MockSyncer.create(currentCandidates, currentOwners), "OwnerSupervisor"));
+
+ ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, null),
+ "OwnerStateChecker");
+ }
+
+ public static Behavior<BootstrapCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+ final Map<DOMEntity, String> currentOwners) {
+ return Behaviors.setup(ctx -> new MockBootstrap(ctx, currentCandidates, currentOwners));
+ }
+
+ @Override
+ public Receive<BootstrapCommand> createReceive() {
+ return newReceiveBuilder()
+ .onMessage(GetRunningContext.class, this::onGetRunningContext)
+ .build();
+ }
+
+ private Behavior<BootstrapCommand> onGetRunningContext(final GetRunningContext request) {
+ request.getReplyTo().tell(
+ new RunningContext(listenerRegistry, candidateRegistry,ownerStateChecker, ownerSupervisor));
+ return this;
+ }
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.service;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.javadsl.Adapter;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.AbstractNativeEosTest;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.impl.EOSClusterSingletonServiceProvider;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ClusterSingletonIntegrationTest.class);
+
+ private AbstractNativeEosTest.MockNativeEntityOwnershipService node1;
+ private MockNativeEntityOwnershipService node2;
+ private MockNativeEntityOwnershipService node3;
+
+ private EOSClusterSingletonServiceProvider singletonNode1;
+ private EOSClusterSingletonServiceProvider singletonNode2;
+ private EOSClusterSingletonServiceProvider singletonNode3;
+
+
+ @Before
+ public void setUp() throws Exception {
+ node1 = startupNativeService(2550, List.of("member-1"), THREE_NODE_SEED_NODES);
+ node2 = startupNativeService(2551, List.of("member-2"), THREE_NODE_SEED_NODES);
+ node3 = startupNativeService(2552, List.of("member-3"), THREE_NODE_SEED_NODES);
+
+ singletonNode1 = new EOSClusterSingletonServiceProvider(node1);
+ singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
+ singletonNode3 = new EOSClusterSingletonServiceProvider(node3);
+
+ waitUntillNodeReady(node3);
+ }
+
+ @After
+ public void tearDown() {
+ ActorTestKit.shutdown(Adapter.toTyped(node1.getActorSystem()), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(Adapter.toTyped(node2.getActorSystem()), Duration.ofSeconds(20));
+ ActorTestKit.shutdown(Adapter.toTyped(node3.getActorSystem()), Duration.ofSeconds(20));
+ }
+
+ @Test
+ public void testSingletonOwnershipNotDropped() {
+ final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1");
+ singletonNode1.registerClusterSingletonService(service);
+
+ verifyServiceActive(service);
+
+ final MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+ singletonNode2.registerClusterSingletonService(service2);
+
+ verifyServiceInactive(service2, 2);
+ }
+
+ @Test
+ public void testSingletonOwnershipHandoff() {
+ final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1");
+ final Registration registration = singletonNode1.registerClusterSingletonService(service);
+
+ verifyServiceActive(service);
+
+ final MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+ singletonNode2.registerClusterSingletonService(service2);
+
+ verifyServiceInactive(service2, 2);
+
+ registration.close();
+ verifyServiceInactive(service);
+ verifyServiceActive(service2);
+ }
+
+ @Test
+ public void testSingletonOwnershipHandoffOnNodeShutdown() throws Exception {
+ MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+ Registration registration2 = singletonNode2.registerClusterSingletonService(service2);
+
+ verifyServiceActive(service2);
+
+ final MockClusterSingletonService service3 = new MockClusterSingletonService("member-3", "service-1");
+ final Registration registration3 = singletonNode3.registerClusterSingletonService(service3);
+
+ verifyServiceInactive(service3, 2);
+
+ LOG.debug("Shutting down node2");
+ TestKit.shutdownActorSystem(node2.getActorSystem());
+ verifyServiceActive(service3);
+
+ node2 = startupNativeService(2551, List.of("member-1"), THREE_NODE_SEED_NODES);
+ singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
+
+ waitUntillNodeReady(node2);
+ service2 = new MockClusterSingletonService("member-2", "service-1");
+ singletonNode2.registerClusterSingletonService(service2);
+
+ verifyServiceActive(service3);
+ verifyServiceInactive(service2, 5);
+ }
+
+ private static void waitUntillNodeReady(final MockNativeEntityOwnershipService node) {
+ // need to wait until all nodes are ready
+ final Cluster cluster = Cluster.get(Adapter.toTyped(node.getActorSystem()));
+ Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+ final List<Member> members = new ArrayList<>();
+ cluster.state().getMembers().forEach(members::add);
+ if (members.size() != 3) {
+ return false;
+ }
+
+ for (final Member member : members) {
+ if (!member.status().equals(MemberStatus.up())) {
+ return false;
+ }
+ }
+
+ return true;
+ });
+ }
+
+ private static void verifyServiceActive(final MockClusterSingletonService service) {
+ await().untilAsserted(() -> assertTrue(service.isActivated()));
+ }
+
+ private static void verifyServiceActive(final MockClusterSingletonService service, final long delay) {
+ await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(service.isActivated()));
+ }
+
+ private static void verifyServiceInactive(final MockClusterSingletonService service) {
+ await().untilAsserted(() -> assertFalse(service.isActivated()));
+ }
+
+ private static void verifyServiceInactive(final MockClusterSingletonService service, final long delay) {
+ await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertFalse(service.isActivated()));
+ }
+
+ private static class MockClusterSingletonService implements ClusterSingletonService {
+
+ private final String member;
+ private final ServiceGroupIdentifier identifier;
+ private boolean activated = false;
+
+ MockClusterSingletonService(final String member, final String identifier) {
+ this.member = member;
+ this.identifier = new ServiceGroupIdentifier(identifier);
+ }
+
+ @Override
+ public void instantiateServiceInstance() {
+ LOG.debug("{} : Activating service: {}", member, identifier);
+ activated = true;
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> closeServiceInstance() {
+ LOG.debug("{} : Closing service: {}", member, identifier);
+ activated = false;
+ return Futures.immediateFuture(null);
+ }
+
+ @Override
+ public ServiceGroupIdentifier getIdentifier() {
+ return identifier;
+ }
+
+ public boolean isActivated() {
+ return activated;
+ }
+ }
+}
--- /dev/null
+akka {
+ loglevel = debug
+ actor {
+ warn-about-java-serializer-usage = off
+ allow-java-serialization = on
+ provider = cluster
+ }
+
+ remote {
+ artery {
+ enabled = on
+ canonical.hostname = "127.0.0.1"
+ canonical.port = 2550
+ }
+ }
+ cluster {
+ seed-nodes = [
+ "akka://ClusterSystem@127.0.0.1:2550"]
+ roles = [
+ "member-1"
+ ]
+ downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+ distributed-data {
+ # How often the Replicator should send out gossip information.
+ # This value controls how quickly Entity Ownership Service data is replicated
+ # across cluster nodes.
+ gossip-interval = 100 ms
+
+ # How often the subscribers will be notified of changes, if any.
+ # This value controls how quickly Entity Ownership Service decisions are
+ # propagated within a node.
+ notify-subscribers-interval = 20 ms
+ }
+ split-brain-resolver {
+ active-strategy = keep-majority
+ stable-after = 7s
+ }
+ }
+}
+
--- /dev/null
+org.slf4j.simpleLogger.defaultLogLevel=info
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.log.org.opendaylight.controller.eos.akka=debug
+org.slf4j.simpleLogger.log.org.opendaylight.mdsal.singleton=debug
\ No newline at end of file
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-it-base</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencyManagement>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-artifacts</artifactId>
- <version>6.0.4</version>
+ <version>13.0.1</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<artifactId>pax-url-aether</artifactId>
<scope>compile</scope>
</dependency>
+
+ <!-- Referenced by pax-exam-features, needs to be pulled into local repository -->
<dependency>
- <groupId>javax.inject</groupId>
- <artifactId>javax.inject</artifactId>
- <scope>compile</scope>
+ <groupId>org.ops4j.base</groupId>
+ <artifactId>ops4j-base-monitors</artifactId>
+ <version>1.5.1</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.geronimo.specs</groupId>
+ <artifactId>geronimo-atinject_1.0_spec</artifactId>
+ <!-- FIXME: remove this override once pax-exam is aligned with pax-web -->
+ <version>1.0</version>
+ </dependency>
+
<dependency>
<groupId>org.apache.karaf.features</groupId>
<artifactId>org.apache.karaf.features.core</artifactId>
- <version>${karaf.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
+ <artifactId>org.osgi.framework</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>bundle-parent</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-it-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
<karaf.distro.type>zip</karaf.distro.type>
<karaf.keep.unpack>false</karaf.keep.unpack>
+
+ <!-- FIXME: Remove this -->
+ <odlparent.modernizer.enforce>false</odlparent.modernizer.enforce>
</properties>
<dependencyManagement>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>controller-artifacts</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<groupId>org.ops4j.pax.url</groupId>
<artifactId>pax-url-aether</artifactId>
</dependency>
- <dependency>
- <groupId>javax.inject</groupId>
- <artifactId>javax.inject</artifactId>
- <version>1</version>
- </dependency>
<dependency>
<groupId>org.apache.karaf.features</groupId>
<artifactId>org.apache.karaf.features.core</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <artifactId>org.osgi.framework</artifactId>
</dependency>
- <!-- Testing Dependencies -->
+ <!--
+ Unfortunately default mockito-inline does not work in OSGi.
+ See https://github.com/mockito/mockito/issues/2203#issuecomment-926372053
+ -->
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
+ <version>4.11.0</version>
<scope>test</scope>
</dependency>
</dependencies>
+
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
- <!-- Overridden to have TCP channel support -->
- <version>3.0.0-M5</version>
<configuration>
<!-- Overridden to fix corruption, where the process would hang after test -->
<forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory"/>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
- <id>unpack-karaf-resources</id>
- <goals>
- <goal>unpack-dependencies</goal>
- </goals>
- <phase>process-test-resources</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
- <groupId>org.opendaylight.controller</groupId>
- <includeArtifactIds>mockito-core,objenesis,mdsal-it-base</includeArtifactIds>
- <excludes>META-INF\/**</excludes>
- <ignorePermissions>false</ignorePermissions>
- </configuration>
+ <id>unpack-karaf-resources</id>
+ <goals>
+ <goal>unpack-dependencies</goal>
+ </goals>
+ <phase>process-test-resources</phase>
+ <configuration>
+ <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
+ <groupId>org.opendaylight.controller</groupId>
+ <includeArtifactIds>mockito-core,byte-buddy,objenesis,mdsal-it-base</includeArtifactIds>
+ <excludes>META-INF\/**</excludes>
+ <ignorePermissions>false</ignorePermissions>
+ </configuration>
</execution>
- </executions>
+ </executions>
</plugin>
</plugins>
</build>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../parent</relativePath>
- </parent>
-
- <artifactId>messagebus-api</artifactId>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>yang-ext</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>ietf-topology</artifactId>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</Export-Package>
- </instructions>
- </configuration>
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-module event-aggregator {
- // FIXME: this module needs to be split up to concepts and API
- // as the concepts are shared with the other model in this
- // package.
- yang-version 1;
- namespace "urn:cisco:params:xml:ns:yang:messagebus:eventaggregator";
- prefix "eventaggregator";
-
- organization "Cisco Systems, Inc.";
- contact "Robert Gallas";
-
- description
- "Module implementing message but RPC.
-
- Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
-
- This program and the accompanying materials are made available
- under the terms of the Eclipse Public License v1.0 which
- accompanies this distribution, and is available at
- http://www.eclipse.org/legal/epl-v10.html";
-
- revision "2014-12-02" {
- description "Initial revision";
- }
-
- typedef pattern {
- type string {
- length 1..max;
- }
-
- // FIXME: make this a regular expression
- description "A match pattern. Specifically this is a wildcard pattern.";
- }
-
- typedef notification-pattern {
- type pattern;
- description
- "Pattern for matching candidate notification types. This pattern is to be
- applied against the concatenation of the namespace of the module which
- defines that particular notification, followed by a single colon, and
- then followed by notification identifier, as supplied in the argument to
- the notification statement.";
- }
-
- typedef topic-id {
- type string {
- length 1..max;
- }
- description
- "A topic identifier. It uniquely defines a topic as seen by the the user
- of this model's RPCs";
- }
-
- // FIXME: we would really like to share instances here, but that requires some sort
- // of sane reference counting. The reason for sharing is the data path part
- // of notification delivery -- multiple creators of topics can still share
- // a single data path.
- rpc create-topic {
- description
- "Create a new topic. A topic is an aggregation of several notification
- types from a set of nodes. Each successful invocation results in a unique
- topic being created. The caller is responsible for removing the topic
- once it is no longer needed.";
-
- input {
- leaf notification-pattern {
- type notification-pattern;
- mandatory true;
- description
- "Pattern matching notification which should be forwarded into this
- topic.";
- }
-
- leaf node-id-pattern {
- type pattern;
- mandatory true;
- description
- "Pattern for matching candidate event source nodes when looking
- for contributors to the topic. The pattern will be applied against
- /network-topology/topology/node/node-id";
- }
- }
-
- output {
- leaf topic-id {
- type topic-id;
- mandatory true;
- }
- }
- }
-
- rpc destroy-topic {
- description
- "Destroy a topic. No further messages will be delivered to it.";
-
- input {
- leaf topic-id {
- type topic-id;
- mandatory true;
- }
- }
- }
-
- notification topic-notification {
- description
- "Notification of an event occuring on a particular node. This notification
- acts as an encapsulation for the event being delivered.";
-
- leaf topic-id {
- type topic-id;
- mandatory true;
- description
- "Topic to which this event is being delivered.";
- }
-
- leaf node-id {
- // FIXME: should be topology node ID
- type string;
- mandatory true;
- description
- "Node ID of the node which generated the event.";
- }
-
- anyxml payload {
- mandatory true;
- description
- "Encapsulated notification. The format is the XML representation of
- a notification according to RFC6020 section 7.14.2.";
- }
- }
-}
+++ /dev/null
-module event-source {
- yang-version 1;
- namespace "urn:cisco:params:xml:ns:yang:messagebus:eventsource";
- prefix "eventsource";
-
- import event-aggregator { prefix aggr; }
- import network-topology { prefix nt; revision-date "2013-10-21"; }
- import opendaylight-inventory {prefix inv; revision-date "2013-08-19"; }
- import yang-ext {prefix ext; revision-date "2013-07-09"; }
-
- organization "Cisco Systems, Inc.";
- contact "Robert Gallas";
-
- description
- "Base model for a topology where individual nodes can produce events.
-
- Module implementing event source topology and encapped notification.
-
- Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
-
- This program and the accompanying materials are made available
- under the terms of the Eclipse Public License v1.0 which
- accompanies this distribution, and is available at
- http://www.eclipse.org/legal/epl-v10.html";
-
- revision "2014-12-02" {
- description "first revision
- + add rpc dis-join-topic
- + add notification event-source-status-notification";
- }
-
- // FIXME: expand this
- typedef join-topic-status {
- type enumeration {
- enum up;
- enum down;
- }
- description "Object status";
- }
-
- // FIXME: migrate to topology
- typedef node-ref {
- type leafref {
- path "/inv:nodes/inv:node/inv:id";
- }
- }
-
- typedef event-source-status {
- type enumeration {
- enum active;
- enum inactive;
- enum deactive;
- }
- description "Status of event source
- - active: event source is publishing notification,
- - inactive: event source stopped publishing of notifications temporarily
- - deactive: event source stopped publishing of notifications permanently" ;
- }
-
- grouping topology-event-source-type {
- container topology-event-source {
- presence "indicates an event source-aware topology";
- }
- }
-
- rpc join-topic {
- input {
- leaf node {
- ext:context-reference "inv:node-context";
- type "instance-identifier";
- }
- leaf topic-id {
- type aggr:topic-id;
- description "in current implementation notification-pattern is defined by topic-id.
- By persisting topic definition we could omit notification-pattern";
- }
- leaf notification-pattern {
- type aggr:notification-pattern;
- }
- }
-
- output {
- leaf status {
- type join-topic-status;
- }
- }
- }
-
- rpc dis-join-topic {
- input {
- leaf node {
- ext:context-reference "inv:node-context";
- type "instance-identifier";
- }
- leaf topic-id {
- type aggr:topic-id;
- mandatory true;
- description "identifier of topic to be disjoin";
- }
- }
-
- }
-
- notification event-source-status-notification {
-
- description
- "Notification of change event source status.";
-
- leaf status {
- type event-source-status;
- mandatory true;
- description "Current status of event source.";
- }
-
- }
-
- augment "/nt:network-topology/nt:topology/nt:topology-types" {
- uses topology-event-source-type;
- }
-
- augment "/nt:network-topology/nt:topology/nt:node" {
- when "../../nt:topology-types/topology-event-source";
- leaf event-source-node {
- type node-ref;
- }
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../parent</relativePath>
- </parent>
-
- <artifactId>messagebus-impl</artifactId>
- <name>${project.artifactId}</name>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-spi</artifactId>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-class EventSourceRegistrationImpl<T extends EventSource> extends AbstractObjectRegistration<T>
- implements EventSourceRegistration<T> {
-
- private final EventSourceTopology eventSourceTopology;
-
- /**
- * Constructor.
- *
- * @param instance of EventSource that has been registered by
- * {@link EventSourceRegistryImpl#registerEventSource(Node, EventSource)}
- */
- EventSourceRegistrationImpl(T instance, EventSourceTopology eventSourceTopology) {
- super(instance);
- this.eventSourceTopology = requireNonNull(eventSourceTopology);
- }
-
- @Override
- protected void removeRegistration() {
- this.eventSourceTopology.unRegister(getInstance());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.UUID;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.ExecutionException;
-import java.util.regex.Pattern;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.DataTreeModification;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.LoggerFactory;
-
-public final class EventSourceTopic implements DataTreeChangeListener<Node>, AutoCloseable {
- private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(EventSourceTopic.class);
- private final NotificationPattern notificationPattern;
- private final EventSourceService sourceService;
- private final Pattern nodeIdPattern;
- private final TopicId topicId;
- private ListenerRegistration<?> listenerRegistration;
- private final CopyOnWriteArraySet<InstanceIdentifier<?>> joinedEventSources = new CopyOnWriteArraySet<>();
-
- public static EventSourceTopic create(final NotificationPattern notificationPattern,
- final String nodeIdRegexPattern, final EventSourceTopology eventSourceTopology) {
- final EventSourceTopic est = new EventSourceTopic(notificationPattern, nodeIdRegexPattern,
- eventSourceTopology.getEventSourceService());
- est.registerListner(eventSourceTopology);
- est.notifyExistingNodes(eventSourceTopology);
- return est;
- }
-
- private EventSourceTopic(final NotificationPattern notificationPattern, final String nodeIdRegexPattern,
- final EventSourceService sourceService) {
- this.notificationPattern = requireNonNull(notificationPattern);
- this.sourceService = requireNonNull(sourceService);
- this.nodeIdPattern = Pattern.compile(nodeIdRegexPattern);
- this.topicId = new TopicId(getUUIDIdent());
- this.listenerRegistration = null;
- LOG.info("EventSourceTopic created - topicId {}", topicId.getValue());
- }
-
- public TopicId getTopicId() {
- return topicId;
- }
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeModification<Node>> changes) {
- for (DataTreeModification<Node> change: changes) {
- final DataObjectModification<Node> rootNode = change.getRootNode();
- switch (rootNode.getModificationType()) {
- case WRITE:
- case SUBTREE_MODIFIED:
- final Node node = rootNode.getDataAfter();
- if (getNodeIdRegexPattern().matcher(node.getNodeId().getValue()).matches()) {
- notifyNode(change.getRootPath().getRootIdentifier());
- }
- break;
- default:
- break;
- }
- }
- }
-
- public void notifyNode(final InstanceIdentifier<?> nodeId) {
- LOG.debug("Notify node: {}", nodeId);
- try {
- final RpcResult<JoinTopicOutput> rpcResultJoinTopic =
- sourceService.joinTopic(getJoinTopicInputArgument(nodeId)).get();
- if (!rpcResultJoinTopic.isSuccessful()) {
- for (final RpcError err : rpcResultJoinTopic.getErrors()) {
- LOG.error("Can not join topic: [{}] on node: [{}]. Error: {}", getTopicId().getValue(),
- nodeId.toString(), err.toString());
- }
- } else {
- joinedEventSources.add(nodeId);
- }
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Could not invoke join topic for node {}", nodeId);
- }
- }
-
- private void notifyExistingNodes(final EventSourceTopology eventSourceTopology) {
- LOG.debug("Notify existing nodes");
- final Pattern nodeRegex = this.nodeIdPattern;
-
- final FluentFuture<Optional<Topology>> future;
- try (ReadTransaction tx = eventSourceTopology.getDataBroker().newReadOnlyTransaction()) {
- future = tx.read(LogicalDatastoreType.OPERATIONAL, EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH);
- }
-
- future.addCallback(new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(final Optional<Topology> data) {
- if (data.isPresent()) {
- for (final Node node : data.get().nonnullNode().values()) {
- if (nodeRegex.matcher(node.getNodeId().getValue()).matches()) {
- notifyNode(EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, node.key()));
- }
- }
- }
- }
-
- @Override
- public void onFailure(final Throwable ex) {
- LOG.error("Can not notify existing nodes", ex);
- }
- }, MoreExecutors.directExecutor());
- }
-
- private JoinTopicInput getJoinTopicInputArgument(final InstanceIdentifier<?> path) {
- final NodeRef nodeRef = new NodeRef(path);
- final JoinTopicInput jti =
- new JoinTopicInputBuilder()
- .setNode(nodeRef.getValue())
- .setTopicId(topicId)
- .setNotificationPattern(notificationPattern)
- .build();
- return jti;
- }
-
- public Pattern getNodeIdRegexPattern() {
- return nodeIdPattern;
- }
-
- private DisJoinTopicInput getDisJoinTopicInputArgument(final InstanceIdentifier<?> eventSourceNodeId) {
- final NodeRef nodeRef = new NodeRef(eventSourceNodeId);
- final DisJoinTopicInput dji = new DisJoinTopicInputBuilder()
- .setNode(nodeRef.getValue())
- .setTopicId(topicId)
- .build();
- return dji;
- }
-
- private void registerListner(final EventSourceTopology eventSourceTopology) {
- this.listenerRegistration = eventSourceTopology.getDataBroker().registerDataTreeChangeListener(
- DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL,
- EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class)), this);
- }
-
- @Override
- public void close() {
- if (this.listenerRegistration != null) {
- this.listenerRegistration.close();
- }
- for (final InstanceIdentifier<?> eventSourceNodeId : joinedEventSources) {
- try {
- final RpcResult<DisJoinTopicOutput> result = sourceService
- .disJoinTopic(getDisJoinTopicInputArgument(eventSourceNodeId)).get();
- if (result.isSuccessful() == false) {
- for (final RpcError err : result.getErrors()) {
- LOG.error("Can not destroy topic: [{}] on node: [{}]. Error: {}", getTopicId().getValue(),
- eventSourceNodeId, err.toString());
- }
- }
- } catch (InterruptedException | ExecutionException ex) {
- LOG.error("Can not close event source topic / destroy topic {} on node {}.", this.topicId.getValue(),
- eventSourceNodeId, ex);
- }
- }
- joinedEventSources.clear();
- }
-
- private static String getUUIDIdent() {
- final UUID uuid = UUID.randomUUID();
- return uuid.toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.controller.messagebus.app.util.Util;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1Builder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1Builder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSource;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSourceBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.TopologyTypes;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class EventSourceTopology implements EventAggregatorService, EventSourceRegistry {
- private static final Logger LOG = LoggerFactory.getLogger(EventSourceTopology.class);
-
- private static final String TOPOLOGY_ID = "EVENT-SOURCE-TOPOLOGY" ;
- private static final TopologyKey EVENT_SOURCE_TOPOLOGY_KEY = new TopologyKey(new TopologyId(TOPOLOGY_ID));
- private static final LogicalDatastoreType OPERATIONAL = LogicalDatastoreType.OPERATIONAL;
-
- static final InstanceIdentifier<Topology> EVENT_SOURCE_TOPOLOGY_PATH =
- InstanceIdentifier.create(NetworkTopology.class).child(Topology.class, EVENT_SOURCE_TOPOLOGY_KEY);
-
- private static final InstanceIdentifier<TopologyTypes1> TOPOLOGY_TYPE_PATH = EVENT_SOURCE_TOPOLOGY_PATH
- .child(TopologyTypes.class).augmentation(TopologyTypes1.class);
-
- private final Map<TopicId, EventSourceTopic> eventSourceTopicMap = new ConcurrentHashMap<>();
- private final Map<NodeKey, Registration> routedRpcRegistrations = new ConcurrentHashMap<>();
-
- private final DataBroker dataBroker;
- private final ObjectRegistration<EventSourceTopology> aggregatorRpcReg;
- private final EventSourceService eventSourceService;
- private final RpcProviderService rpcRegistry;
-
- public EventSourceTopology(final DataBroker dataBroker, final RpcProviderService providerService,
- RpcConsumerRegistry rpcService) {
-
- this.dataBroker = dataBroker;
- this.rpcRegistry = providerService;
- aggregatorRpcReg = providerService.registerRpcImplementation(EventAggregatorService.class, this);
- eventSourceService = rpcService.getRpcService(EventSourceService.class);
-
- final TopologyEventSource topologySource = new TopologyEventSourceBuilder().build();
- final TopologyTypes1 topologyTypeAugment =
- new TopologyTypes1Builder().setTopologyEventSource(topologySource).build();
- putData(OPERATIONAL, TOPOLOGY_TYPE_PATH, topologyTypeAugment);
- LOG.info("EventSourceRegistry has been initialized");
- }
-
- private <T extends DataObject> void putData(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path,
- final T data) {
-
- final WriteTransaction tx = getDataBroker().newWriteOnlyTransaction();
- tx.mergeParentStructurePut(store, path, data);
- tx.commit().addCallback(new FutureCallback<CommitInfo>() {
- @Override
- public void onSuccess(final CommitInfo result) {
- LOG.trace("Data has put into datastore {} {}", store, path);
- }
-
- @Override
- public void onFailure(final Throwable ex) {
- LOG.error("Can not put data into datastore [store: {}] [path: {}]", store, path, ex);
- }
- }, MoreExecutors.directExecutor());
- }
-
- private <T extends DataObject> void deleteData(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path) {
- final WriteTransaction tx = getDataBroker().newWriteOnlyTransaction();
- tx.delete(OPERATIONAL, path);
- tx.commit().addCallback(new FutureCallback<CommitInfo>() {
- @Override
- public void onSuccess(final CommitInfo result) {
- LOG.trace("Data has deleted from datastore {} {}", store, path);
- }
-
- @Override
- public void onFailure(final Throwable ex) {
- LOG.error("Can not delete data from datastore [store: {}] [path: {}]", store, path, ex);
- }
- }, MoreExecutors.directExecutor());
- }
-
- private void insert(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath) {
- final NodeKey nodeKey = sourcePath.getKey();
- final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
- final Node1 nodeAgument = new Node1Builder().setEventSourceNode(
- new NodeId(nodeKey.getNodeId().getValue())).build();
- putData(OPERATIONAL, augmentPath, nodeAgument);
- }
-
- private void remove(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath) {
- final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
- deleteData(OPERATIONAL, augmentPath);
- }
-
- @Override
- public ListenableFuture<RpcResult<CreateTopicOutput>> createTopic(final CreateTopicInput input) {
- LOG.debug("Received Topic creation request: NotificationPattern -> {}, NodeIdPattern -> {}",
- input.getNotificationPattern(),
- input.getNodeIdPattern());
-
- final NotificationPattern notificationPattern = new NotificationPattern(input.getNotificationPattern());
- //FIXME: do not use Util.wildcardToRegex - NodeIdPatter should be regex
- final String nodeIdPattern = input.getNodeIdPattern().getValue();
- final EventSourceTopic eventSourceTopic = EventSourceTopic.create(notificationPattern, nodeIdPattern, this);
-
- eventSourceTopicMap.put(eventSourceTopic.getTopicId(), eventSourceTopic);
-
- final CreateTopicOutput cto = new CreateTopicOutputBuilder()
- .setTopicId(eventSourceTopic.getTopicId())
- .build();
-
- LOG.info("Topic has been created: NotificationPattern -> {}, NodeIdPattern -> {}",
- input.getNotificationPattern(),
- input.getNodeIdPattern());
-
- return Util.resultRpcSuccessFor(cto);
- }
-
- @Override
- public ListenableFuture<RpcResult<DestroyTopicOutput>> destroyTopic(final DestroyTopicInput input) {
- final EventSourceTopic topicToDestroy = eventSourceTopicMap.remove(input.getTopicId());
- if (topicToDestroy != null) {
- topicToDestroy.close();
- }
- return Util.resultRpcSuccessFor(new DestroyTopicOutputBuilder().build());
- }
-
- @Override
- public void close() {
- aggregatorRpcReg.close();
- eventSourceTopicMap.values().forEach(EventSourceTopic::close);
- }
-
- public void register(final EventSource eventSource) {
- final NodeKey nodeKey = eventSource.getSourceNodeKey();
- final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
- final Registration reg = rpcRegistry.registerRpcImplementation(EventSourceService.class, eventSource,
- Collections.singleton(sourcePath));
- routedRpcRegistrations.put(nodeKey, reg);
- insert(sourcePath);
- }
-
- public void unRegister(final EventSource eventSource) {
- final NodeKey nodeKey = eventSource.getSourceNodeKey();
- final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
- final Registration removeRegistration = routedRpcRegistrations.remove(nodeKey);
- if (removeRegistration != null) {
- removeRegistration.close();
- remove(sourcePath);
- }
- }
-
- @Override
- public <T extends EventSource> EventSourceRegistration<T> registerEventSource(final T eventSource) {
- final EventSourceRegistrationImpl<T> esr = new EventSourceRegistrationImpl<>(eventSource, this);
- register(eventSource);
- return esr;
- }
-
- DataBroker getDataBroker() {
- return dataBroker;
- }
-
- EventSourceService getEventSourceService() {
- return eventSourceService;
- }
-
- @VisibleForTesting
- Map<NodeKey, Registration> getRoutedRpcRegistrations() {
- return routedRpcRegistrations;
- }
-
- @VisibleForTesting
- Map<TopicId, EventSourceTopic> getEventSourceTopicMap() {
- return eventSourceTopicMap;
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
- <reference id="consumerRegistry" interface="org.opendaylight.mdsal.binding.api.RpcConsumerRegistry"/>
- <reference id="providerRegistry" interface="org.opendaylight.mdsal.binding.api.RpcProviderService"/>
-
- <bean id="eventSourceTopology" class="org.opendaylight.controller.messagebus.app.impl.EventSourceTopology"
- destroy-method="close">
- <argument ref="dataBroker"/>
- <argument ref="providerRegistry"/>
- <argument ref="consumerRegistry"/>
- </bean>
-
- <service ref="eventSourceTopology"
- interface="org.opendaylight.controller.messagebus.spi.EventSourceRegistry"/>
-</blueprint>
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-
-public class EventSourceRegistrationImplTest {
-
- EventSourceRegistrationImplLocal eventSourceRegistrationImplLocal;
- EventSourceTopology eventSourceTopologyMock;
-
- @BeforeClass
- public static void initTestClass() {
- }
-
- @Before
- public void setUp() {
- EventSource eventSourceMock = mock(EventSource.class);
- eventSourceTopologyMock = mock(EventSourceTopology.class);
- eventSourceRegistrationImplLocal = new EventSourceRegistrationImplLocal(eventSourceMock,
- eventSourceTopologyMock);
- }
-
- @Test
- public void removeRegistrationTest() {
- eventSourceRegistrationImplLocal.removeRegistration();
- verify(eventSourceTopologyMock, times(1)).unRegister(any(EventSource.class));
- }
-
-
- private class EventSourceRegistrationImplLocal extends EventSourceRegistrationImpl<EventSource> {
- EventSourceRegistrationImplLocal(EventSource instance, EventSourceTopology eventSourceTopology) {
- super(instance, eventSourceTopology);
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collections;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.DataTreeModification;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicStatus;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-
-public class EventSourceTopicTest {
-
- EventSourceTopic eventSourceTopic;
- DataBroker dataBrokerMock;
- EventSourceService eventSourceServiceMock;
- EventSourceTopology eventSourceTopologyMock;
-
- @BeforeClass
- public static void initTestClass() {
- }
-
- @Before
- public void setUp() {
- final NotificationPattern notificationPattern = new NotificationPattern("value1");
- eventSourceServiceMock = mock(EventSourceService.class);
- doReturn(RpcResultBuilder.success(new JoinTopicOutputBuilder().setStatus(JoinTopicStatus.Up).build())
- .buildFuture()).when(eventSourceServiceMock).joinTopic(any(JoinTopicInput.class));
-
- eventSourceTopologyMock = mock(EventSourceTopology.class);
- dataBrokerMock = mock(DataBroker.class);
- doReturn(eventSourceServiceMock).when(eventSourceTopologyMock).getEventSourceService();
- doReturn(dataBrokerMock).when(eventSourceTopologyMock).getDataBroker();
-
- WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
- doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
- doNothing().when(writeTransactionMock).mergeParentStructurePut(any(LogicalDatastoreType.class),
- any(InstanceIdentifier.class), any(DataObject.class));
- FluentFuture checkedFutureWriteMock = mock(FluentFuture.class);
- doReturn(checkedFutureWriteMock).when(writeTransactionMock).commit();
-
- ReadTransaction readOnlyTransactionMock = mock(ReadTransaction.class);
- doReturn(readOnlyTransactionMock).when(dataBrokerMock).newReadOnlyTransaction();
- FluentFuture checkedFutureReadMock = mock(FluentFuture.class);
- doReturn(checkedFutureReadMock).when(readOnlyTransactionMock).read(LogicalDatastoreType.OPERATIONAL,
- EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH);
- eventSourceTopic = EventSourceTopic.create(notificationPattern, "nodeIdPattern1", eventSourceTopologyMock);
- }
-
- @Test
- public void createModuleTest() {
- assertNotNull("Instance has not been created correctly.", eventSourceTopic);
- }
-
- @Test
- public void getTopicIdTest() {
- assertNotNull("Topic has not been created correctly.", eventSourceTopic.getTopicId());
- }
-
- @SuppressWarnings("unchecked")
- @Test
- public void onDataTreeChangedTest() {
- InstanceIdentifier<Node> instanceIdentifierMock = mock(InstanceIdentifier.class);
- DataTreeModification<Node> mockDataTreeModification = mock(DataTreeModification.class);
- DataObjectModification<Node> mockModification = mock(DataObjectModification.class);
- doReturn(mockModification).when(mockDataTreeModification).getRootNode();
- doReturn(DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, instanceIdentifierMock))
- .when(mockDataTreeModification).getRootPath();
- doReturn(DataObjectModification.ModificationType.WRITE).when(mockModification).getModificationType();
-
- Node dataObjectNodeMock = mock(Node.class);
- doReturn(getNodeKey("testNodeId01")).when(dataObjectNodeMock).key();
- NodeId nodeIdMock = mock(NodeId.class);
- doReturn(nodeIdMock).when(dataObjectNodeMock).getNodeId();
- doReturn("nodeIdPattern1").when(nodeIdMock).getValue();
-
- doReturn(dataObjectNodeMock).when(mockModification).getDataAfter();
-
- eventSourceTopic.onDataTreeChanged(Collections.singletonList(mockDataTreeModification));
- verify(dataObjectNodeMock).getNodeId();
- verify(nodeIdMock).getValue();
- }
-
- @Test
- public void notifyNodeTest() {
- InstanceIdentifier instanceIdentifierMock = mock(InstanceIdentifier.class);
- eventSourceTopic.notifyNode(instanceIdentifierMock);
- verify(eventSourceServiceMock, times(1)).joinTopic(any(JoinTopicInput.class));
- }
-
- public NodeKey getNodeKey(final String nodeId) {
- return new NodeKey(new NodeId(nodeId));
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.Pattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class EventSourceTopologyTest {
-
- EventSourceTopology eventSourceTopology;
- DataBroker dataBrokerMock;
- RpcProviderService rpcProviderRegistryMock;
- RpcConsumerRegistry rpcServiceMock;
- CreateTopicInput createTopicInputMock;
- ListenerRegistration<?> listenerRegistrationMock;
- ObjectRegistration<EventAggregatorService> aggregatorRpcReg;
-
- @Before
- public void setUp() {
- dataBrokerMock = mock(DataBroker.class);
- rpcProviderRegistryMock = mock(RpcProviderService.class);
- rpcServiceMock = mock(RpcConsumerRegistry.class);
- }
-
- @Test
- public void constructorTest() {
- constructorTestHelper();
- eventSourceTopology = new EventSourceTopology(dataBrokerMock, rpcProviderRegistryMock, rpcServiceMock);
- assertNotNull("Instance has not been created correctly.", eventSourceTopology);
- }
-
- private void constructorTestHelper() {
- aggregatorRpcReg = mock(ObjectRegistration.class);
- EventSourceService eventSourceService = mock(EventSourceService.class);
- doReturn(aggregatorRpcReg).when(rpcProviderRegistryMock).registerRpcImplementation(
- eq(EventAggregatorService.class), any(EventSourceTopology.class));
- doReturn(eventSourceService).when(rpcServiceMock).getRpcService(EventSourceService.class);
- WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
- doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
- doNothing().when(writeTransactionMock).mergeParentStructurePut(any(LogicalDatastoreType.class),
- any(InstanceIdentifier.class), any(DataObject.class));
- FluentFuture checkedFutureMock = mock(FluentFuture.class);
- doReturn(checkedFutureMock).when(writeTransactionMock).commit();
- }
-
- @Test
- public void createTopicTest() throws Exception {
- topicTestHelper();
- assertNotNull("Topic has not been created correctly.", eventSourceTopology.createTopic(createTopicInputMock));
- }
-
- @Test
- public void destroyTopicTest() throws Exception {
- topicTestHelper();
- TopicId topicId = new TopicId("topic-id-007");
- Map<TopicId, EventSourceTopic> localMap = eventSourceTopology.getEventSourceTopicMap();
- EventSourceTopic eventSourceTopic = EventSourceTopic.create(new NotificationPattern("foo"),
- "pattern", eventSourceTopology);
- localMap.put(topicId, eventSourceTopic);
- DestroyTopicInput input = new DestroyTopicInputBuilder().setTopicId(topicId).build();
- eventSourceTopology.destroyTopic(input);
- verify(listenerRegistrationMock, times(1)).close();
- }
-
- private void topicTestHelper() throws Exception {
- constructorTestHelper();
- createTopicInputMock = mock(CreateTopicInput.class);
- eventSourceTopology = new EventSourceTopology(dataBrokerMock, rpcProviderRegistryMock, rpcServiceMock);
-
- NotificationPattern notificationPattern = new NotificationPattern("value1");
- doReturn(notificationPattern).when(createTopicInputMock).getNotificationPattern();
- Pattern pattern = new Pattern("valuePattern1");
- doReturn(pattern).when(createTopicInputMock).getNodeIdPattern();
-
- listenerRegistrationMock = mock(ListenerRegistration.class);
- doReturn(listenerRegistrationMock).when(dataBrokerMock).registerDataTreeChangeListener(
- any(DataTreeIdentifier.class), any(EventSourceTopic.class));
-
- ReadTransaction readOnlyTransactionMock = mock(ReadTransaction.class);
- doReturn(readOnlyTransactionMock).when(dataBrokerMock).newReadOnlyTransaction();
-
- FluentFuture checkedFutureMock = mock(FluentFuture.class);
- doReturn(checkedFutureMock).when(readOnlyTransactionMock).read(eq(LogicalDatastoreType.OPERATIONAL),
- any(InstanceIdentifier.class));
- Topology topologyMock = mock(Topology.class);
- doReturn(Optional.of(topologyMock)).when(checkedFutureMock).get();
-
- final NodeKey nodeKey = new NodeKey(new NodeId("nodeIdValue1"));
- final Node node = new NodeBuilder().withKey(nodeKey).build();
- doReturn(Map.of(nodeKey, node)).when(topologyMock).getNode();
- }
-
- @Test
- public void closeTest() throws Exception {
- constructorTestHelper();
- topicTestHelper();
- Map<TopicId, EventSourceTopic> localMap = eventSourceTopology.getEventSourceTopicMap();
- TopicId topicIdMock = mock(TopicId.class);
- EventSourceTopic eventSourceTopic = EventSourceTopic.create(new NotificationPattern("foo"),
- "pattern", eventSourceTopology);
- localMap.put(topicIdMock, eventSourceTopic);
- eventSourceTopology.close();
- verify(aggregatorRpcReg, times(1)).close();
- verify(listenerRegistrationMock, times(1)).close();
- }
-
- @Test
- public void registerTest() throws Exception {
- topicTestHelper();
- Node nodeMock = mock(Node.class);
- EventSource eventSourceMock = mock(EventSource.class);
- NodeId nodeId = new NodeId("nodeIdValue1");
- NodeKey nodeKey = new NodeKey(nodeId);
- doReturn(nodeKey).when(nodeMock).key();
- doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
- ObjectRegistration routedRpcRegistrationMock = mock(ObjectRegistration.class);
- doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock).registerRpcImplementation(
- eq(EventSourceService.class), eq(eventSourceMock), any(Set.class));
- eventSourceTopology.register(eventSourceMock);
- verify(rpcProviderRegistryMock, times(1)).registerRpcImplementation(eq(EventSourceService.class),
- eq(eventSourceMock), any(Set.class));
- }
-
- @Test
- public void unregisterTest() throws Exception {
- topicTestHelper();
- EventSource eventSourceMock = mock(EventSource.class);
- NodeId nodeId = new NodeId("nodeIdValue1");
- NodeKey nodeKey = new NodeKey(nodeId);
- Map<NodeKey, Registration> localMap = eventSourceTopology.getRoutedRpcRegistrations();
- NodeKey nodeKeyMock = mock(NodeKey.class);
- doReturn(nodeKeyMock).when(eventSourceMock).getSourceNodeKey();
- ObjectRegistration routedRpcRegistrationMock = mock(ObjectRegistration.class);
- localMap.put(nodeKeyMock, routedRpcRegistrationMock);
- eventSourceTopology.unRegister(eventSourceMock);
- verify(routedRpcRegistrationMock, times(1)).close();
- }
-
- @Test
- public void registerEventSourceTest() throws Exception {
- topicTestHelper();
- Node nodeMock = mock(Node.class);
- EventSource eventSourceMock = mock(EventSource.class);
- NodeId nodeId = new NodeId("nodeIdValue1");
- NodeKey nodeKey = new NodeKey(nodeId);
- doReturn(nodeKey).when(nodeMock).key();
- doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
- ObjectRegistration routedRpcRegistrationMock = mock(ObjectRegistration.class);
- doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock)
- .registerRpcImplementation(eq(EventSourceService.class), eq(eventSourceMock), any(Set.class));
- assertNotNull("Return value has not been created correctly.",
- eventSourceTopology.registerEventSource(eventSourceMock));
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../parent</relativePath>
- </parent>
-
- <artifactId>messagebus-spi</artifactId>
- <name>${project.artifactId}</name>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
- </dependency>
- </dependencies>
-
- <scm>
- <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
- </scm>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-import java.util.List;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Event source is a node in topology which is able to produce notifications.
- * To register event source you use {@link EventSourceRegistry#registerEventSource(EventSource)}.
- * EventSourceRegistry will request registered event source to publish notifications
- * whenever EventSourceRegistry has been asked to publish a certain type of notifications.
- * EventSourceRegistry will call method JoinTopic to request EventSource to publish notification.
- * Event source must implement method JoinTopic (from superinterface {@link EventSourceService}).
- */
-
-public interface EventSource extends EventSourceService, AutoCloseable {
-
- /**
- * Identifier of node associated with event source.
- *
- * @return instance of NodeKey
- */
- NodeKey getSourceNodeKey();
-
- /**
- * List the types of notifications which source can produce.
- *
- * @return list of available notification
- */
- List<SchemaPath> getAvailableNotifications();
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-
-/**
- * Instance of EventSourceRegistration is returned by {@link EventSourceRegistry#registerEventSource(EventSource)}
- * and it is used to unregister EventSource.
- *
- */
-public interface EventSourceRegistration<T extends EventSource> extends ObjectRegistration<T> {
-
- @Override
- void close();
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-/**
- * EventSourceRegistry is used to register {@link EventSource}.
- */
-public interface EventSourceRegistry extends AutoCloseable {
-
- /**
- * Registers the given EventSource for public consumption. The EventSource is
- * associated with the node identified via {@link EventSource#getSourceNodeKey}.
- *
- * @param eventSource the EventSource instance to register
- * @return an EventSourceRegistration instance that is used to unregister the EventSource
- * via {@link EventSourceRegistration#close()}.
- */
- <T extends EventSource> EventSourceRegistration<T> registerEventSource(T eventSource);
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../parent</relativePath>
- </parent>
-
- <artifactId>messagebus-util</artifactId>
- <packaging>bundle</packaging>
- <name>${project.artifactId}</name>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-dom-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>messagebus-api</artifactId>
- </dependency>
- <!-- Testing Dependencies -->
- <dependency>
- <groupId>org.glassfish.jersey.test-framework.providers</groupId>
- <artifactId>jersey-test-framework-provider-grizzly2</artifactId>
- <version>2.4</version>
- <scope>test</scope>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.util;
-
-import org.opendaylight.mdsal.dom.api.DOMNotification;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class TopicDOMNotification implements DOMNotification {
-
- private static final SchemaPath TOPIC_NOTIFICATION_ID = SchemaPath.create(true, TopicNotification.QNAME);
- private final ContainerNode body;
-
- public TopicDOMNotification(final ContainerNode body) {
- this.body = body;
- }
-
- @Override
- public SchemaPath getType() {
- return TOPIC_NOTIFICATION_ID;
- }
-
- @Override
- public ContainerNode getBody() {
- return body;
- }
-
- @Override
- public String toString() {
- return "TopicDOMNotification [body=" + body + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.messagebus.app.util;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.regex.Pattern;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public final class Util {
- private Util() {
- }
-
- public static <T> ListenableFuture<RpcResult<T>> resultRpcSuccessFor(final T output) {
- return Futures.immediateFuture(RpcResultBuilder.success(output).build());
- }
-
- /**
- * Method filters qnames based on wildcard strings.
- *
- * @param list list of SchemaPaths
- * @param pattern matching pattern
- * @return list of filtered qnames
- */
- public static List<SchemaPath> expandQname(final List<SchemaPath> list, final Pattern pattern) {
- final List<SchemaPath> matchingQnames = new ArrayList<>();
-
- for (final SchemaPath notification : list) {
- final String namespace = notification.getLastComponent().getNamespace().toString();
- if (pattern.matcher(namespace).matches()) {
- matchingQnames.add(notification);
- }
- }
- return matchingQnames;
- }
-
- /**
- * CREDIT to http://www.rgagnon.com/javadetails/java-0515.html.
- */
- public static String wildcardToRegex(final String wildcard) {
- final StringBuilder s = new StringBuilder(wildcard.length());
- s.append('^');
- for (final char c : wildcard.toCharArray()) {
- switch (c) {
- case '*':
- s.append(".*");
- break;
- case '?':
- s.append('.');
- break;
- // escape special regexp-characters
- case '(':
- case ')':
- case '[':
- case ']':
- case '$':
- case '^':
- case '.':
- case '{':
- case '}':
- case '|':
- case '\\':
- s.append("\\");
- s.append(c);
- break;
- default:
- s.append(c);
- break;
- }
- }
- s.append('$');
- return s.toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class TopicDOMNotificationTest {
-
- private static final String CONTAINER_NODE_BODY_MOCK_TO_STRING = "containerNodeBodyMock";
- ContainerNode containerNodeBodyMock;
- TopicDOMNotification topicDOMNotification;
-
- @BeforeClass
- public static void initTestClass() {
- }
-
- @Before
- public void setUp() {
- containerNodeBodyMock = mock(ContainerNode.class);
- doReturn(CONTAINER_NODE_BODY_MOCK_TO_STRING).when(containerNodeBodyMock).toString();
- topicDOMNotification = new TopicDOMNotification(containerNodeBodyMock);
- }
-
- @Test
- public void constructorTest() {
- assertNotNull("Instance has not been created correctly.", topicDOMNotification);
- }
-
- @Test
- public void getTypeTest() {
- SchemaPath topicNotificationId = SchemaPath.create(true, TopicNotification.QNAME);
- assertEquals("Type has not been created correctly.", topicNotificationId, topicDOMNotification.getType());
- }
-
- @Test
- public void getBodyTest() {
- assertEquals("String has not been created correctly.", containerNodeBodyMock, topicDOMNotification.getBody());
- }
-
- @Test
- public void getToStringTest() {
- String bodyString = "TopicDOMNotification [body=" + CONTAINER_NODE_BODY_MOCK_TO_STRING + "]";
- assertEquals("String has not been created correctly.", bodyString, topicDOMNotification.toString());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.regex.Pattern;
-import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Unit tests for Util.
- *
- * @author ppalmar
- */
-public class UtilTest {
-
- @Test
- public void testResultFor() throws Exception {
- {
- final String expectedResult = "dummy string";
- RpcResult<String> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
- assertEquals(expectedResult, rpcResult.getResult());
- assertTrue(rpcResult.isSuccessful());
- assertTrue(rpcResult.getErrors().isEmpty());
- }
- {
- final Integer expectedResult = 42;
- RpcResult<Integer> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
- assertEquals(expectedResult, rpcResult.getResult());
- assertTrue(rpcResult.isSuccessful());
- assertTrue(rpcResult.getErrors().isEmpty());
- }
- }
-
- @Test
- public void testExpandQname() {
- // match no path because the list of the allowed paths is empty
- {
- final List<SchemaPath> paths = new ArrayList<>();
- final Pattern regexPattern = Pattern.compile(".*"); // match everything
- final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
- assertTrue(matchingPaths.isEmpty());
- }
-
- // match no path because of regex pattern
- {
- final List<SchemaPath> paths = createSchemaPathList();
- final Pattern regexPattern = Pattern.compile("^@.*");
- final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
- assertTrue(matchingPaths.isEmpty());
- }
-
- // match all paths
- {
- final List<SchemaPath> paths = createSchemaPathList();
- final Pattern regexPattern = Pattern.compile(".*");
- final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
- assertTrue(matchingPaths.contains(paths.get(0)));
- assertTrue(matchingPaths.contains(paths.get(1)));
- assertEquals(paths.size(), matchingPaths.size());
- }
-
- // match one path only
- {
- final List<SchemaPath> paths = createSchemaPathList();
- final Pattern regexPattern = Pattern.compile(".*yyy$");
- final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
- assertTrue(matchingPaths.contains(paths.get(1)));
- assertEquals(1, matchingPaths.size());
- }
- }
-
- private static List<SchemaPath> createSchemaPathList() {
- final QName qname1 = QName.create("urn:odl:xxx", "2015-01-01", "localName");
- final QName qname2 = QName.create("urn:odl:yyy", "2015-01-01", "localName");
- final SchemaPath path1 = SchemaPath.create(true, qname1);
- final SchemaPath path2 = SchemaPath.create(true, qname2);
- return Arrays.asList(path1, path2);
- }
-}
<parent>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>binding-parent</artifactId>
- <version>6.0.4</version>
+ <version>13.0.1</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>controller-artifacts</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <artifactId>bundle-parent</artifactId>
+ <version>9.0.3-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-aggregator</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<module>cds-access-api</module>
<module>cds-access-client</module>
<module>cds-dom-api</module>
+ <module>cds-mgmt-api</module>
<module>sal-akka-segmented-journal</module>
<!-- sal clustering configuration -->
<module>sal-dummy-distributed-datastore</module>
<module>sal-cluster-admin-api</module>
<module>sal-cluster-admin-impl</module>
- <module>sal-distributed-eos</module>
+ <module>sal-cluster-admin-karaf-cli</module>
+
+ <!-- Entity Ownership Service on top of Akka Distributed Data/Singleton -->
+ <module>eos-dom-akka</module>
<!-- Yang Test Models for MD-SAL -->
<module>sal-test-model</module>
<!-- Clustering -->
<module>sal-remoterpc-connector</module>
- <!-- Message Bus -->
- <module>messagebus-api</module>
- <module>messagebus-spi</module>
- <module>messagebus-impl</module>
- <module>messagebus-util</module>
-
<!-- PAX EXAM ITs -->
<module>sal-binding-it</module>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.util.AbstractStringIdentifier;
if (message instanceof KeyValue) {
if (isLeader()) {
persistData(getSender(), new PayloadIdentifier(persistIdentifier++), (Payload) message, false);
- } else {
- if (getLeader() != null) {
- getLeader().forward(message, getContext());
- }
+ } else if (getLeader() != null) {
+ getLeader().forward(message, getContext());
}
} else if (message instanceof PrintState) {
} else if (message instanceof PrintRole) {
if (LOG.isDebugEnabled()) {
if (getRaftState() == RaftState.Leader || getRaftState() == RaftState.IsolatedLeader) {
- final String followers = ((Leader)this.getCurrentBehavior()).printFollowerStates();
+ final String followers = ((Leader)getCurrentBehavior()).printFollowerStates();
LOG.debug("{} = {}, Peers={}, followers={}", getId(), getRaftState(),
getRaftActorContext().getPeerIds(), followers);
} else {
}
public Optional<ActorRef> createRoleChangeNotifier(final String actorId) {
- ActorRef exampleRoleChangeNotifier = this.getContext().actorOf(
+ ActorRef exampleRoleChangeNotifier = getContext().actorOf(
RoleChangeNotifier.getProps(actorId), actorId + "-notifier");
return Optional.<ActorRef>of(exampleRoleChangeNotifier);
}
@Override
protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
- if (data instanceof KeyValue) {
- KeyValue kv = (KeyValue) data;
+ if (data instanceof KeyValue kv) {
state.put(kv.getKey(), kv.getValue());
if (clientActor != null) {
clientActor.tell(new KeyValueSaved(), getSelf());
public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
try {
if (installSnapshotStream.isPresent()) {
- SerializationUtils.serialize((Serializable) state, installSnapshotStream.get());
+ SerializationUtils.serialize((Serializable) state, installSnapshotStream.orElseThrow());
}
} catch (RuntimeException e) {
LOG.error("Exception in creating snapshot", e);
try {
return new MapState((Map<String, String>) SerializationUtils.deserialize(snapshotBytes.read()));
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
}
@Override
- public int getSnapshotChunkSize() {
+ public int getMaximumMessageSliceSize() {
return 50;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.example;
import akka.actor.ActorRef;
import java.util.Map;
import java.util.Random;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Created by kramesha on 7/16/14.
*/
public class LogGenerator {
+ private static final Logger LOG = LoggerFactory.getLogger(LogGenerator.class);
+
private final Map<ActorRef, LoggingThread> clientToLoggingThread = new HashMap<>();
- public void startLoggingForClient(ActorRef client) {
+ public void startLoggingForClient(final ActorRef client) {
LoggingThread lt = new LoggingThread(client);
clientToLoggingThread.put(client, lt);
new Thread(lt).start();
}
- public void stopLoggingForClient(ActorRef client) {
+ public void stopLoggingForClient(final ActorRef client) {
clientToLoggingThread.get(client).stopLogging();
clientToLoggingThread.remove(client);
}
public static class LoggingThread implements Runnable {
-
+ private final Random random = new Random();
private final ActorRef clientActor;
+
private volatile boolean stopLogging = false;
- public LoggingThread(ActorRef clientActor) {
+ public LoggingThread(final ActorRef clientActor) {
this.clientActor = clientActor;
}
@Override
- @SuppressWarnings("checkstyle:RegexpSingleLineJava")
public void run() {
- Random random = new Random();
while (true) {
if (stopLogging) {
- System.out.println("Logging stopped for client:" + clientActor.path());
+ LOG.info("Logging stopped for client: {}", clientActor.path());
break;
}
String key = clientActor.path().name();
int randomInt = random.nextInt(100);
clientActor.tell(new KeyValue(key + "-key-" + randomInt, "value-" + randomInt), null);
+
try {
Thread.sleep(randomInt % 10 * 1000L);
} catch (InterruptedException e) {
- e.printStackTrace();
+ LOG.info("Interrupted while sleeping", e);
}
}
}
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
withoutPeer("example-3"), Optional.empty()), "example-3");
- List<ActorRef> examples = Arrays.asList(example1Actor, example2Actor, example3Actor);
+ final var examples = Arrays.asList(example1Actor, example2Actor, example3Actor);
- ActorRef clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor));
- BufferedReader br =
- new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
+ final var clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor));
+ final var br = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
System.out.println("Usage :");
System.out.println("s <1-3> to start a peer");
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.example.messages;
+
+import java.io.Serializable;
+
+final class KVv1 implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String key;
+ private final String value;
+
+ KVv1(String key, String value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ Object readResolve() {
+ return new KeyValue(key, value);
+ }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.example.messages;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
-public class KeyValue extends Payload implements Serializable {
+public final class KeyValue extends Payload {
private static final long serialVersionUID = 1L;
+
private String key;
private String value;
public KeyValue() {
}
- public KeyValue(String key, String value) {
+ public KeyValue(final String key, final String value) {
this.key = key;
this.value = value;
}
return value;
}
- public void setKey(String key) {
- this.key = key;
+ @Override
+ public int size() {
+ return value.length() + key.length();
}
- public void setValue(String value) {
- this.value = value;
+ @Override
+ public int serializedSize() {
+ // Should be a better estimate
+ return size();
}
@Override
}
@Override
- public int size() {
- return this.value.length() + this.key.length();
+ protected Object writeReplace() {
+ return new KVv1(value, key);
}
-
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-clustering-commons</artifactId>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-cluster_2.13</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-remote_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>util</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>cds-mgmt-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- </dependency>
<!-- Test Dependencies -->
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>commons-io</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
- </dependency>
</dependencies>
<build>
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private long previousSnapshotTerm = -1;
private int dataSize = 0;
- protected AbstractReplicatedLogImpl(long snapshotIndex, long snapshotTerm,
- List<ReplicatedLogEntry> unAppliedEntries, String logContext) {
+ protected AbstractReplicatedLogImpl(final long snapshotIndex, final long snapshotTerm,
+ final List<ReplicatedLogEntry> unAppliedEntries, final String logContext) {
this.snapshotIndex = snapshotIndex;
this.snapshotTerm = snapshotTerm;
this.logContext = logContext;
- this.journal = new ArrayList<>(unAppliedEntries.size());
+ journal = new ArrayList<>(unAppliedEntries.size());
for (ReplicatedLogEntry entry: unAppliedEntries) {
append(entry);
}
}
protected AbstractReplicatedLogImpl() {
- this(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), "");
+ this(-1L, -1L, Collections.emptyList(), "");
}
- protected int adjustedIndex(long logEntryIndex) {
+ protected int adjustedIndex(final long logEntryIndex) {
if (snapshotIndex < 0) {
return (int) logEntryIndex;
}
}
@Override
- public ReplicatedLogEntry get(long logEntryIndex) {
+ public ReplicatedLogEntry get(final long logEntryIndex) {
int adjustedIndex = adjustedIndex(logEntryIndex);
if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
}
@Override
- public long removeFrom(long logEntryIndex) {
+ public long removeFrom(final long logEntryIndex) {
int adjustedIndex = adjustedIndex(logEntryIndex);
if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
// physical index should be less than list size and >= 0
}
@Override
- public boolean append(ReplicatedLogEntry replicatedLogEntry) {
+ public boolean append(final ReplicatedLogEntry replicatedLogEntry) {
if (replicatedLogEntry.getIndex() > lastIndex()) {
journal.add(replicatedLogEntry);
dataSize += replicatedLogEntry.size();
}
@Override
- public void increaseJournalLogCapacity(int amount) {
+ public void increaseJournalLogCapacity(final int amount) {
journal.ensureCapacity(journal.size() + amount);
}
@Override
- public List<ReplicatedLogEntry> getFrom(long logEntryIndex) {
+ public List<ReplicatedLogEntry> getFrom(final long logEntryIndex) {
return getFrom(logEntryIndex, journal.size(), NO_MAX_SIZE);
}
@Override
- public List<ReplicatedLogEntry> getFrom(long logEntryIndex, int maxEntries, long maxDataSize) {
+ public List<ReplicatedLogEntry> getFrom(final long logEntryIndex, final int maxEntries, final long maxDataSize) {
int adjustedIndex = adjustedIndex(logEntryIndex);
int size = journal.size();
if (adjustedIndex >= 0 && adjustedIndex < size) {
}
}
- private List<ReplicatedLogEntry> copyJournalEntries(int fromIndex, int toIndex, long maxDataSize) {
+ private @NonNull List<ReplicatedLogEntry> copyJournalEntries(final int fromIndex, final int toIndex,
+ final long maxDataSize) {
List<ReplicatedLogEntry> retList = new ArrayList<>(toIndex - fromIndex);
long totalSize = 0;
for (int i = fromIndex; i < toIndex; i++) {
ReplicatedLogEntry entry = journal.get(i);
- totalSize += entry.size();
+ totalSize += entry.serializedSize();
if (totalSize <= maxDataSize) {
retList.add(entry);
} else {
}
@Override
- public boolean isPresent(long logEntryIndex) {
+ public boolean isPresent(final long logEntryIndex) {
if (logEntryIndex > lastIndex()) {
// if the request logical index is less than the last present in the list
return false;
}
@Override
- public boolean isInSnapshot(long logEntryIndex) {
+ public boolean isInSnapshot(final long logEntryIndex) {
return logEntryIndex >= 0 && logEntryIndex <= snapshotIndex && snapshotIndex != -1;
}
}
@Override
- public void setSnapshotIndex(long snapshotIndex) {
+ public void setSnapshotIndex(final long snapshotIndex) {
this.snapshotIndex = snapshotIndex;
}
@Override
- public void setSnapshotTerm(long snapshotTerm) {
+ public void setSnapshotTerm(final long snapshotTerm) {
this.snapshotTerm = snapshotTerm;
}
@Override
- public void clear(int startIndex, int endIndex) {
+ public void clear(final int startIndex, final int endIndex) {
journal.subList(startIndex, endIndex).clear();
}
@Override
- public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm) {
+ public void snapshotPreCommit(final long snapshotCapturedIndex, final long snapshotCapturedTerm) {
Preconditions.checkArgument(snapshotCapturedIndex >= snapshotIndex,
"snapshotCapturedIndex must be greater than or equal to snapshotIndex");
}
@Override
- public void snapshotCommit() {
+ public void snapshotCommit(final boolean updateDataSize) {
snapshottedJournal = null;
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
- dataSize = 0;
- // need to recalc the datasize based on the entries left after precommit.
- for (ReplicatedLogEntry logEntry : journal) {
- dataSize += logEntry.size();
- }
+ if (updateDataSize) {
+ // need to recalc the datasize based on the entries left after precommit.
+ int newDataSize = 0;
+ for (ReplicatedLogEntry logEntry : journal) {
+ newDataSize += logEntry.size();
+ }
+ LOG.trace("{}: Updated dataSize from {} to {}", logContext, dataSize, newDataSize);
+ dataSize = newDataSize;
+ }
}
@Override
}
@VisibleForTesting
- ReplicatedLogEntry getAtPhysicalIndex(int index) {
+ ReplicatedLogEntry getAtPhysicalIndex(final int index) {
return journal.get(index);
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.raft;
import akka.actor.ActorRef;
import org.opendaylight.yangtools.concepts.Identifier;
-public interface ClientRequestTracker {
- /**
- * Returns the client actor that should be sent a response when consensus is achieved.
- *
- * @return the client actor
- */
- ActorRef getClientActor();
-
- /**
- * Returns the identifier of the object that is to be replicated. For example a transaction identifier in the case
- * of a transaction.
- *
- * @return the identifier
- */
- Identifier getIdentifier();
-
- /**
- * Returns the index of the log entry that is to be replicated.
- *
- * @return the index
- */
- long getIndex();
+/**
+ * Consensus forwarding tracker.
+ *
+ * @param clientActor the client actor that should be sent a response when consensus is achieved
+ * @param identifier the identifier of the object that is to be replicated. For example a transaction identifier in the
+ * case of a transaction
+ * @param logIndex the index of the log entry that is to be replicated
+ */
+public record ClientRequestTracker(long logIndex, ActorRef clientActor, Identifier identifier) {
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft;
-
-import akka.actor.ActorRef;
-import org.opendaylight.yangtools.concepts.Identifier;
-
-public class ClientRequestTrackerImpl implements ClientRequestTracker {
-
- private final ActorRef clientActor;
- private final Identifier identifier;
- private final long logIndex;
-
- public ClientRequestTrackerImpl(ActorRef clientActor, Identifier identifier, long logIndex) {
-
- this.clientActor = clientActor;
-
- this.identifier = identifier;
-
- this.logIndex = logIndex;
- }
-
- @Override
- public ActorRef getClientActor() {
- return clientActor;
- }
-
- @Override
- public long getIndex() {
- return logIndex;
- }
-
- @Override
- public Identifier getIdentifier() {
- return identifier;
- }
-}
import scala.concurrent.duration.FiniteDuration;
/**
- * Configuration Parameter interface for configuring the Raft consensus system
- *
- * <p>
- * Any component using this implementation might want to provide an implementation of
- * this interface to configure
- *
- * <p>
- * A default implementation will be used if none is provided.
+ * Configuration Parameter interface for configuring the Raft consensus system. Any component using this implementation
+ * might want to provide an implementation of this interface to configure. A default implementation will be used if none
+ * is provided.
*
* @author Kamal Rameshan
*/
/**
* Returns the percentage of total memory used in the in-memory Raft log before a snapshot should be taken.
+ * Disabled when direct threshold is enabled.
*
* @return the percentage.
*/
int getSnapshotDataThresholdPercentage();
+ /**
+ * Returns the max size of memory used in the in-memory Raft log before a snapshot should be taken. 0 means that
+ * direct threshold is disabled and percentage is used instead.
+ *
+ * @return maximum journal size (in MiB).
+ */
+ int getSnapshotDataThreshold();
/**
- * Returns the interval(in seconds) after which a snapshot should be taken during recovery.
- * Negative value means don't take snapshots.
+ * Returns the interval(in seconds) after which a snapshot should be taken during recovery. Negative value means
+ * do not take snapshots.
*
* @return the interval of recovery snapshot in seconds
*/
*
* @return the maximum size (in bytes).
*/
- int getSnapshotChunkSize();
+ int getMaximumMessageSliceSize();
/**
* Returns the maximum number of journal log entries to batch on recovery before applying.
*/
long getIsolatedCheckIntervalInMillis();
-
/**
* Returns the multiplication factor to be used to determine the shard election timeout. The election timeout
* is determined by multiplying the election timeout factor with the heart beat duration.
*/
long getElectionTimeoutFactor();
-
/**
* Returns the RaftPolicy used to determine certain Raft behaviors.
*
*/
private static final int ELECTION_TIME_MAX_VARIANCE = 100;
- private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+ private static final int MAXIMUM_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
/**
// in-memory journal can use before it needs to snapshot
private int snapshotDataThresholdPercentage = 12;
- private int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+ // max size of in-memory journal in MB
+ // 0 means direct threshold if disabled
+ private int snapshotDataThreshold = 0;
+
+ private int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
private long electionTimeoutFactor = 2;
private long candidateElectionTimeoutDivisor = 1;
this.snapshotBatchCount = snapshotBatchCount;
}
- public void setRecoverySnapshotIntervalSeconds(int recoverySnapshotInterval) {
+ public void setRecoverySnapshotIntervalSeconds(final int recoverySnapshotInterval) {
checkArgument(recoverySnapshotInterval >= 0);
- this.recoverySnapshotIntervalSeconds = recoverySnapshotInterval;
+ recoverySnapshotIntervalSeconds = recoverySnapshotInterval;
}
public void setSnapshotDataThresholdPercentage(final int snapshotDataThresholdPercentage) {
this.snapshotDataThresholdPercentage = snapshotDataThresholdPercentage;
}
- public void setSnapshotChunkSize(final int snapshotChunkSize) {
- this.snapshotChunkSize = snapshotChunkSize;
+ public void setSnapshotDataThreshold(final int snapshotDataThreshold) {
+ this.snapshotDataThreshold = snapshotDataThreshold;
+ }
+
+ public void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
+ this.maximumMessageSliceSize = maximumMessageSliceSize;
}
public void setJournalRecoveryLogBatchSize(final int journalRecoveryLogBatchSize) {
return snapshotDataThresholdPercentage;
}
+ @Override
+ public int getSnapshotDataThreshold() {
+ return snapshotDataThreshold;
+ }
+
@Override
public int getRecoverySnapshotIntervalSeconds() {
- return this.recoverySnapshotIntervalSeconds;
+ return recoverySnapshotIntervalSeconds;
}
@Override
}
@Override
- public int getSnapshotChunkSize() {
- return snapshotChunkSize;
+ public int getMaximumMessageSliceSize() {
+ return maximumMessageSliceSize;
}
@Override
*/
package org.opendaylight.controller.cluster.raft;
+import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
private short payloadVersion = -1;
- // Assume the HELIUM_VERSION version initially for backwards compatibility until we obtain the follower's
- // actual version via AppendEntriesReply. Although we no longer support the Helium version, a pre-Boron
- // follower will not have the version field in AppendEntriesReply so it will be set to 0 which is
- // HELIUM_VERSION.
- private short raftVersion = RaftVersions.HELIUM_VERSION;
+ // Assume the FLUORINE_VERSION version initially, as we no longer support pre-Fluorine versions.
+ private short raftVersion = RaftVersions.FLUORINE_VERSION;
private final PeerInfo peerInfo;
*/
@VisibleForTesting
FollowerLogInformation(final PeerInfo peerInfo, final long matchIndex, final RaftActorContext context) {
- this.nextIndex = context.getCommitIndex();
+ nextIndex = context.getCommitIndex();
this.matchIndex = matchIndex;
this.context = context;
this.peerInfo = requireNonNull(peerInfo);
* @param raftVersion the raft version.
*/
public void setRaftVersion(final short raftVersion) {
+ checkArgument(raftVersion >= RaftVersions.FLUORINE_VERSION, "Unexpected version %s", raftVersion);
this.raftVersion = raftVersion;
}
* @param state the LeaderInstallSnapshotState
*/
public void setLeaderInstallSnapshotState(final @NonNull LeaderInstallSnapshotState state) {
- if (this.installSnapshotState == null) {
- this.installSnapshotState = requireNonNull(state);
+ if (installSnapshotState == null) {
+ installSnapshotState = requireNonNull(state);
}
}
*
* @author Thomas Pantelis
*/
-class GetSnapshotReplyActor extends UntypedAbstractActor {
+final class GetSnapshotReplyActor extends UntypedAbstractActor {
private static final Logger LOG = LoggerFactory.getLogger(GetSnapshotReplyActor.class);
private final Params params;
this.replyToActor = requireNonNull(replyToActor);
this.receiveTimeout = requireNonNull(receiveTimeout);
this.id = requireNonNull(id);
- this.peerInformation = peerInfo;
+ peerInformation = peerInfo;
}
}
}
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
import akka.actor.Status;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
import com.google.common.annotations.VisibleForTesting;
-import java.util.ArrayList;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.opendaylight.controller.cluster.NonPersistentDataProvider;
import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.NoopPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.Immutable;
* </ul>
*/
public abstract class RaftActor extends AbstractUntypedPersistentActor {
-
- private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
+ private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50);
/**
* This context should NOT be passed directly to any other actor it is
private boolean shuttingDown;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
protected RaftActor(final String id, final Map<String, String> peerAddresses,
final Optional<ConfigParams> configParams, final short payloadVersion) {
persistentProvider = new PersistentDataProvider(this);
delegatingPersistenceProvider = new RaftActorDelegatingPersistentDataProvider(null, persistentProvider);
- context = new RaftActorContextImpl(this.getSelf(),
- this.getContext(), id, new ElectionTermImpl(persistentProvider, id, LOG),
- -1, -1, peerAddresses,
- configParams.isPresent() ? configParams.get() : new DefaultConfigParamsImpl(),
+ context = new RaftActorContextImpl(getSelf(), getContext(), id,
+ new ElectionTermImpl(persistentProvider, id, LOG), -1, -1, peerAddresses,
+ configParams.isPresent() ? configParams.orElseThrow() : new DefaultConfigParamsImpl(),
delegatingPersistenceProvider, this::handleApplyState, LOG, this::executeInSelf);
context.setPayloadVersion(payloadVersion);
* Handles a message.
*
* @deprecated This method is not final for testing purposes. DO NOT OVERRIDE IT, override
- * {@link #handleNonRaftCommand(Object)} instead.
+ * {@link #handleNonRaftCommand(Object)} instead.
*/
@Deprecated
@Override
if (snapshotSupport.handleSnapshotMessage(message, getSender())) {
return;
}
- if (message instanceof ApplyState) {
- ApplyState applyState = (ApplyState) message;
-
+ if (message instanceof ApplyState applyState) {
if (!hasFollowers()) {
// for single node, the capture should happen after the apply state
// as we delete messages from the persistent journal which have made it to the snapshot
}
possiblyHandleBehaviorMessage(message);
- } else if (message instanceof ApplyJournalEntries) {
- ApplyJournalEntries applyEntries = (ApplyJournalEntries) message;
+ } else if (message instanceof ApplyJournalEntries applyEntries) {
LOG.debug("{}: Persisting ApplyJournalEntries with index={}", persistenceId(), applyEntries.getToIndex());
persistence().persistAsync(applyEntries, NoopProcedure.instance());
-
} else if (message instanceof FindLeader) {
- getSender().tell(
- new FindLeaderReply(getLeaderAddress()),
- getSelf()
- );
+ getSender().tell(new FindLeaderReply(getLeaderAddress()), getSelf());
} else if (message instanceof GetOnDemandRaftState) {
onGetOnDemandRaftStats();
} else if (message instanceof InitiateCaptureSnapshot) {
captureSnapshot();
- } else if (message instanceof SwitchBehavior) {
- switchBehavior((SwitchBehavior) message);
- } else if (message instanceof LeaderTransitioning) {
- onLeaderTransitioning((LeaderTransitioning)message);
+ } else if (message instanceof SwitchBehavior switchBehavior) {
+ switchBehavior(switchBehavior);
+ } else if (message instanceof LeaderTransitioning leaderTransitioning) {
+ onLeaderTransitioning(leaderTransitioning);
} else if (message instanceof Shutdown) {
onShutDown();
- } else if (message instanceof Runnable) {
- ((Runnable)message).run();
- } else if (message instanceof NoopPayload) {
- persistData(null, null, (NoopPayload) message, false);
- } else if (message instanceof RequestLeadership) {
- onRequestLeadership((RequestLeadership) message);
+ } else if (message instanceof Runnable runnable) {
+ runnable.run();
+ } else if (message instanceof NoopPayload noopPayload) {
+ persistData(null, null, noopPayload, false);
+ } else if (message instanceof RequestLeadership requestLeadership) {
+ onRequestLeadership(requestLeadership);
} else if (!possiblyHandleBehaviorMessage(message)) {
- handleNonRaftCommand(message);
+ if (message instanceof JournalProtocol.Response response
+ && delegatingPersistenceProvider.handleJournalResponse(response)) {
+ LOG.debug("{}: handled a journal response", persistenceId());
+ } else if (message instanceof SnapshotProtocol.Response response
+ && delegatingPersistenceProvider.handleSnapshotResponse(response)) {
+ LOG.debug("{}: handled a snapshot response", persistenceId());
+ } else {
+ handleNonRaftCommand(message);
+ }
}
}
Optional<ActorRef> roleChangeNotifier = getRoleChangeNotifier();
if (getRaftState() == RaftState.Follower && roleChangeNotifier.isPresent()
&& leaderTransitioning.getLeaderId().equals(getCurrentBehavior().getLeaderId())) {
- roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), null,
+ roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), null,
getCurrentBehavior().getLeaderPayloadVersion()), getSelf());
}
}
}
final RaftActorBehavior currentBehavior = context.getCurrentBehavior();
- OnDemandRaftState.AbstractBuilder<?, ?> builder = newOnDemandRaftStateBuilder()
+ final var builder = newOnDemandRaftStateBuilder()
.commitIndex(context.getCommitIndex())
.currentTerm(context.getTermInformation().getCurrentTerm())
.inMemoryJournalDataSize(replicatedLog().dataSize())
builder.lastLogTerm(lastLogEntry.getTerm());
}
- if (getCurrentBehavior() instanceof AbstractLeader) {
- AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
- Collection<String> followerIds = leader.getFollowerIds();
- List<FollowerInfo> followerInfoList = new ArrayList<>(followerIds.size());
- for (String id: followerIds) {
- final FollowerLogInformation info = leader.getFollower(id);
- followerInfoList.add(new FollowerInfo(id, info.getNextIndex(), info.getMatchIndex(),
- info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
- TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
- context.getPeerInfo(info.getId()).isVoting()));
- }
-
- builder.followerInfoList(followerInfoList);
+ if (getCurrentBehavior() instanceof AbstractLeader leader) {
+ builder.followerInfoList(leader.getFollowerIds().stream()
+ .map(leader::getFollower)
+ .map(info -> new FollowerInfo(info.getId(), info.getNextIndex(), info.getMatchIndex(),
+ info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
+ TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
+ context.getPeerInfo(info.getId()).isVoting()))
+ .collect(ImmutableList.toImmutableList()));
}
sender().tell(builder.build(), self());
if (!Objects.equals(lastLeaderId, currentBehavior.getLeaderId())
|| oldBehaviorState.getLeaderPayloadVersion() != currentBehavior.getLeaderPayloadVersion()) {
if (roleChangeNotifier.isPresent()) {
- roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
+ roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
currentBehavior.getLeaderPayloadVersion()), getSelf());
}
if (roleChangeNotifier.isPresent()
&& (oldBehavior == null || oldBehavior.state() != currentBehavior.state())) {
- roleChangeNotifier.get().tell(new RoleChanged(getId(), oldBehaviorStateName ,
+ roleChangeNotifier.orElseThrow().tell(new RoleChanged(getId(), oldBehaviorStateName ,
currentBehavior.state().name()), getSelf());
}
}
if (wasAppended && hasFollowers()) {
// Send log entry for replication.
- getCurrentBehavior().handleMessage(getSelf(), new Replicate(clientActor, identifier, replicatedLogEntry,
- !batchHint));
+ getCurrentBehavior().handleMessage(getSelf(),
+ new Replicate(replicatedLogEntry.getIndex(), !batchHint, clientActor, identifier));
}
}
this.lastValidLeaderId = lastValidLeaderId;
this.lastLeaderId = lastLeaderId;
this.behavior = requireNonNull(behavior);
- this.leaderPayloadVersion = behavior.getLeaderPayloadVersion();
+ leaderPayloadVersion = behavior.getLeaderPayloadVersion();
}
@Override
import akka.actor.Props;
import akka.cluster.Cluster;
import com.google.common.annotations.VisibleForTesting;
-import java.util.ArrayList;
+import com.google.common.collect.ImmutableList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.Consumer;
import java.util.function.LongSupplier;
this.lastApplied = lastApplied;
this.configParams = requireNonNull(configParams);
this.persistenceProvider = requireNonNull(persistenceProvider);
- this.log = requireNonNull(logger);
+ log = requireNonNull(logger);
this.applyStateConsumer = requireNonNull(applyStateConsumer);
fileBackedOutputStreamFactory = new FileBackedOutputStreamFactory(
@Override
public Logger getLogger() {
- return this.log;
+ return log;
}
@Override
@Override
public void updatePeerIds(final ServerConfigurationPayload serverConfig) {
- votingMember = true;
- boolean foundSelf = false;
- Set<String> currentPeers = new HashSet<>(this.getPeerIds());
- for (ServerInfo server : serverConfig.getServerConfig()) {
- if (getId().equals(server.getId())) {
- foundSelf = true;
- if (!server.isVoting()) {
- votingMember = false;
- }
+ boolean newVotingMember = false;
+ var currentPeers = new HashSet<>(getPeerIds());
+ for (var server : serverConfig.getServerConfig()) {
+ if (getId().equals(server.peerId())) {
+ newVotingMember = server.isVoting();
} else {
- VotingState votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
- if (!currentPeers.contains(server.getId())) {
- this.addToPeers(server.getId(), null, votingState);
+ final var votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
+ if (currentPeers.contains(server.peerId())) {
+ getPeerInfo(server.peerId()).setVotingState(votingState);
+ currentPeers.remove(server.peerId());
} else {
- this.getPeerInfo(server.getId()).setVotingState(votingState);
- currentPeers.remove(server.getId());
+ addToPeers(server.peerId(), null, votingState);
}
}
}
for (String peerIdToRemove : currentPeers) {
- this.removePeer(peerIdToRemove);
- }
-
- if (!foundSelf) {
- votingMember = false;
+ removePeer(peerIdToRemove);
}
+ votingMember = newVotingMember;
log.debug("{}: Updated server config: isVoting: {}, peers: {}", id, votingMember, peerInfoMap.values());
setDynamicServerConfigurationInUse();
@Override
public void setDynamicServerConfigurationInUse() {
- this.dynamicServerConfiguration = true;
+ dynamicServerConfiguration = true;
}
@Override
if (!isDynamicServerConfigurationInUse()) {
return null;
}
- Collection<PeerInfo> peers = getPeers();
- List<ServerInfo> newConfig = new ArrayList<>(peers.size() + 1);
- for (PeerInfo peer: peers) {
+ final var peers = getPeers();
+ final var newConfig = ImmutableList.<ServerInfo>builderWithExpectedSize(peers.size() + (includeSelf ? 1 : 0));
+ for (PeerInfo peer : peers) {
newConfig.add(new ServerInfo(peer.getId(), peer.isVoting()));
}
newConfig.add(new ServerInfo(getId(), votingMember));
}
- return new ServerConfigurationPayload(newConfig);
+ return new ServerConfigurationPayload(newConfig.build());
}
@Override
}
void setCurrentBehavior(final RaftActorBehavior behavior) {
- this.currentBehavior = requireNonNull(behavior);
+ currentBehavior = requireNonNull(behavior);
}
@Override
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
/**
* The DelegatingPersistentDataProvider used by RaftActor to override the configured persistent provider to
}
private <T> void doPersist(final T entry, final Procedure<T> procedure, final boolean async) {
- if (getDelegate().isRecoveryApplicable()) {
- persistSuper(entry, procedure, async);
- } else {
- if (entry instanceof ReplicatedLogEntry) {
- Payload payload = ((ReplicatedLogEntry)entry).getData();
- if (payload instanceof PersistentPayload) {
- // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes
- // on recovery if data persistence is later enabled.
- if (async) {
- persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
- } else {
- persistentProvider.persist(payload, p -> procedure.apply(entry));
- }
- } else {
- persistSuper(entry, procedure, async);
- }
+ if (!getDelegate().isRecoveryApplicable() && entry instanceof ReplicatedLogEntry replicatedLogEntry
+ && replicatedLogEntry.getData() instanceof PersistentPayload payload) {
+ // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes on recovery
+ // if data persistence is later enabled.
+ if (async) {
+ persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
} else {
- persistSuper(entry, procedure, async);
+ persistentProvider.persist(payload, p -> procedure.apply(entry));
}
- }
- }
-
- private <T> void persistSuper(final T object, final Procedure<T> procedure, final boolean async) {
- if (async) {
- super.persistAsync(object, procedure);
+ } else if (async) {
+ super.persistAsync(entry, procedure);
} else {
- super.persist(object, procedure);
+ super.persist(entry, procedure);
}
}
}
Optional<ActorRef> roleChangeNotifier = raftActor.getRoleChangeNotifier();
if (roleChangeNotifier.isPresent()) {
- roleChangeNotifier.get().tell(raftActor.newLeaderStateChanged(context.getId(), null,
+ roleChangeNotifier.orElseThrow().tell(raftActor.newLeaderStateChanged(context.getId(), null,
currentBehavior.getLeaderPayloadVersion()), raftActor.self());
}
void doTransfer() {
RaftActorBehavior behavior = raftActor.getCurrentBehavior();
// Sanity check...
- if (behavior instanceof Leader) {
+ if (behavior instanceof Leader leader) {
isTransferring = true;
- ((Leader)behavior).transferLeadership(this);
+ leader.transferLeadership(this);
} else {
LOG.debug("{}: No longer the leader - skipping transfer", raftActor.persistenceId());
finish(true);
package org.opendaylight.controller.cluster.raft;
import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
/**
* Interface for a class that participates in raft actor persistence recovery.
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
import org.opendaylight.controller.cluster.raft.persisted.EmptyState;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
import org.slf4j.Logger;
/**
RaftActorRecoverySupport(final RaftActorContext context, final RaftActorRecoveryCohort cohort) {
this.context = context;
this.cohort = cohort;
- this.log = context.getLogger();
+ log = context.getLogger();
}
boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) {
}
boolean recoveryComplete = false;
- if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
- } else if (message instanceof SnapshotOffer) {
- onRecoveredSnapshot((SnapshotOffer) message);
- } else if (message instanceof ReplicatedLogEntry) {
- onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
- } else if (message instanceof ApplyJournalEntries) {
- onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
- } else if (message instanceof DeleteEntries) {
- onDeleteEntries((DeleteEntries) message);
- } else if (message instanceof ServerConfigurationPayload) {
- context.updatePeerIds((ServerConfigurationPayload)message);
+ if (message instanceof UpdateElectionTerm updateElectionTerm) {
+ context.getTermInformation().update(updateElectionTerm.getCurrentTerm(), updateElectionTerm.getVotedFor());
+ } else if (message instanceof SnapshotOffer snapshotOffer) {
+ onRecoveredSnapshot(snapshotOffer);
+ } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+ onRecoveredJournalLogEntry(replicatedLogEntry);
+ } else if (message instanceof ApplyJournalEntries applyJournalEntries) {
+ onRecoveredApplyLogEntries(applyJournalEntries.getToIndex());
+ } else if (message instanceof DeleteEntries deleteEntries) {
+ onDeleteEntries(deleteEntries);
+ } else if (message instanceof ServerConfigurationPayload serverConfigurationPayload) {
+ context.updatePeerIds(serverConfigurationPayload);
} else if (message instanceof RecoveryCompleted) {
recoveryComplete = true;
onRecoveryCompletedMessage(persistentProvider);
final SnapshotManager snapshotManager = context.getSnapshotManager();
if (snapshotManager.capture(logEntry, -1)) {
log.info("Capturing snapshot, resetting timer for the next recovery snapshot interval.");
- this.recoverySnapshotTimer.reset().start();
+ recoverySnapshotTimer.reset().start();
} else {
log.info("SnapshotManager is not able to capture snapshot at this time. It will be retried "
+ "again with the next recovered entry.");
}
private boolean shouldTakeRecoverySnapshot() {
- return this.recoverySnapshotTimer != null && this.recoverySnapshotTimer.elapsed(TimeUnit.SECONDS)
+ return recoverySnapshotTimer != null && recoverySnapshotTimer.elapsed(TimeUnit.SECONDS)
>= context.getConfigParams().getRecoverySnapshotIntervalSeconds();
}
}
private static boolean isMigratedSerializable(final Object message) {
- return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated();
+ return message instanceof MigratedSerializable migrated && migrated.isMigrated();
}
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
+import com.google.common.collect.ImmutableList;
import java.util.ArrayDeque;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
-import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.UUID;
import org.opendaylight.controller.cluster.raft.messages.AddServer;
import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.util.AbstractUUIDIdentifier;
import org.slf4j.Logger;
RaftActorServerConfigurationSupport(final RaftActor raftActor) {
this.raftActor = raftActor;
- this.raftContext = raftActor.getRaftActorContext();
+ raftContext = raftActor.getRaftActorContext();
}
boolean handleMessage(final Object message, final ActorRef sender) {
- if (message instanceof AddServer) {
- onAddServer((AddServer) message, sender);
+ if (message instanceof AddServer addServer) {
+ onAddServer(addServer, sender);
return true;
- } else if (message instanceof RemoveServer) {
- onRemoveServer((RemoveServer) message, sender);
+ } else if (message instanceof RemoveServer removeServer) {
+ onRemoveServer(removeServer, sender);
return true;
- } else if (message instanceof ChangeServersVotingStatus) {
- onChangeServersVotingStatus((ChangeServersVotingStatus) message, sender);
+ } else if (message instanceof ChangeServersVotingStatus changeServersVotingStatus) {
+ onChangeServersVotingStatus(changeServersVotingStatus, sender);
return true;
- } else if (message instanceof ServerOperationTimeout) {
- currentOperationState.onServerOperationTimeout((ServerOperationTimeout) message);
+ } else if (message instanceof ServerOperationTimeout serverOperationTimeout) {
+ currentOperationState.onServerOperationTimeout(serverOperationTimeout);
return true;
- } else if (message instanceof UnInitializedFollowerSnapshotReply) {
- currentOperationState.onUnInitializedFollowerSnapshotReply((UnInitializedFollowerSnapshotReply) message);
+ } else if (message instanceof UnInitializedFollowerSnapshotReply uninitFollowerSnapshotReply) {
+ currentOperationState.onUnInitializedFollowerSnapshotReply(uninitFollowerSnapshotReply);
return true;
- } else if (message instanceof ApplyState) {
- return onApplyState((ApplyState) message);
+ } else if (message instanceof ApplyState applyState) {
+ return onApplyState(applyState);
} else if (message instanceof SnapshotComplete) {
currentOperationState.onSnapshotComplete();
return false;
}
private boolean updateLocalPeerInfo() {
- List<ServerInfo> newServerInfoList = newServerInfoList();
+ final var newServerInfoList = newServerInfoList();
// Check if new voting state would leave us with no voting members.
boolean atLeastOneVoting = false;
}
raftContext.updatePeerIds(new ServerConfigurationPayload(newServerInfoList));
- if (raftActor.getCurrentBehavior() instanceof AbstractLeader) {
- AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
+ if (raftActor.getCurrentBehavior() instanceof AbstractLeader leader) {
leader.updateMinReplicaCount();
}
return true;
}
- private List<ServerInfo> newServerInfoList() {
- Map<String, Boolean> serverVotingStatusMap = changeVotingStatusContext.getOperation()
- .getServerVotingStatusMap();
- List<ServerInfo> newServerInfoList = new ArrayList<>();
- for (String peerId: raftContext.getPeerIds()) {
- newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId)
- ? serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting()));
+ private ImmutableList<ServerInfo> newServerInfoList() {
+ final var serverVotingStatusMap = changeVotingStatusContext.getOperation().getServerVotingStatusMap();
+ final var peerInfos = raftContext.getPeers();
+ final var newServerInfoList = ImmutableList.<ServerInfo>builderWithExpectedSize(peerInfos.size() + 1);
+ for (var peerInfo : peerInfos) {
+ final var peerId = peerInfo.getId();
+ final var voting = serverVotingStatusMap.get(peerId);
+ newServerInfoList.add(new ServerInfo(peerId, voting != null ? voting : peerInfo.isVoting()));
}
- newServerInfoList.add(new ServerInfo(raftContext.getId(), serverVotingStatusMap.containsKey(
- raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId())
- : raftContext.isVotingMember()));
+ final var myId = raftContext.getId();
+ final var myVoting = serverVotingStatusMap.get(myId);
+ newServerInfoList.add(new ServerInfo(myId, myVoting != null ? myVoting : raftContext.isVotingMember()));
- return newServerInfoList;
+ return newServerInfoList.build();
}
}
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.SnapshotComplete;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshotReply;
import org.opendaylight.controller.cluster.raft.persisted.EmptyState;
RaftActorSnapshotMessageSupport(final RaftActorContext context, final RaftActorSnapshotCohort cohort) {
this.context = context;
this.cohort = cohort;
- this.log = context.getLogger();
+ log = context.getLogger();
context.getSnapshotManager().setCreateSnapshotConsumer(
outputStream -> cohort.createSnapshot(context.getActor(), outputStream));
return cohort;
}
- boolean handleSnapshotMessage(Object message, ActorRef sender) {
- if (message instanceof ApplySnapshot) {
- onApplySnapshot((ApplySnapshot) message);
- } else if (message instanceof SaveSnapshotSuccess) {
- onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
- } else if (message instanceof SaveSnapshotFailure) {
- onSaveSnapshotFailure((SaveSnapshotFailure) message);
- } else if (message instanceof CaptureSnapshotReply) {
- onCaptureSnapshotReply((CaptureSnapshotReply) message);
+ boolean handleSnapshotMessage(final Object message, final ActorRef sender) {
+ if (message instanceof ApplySnapshot applySnapshot) {
+ onApplySnapshot(applySnapshot);
+ } else if (message instanceof SaveSnapshotSuccess saveSnapshotSuccess) {
+ onSaveSnapshotSuccess(saveSnapshotSuccess);
+ } else if (message instanceof SaveSnapshotFailure saveSnapshotFailure) {
+ onSaveSnapshotFailure(saveSnapshotFailure);
+ } else if (message instanceof CaptureSnapshotReply captureSnapshotReply) {
+ onCaptureSnapshotReply(captureSnapshotReply);
} else if (COMMIT_SNAPSHOT.equals(message)) {
context.getSnapshotManager().commit(-1, -1);
- } else if (message instanceof GetSnapshot) {
- onGetSnapshot(sender, (GetSnapshot) message);
+ } else if (message instanceof GetSnapshot getSnapshot) {
+ onGetSnapshot(sender, getSnapshot);
+ } else if (message instanceof SnapshotComplete) {
+ log.debug("{}: SnapshotComplete received", context.getId());
} else {
return false;
}
return true;
}
- private void onCaptureSnapshotReply(CaptureSnapshotReply reply) {
+ private void onCaptureSnapshotReply(final CaptureSnapshotReply reply) {
log.debug("{}: CaptureSnapshotReply received by actor", context.getId());
context.getSnapshotManager().persist(reply.getSnapshotState(), reply.getInstallSnapshotStream(),
context.getTotalMemory());
}
- private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
+ private void onSaveSnapshotFailure(final SaveSnapshotFailure saveSnapshotFailure) {
log.error("{}: SaveSnapshotFailure received for snapshot Cause:",
context.getId(), saveSnapshotFailure.cause());
context.getSnapshotManager().rollback();
}
- private void onSaveSnapshotSuccess(SaveSnapshotSuccess success) {
+ private void onSaveSnapshotSuccess(final SaveSnapshotSuccess success) {
long sequenceNumber = success.metadata().sequenceNr();
log.info("{}: SaveSnapshotSuccess received for snapshot, sequenceNr: {}", context.getId(), sequenceNumber);
context.getSnapshotManager().commit(sequenceNumber, success.metadata().timestamp());
}
- private void onApplySnapshot(ApplySnapshot message) {
+ private void onApplySnapshot(final ApplySnapshot message) {
log.info("{}: Applying snapshot on follower: {}", context.getId(), message.getSnapshot());
context.getSnapshotManager().apply(message);
}
- private void onGetSnapshot(ActorRef sender, GetSnapshot getSnapshot) {
+ private void onGetSnapshot(final ActorRef sender, final GetSnapshot getSnapshot) {
log.debug("{}: onGetSnapshot", context.getId());
}
@VisibleForTesting
- void setSnapshotReplyActorTimeout(FiniteDuration snapshotReplyActorTimeout) {
+ void setSnapshotReplyActorTimeout(final FiniteDuration snapshotReplyActorTimeout) {
this.snapshotReplyActorTimeout = snapshotReplyActorTimeout;
}
}
* @author Thomas Pantelis
*/
public final class RaftVersions {
- public static final short HELIUM_VERSION = 0;
- public static final short LITHIUM_VERSION = 1;
- public static final short BORON_VERSION = 3;
+ // HELIUM_VERSION = 0
+ // LITHIUM_VERSION = 1
+ // BORON_VERSION = 3
public static final short FLUORINE_VERSION = 4;
- public static final short CURRENT_VERSION = FLUORINE_VERSION;
+ public static final short ARGON_VERSION = 5;
+ public static final short CURRENT_VERSION = ARGON_VERSION;
private RaftVersions() {
-
+ // Hidden on purpose
}
}
void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
/**
- * Sets the Replicated log to state after snapshot success.
+ * Sets the Replicated log to state after snapshot success. This method is equivalent to
+ * {@code snapshotCommit(true)}.
*/
- void snapshotCommit();
+ default void snapshotCommit() {
+ snapshotCommit(true);
+ }
+
+ /**
+ * Sets the Replicated log to state after snapshot success. Most users will want to use {@link #snapshotCommit()}
+ * instead.
+ *
+ * @param updateDataSize true if {@link #dataSize()} should also be updated
+ */
+ void snapshotCommit(boolean updateDataSize);
/**
* Restores the replicated log to a state in the event of a save snapshot failure.
package org.opendaylight.controller.cluster.raft;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
/**
* Represents one entry in the replicated log.
*/
int size();
+ /**
+ * Return the estimate of serialized size of this entry when passed through serialization. The estimate needs to
+ * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of
+ * uncertainty.
+ *
+ * @return An estimate of serialized size.
+ */
+ int serializedSize();
+
/**
* Checks if persistence is pending for this entry.
*
@Override
public boolean shouldCaptureSnapshot(final long logIndex) {
final ConfigParams config = context.getConfigParams();
- final long journalSize = logIndex + 1;
- final long dataThreshold = context.getTotalMemory() * config.getSnapshotDataThresholdPercentage() / 100;
+ if ((logIndex + 1) % config.getSnapshotBatchCount() == 0) {
+ return true;
+ }
- return journalSize % config.getSnapshotBatchCount() == 0 || getDataSizeForSnapshotCheck() > dataThreshold;
+ final long absoluteThreshold = config.getSnapshotDataThreshold();
+ final long dataThreshold = absoluteThreshold != 0 ? absoluteThreshold * ConfigParams.MEGABYTE
+ : context.getTotalMemory() * config.getSnapshotDataThresholdPercentage() / 100;
+ return getDataSizeForSnapshotCheck() > dataThreshold;
}
@Override
*/
public SnapshotManager(final RaftActorContext context, final Logger logger) {
this.context = context;
- this.log = logger;
+ log = logger;
}
public boolean isApplying() {
newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, mandatoryTrim);
}
- private class AbstractSnapshotState implements SnapshotState {
+ private abstract class AbstractSnapshotState implements SnapshotState {
@Override
public boolean isCapturing() {
//use the term of the temp-min, since we check for isPresent, entry will not be null
ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
- context.getReplicatedLog().snapshotCommit();
+ context.getReplicatedLog().snapshotCommit(false);
return tempMin;
}
}
}
- private class Idle extends AbstractSnapshotState {
-
+ private final class Idle extends AbstractSnapshotState {
@Override
public boolean isCapturing() {
return false;
log.debug("{}: lastSequenceNumber prior to capture: {}", persistenceId(), lastSequenceNumber);
- SnapshotManager.this.currentState = CREATING;
+ currentState = CREATING;
try {
createSnapshotProcedure.accept(Optional.ofNullable(installSnapshotStream));
} catch (Exception e) {
- SnapshotManager.this.currentState = IDLE;
+ currentState = IDLE;
log.error("Error creating snapshot", e);
return false;
}
@Override
public void apply(final ApplySnapshot toApply) {
- SnapshotManager.this.applySnapshot = toApply;
+ applySnapshot = toApply;
lastSequenceNumber = context.getPersistenceProvider().getLastSequenceNumber();
context.getPersistenceProvider().saveSnapshot(toApply.getSnapshot());
- SnapshotManager.this.currentState = PERSISTING;
+ currentState = PERSISTING;
}
@Override
}
}
- private class Creating extends AbstractSnapshotState {
-
+ private final class Creating extends AbstractSnapshotState {
@Override
public void persist(final Snapshot.State snapshotState, final Optional<OutputStream> installSnapshotStream,
final long totalMemory) {
log.info("{}: Persisting of snapshot done: {}", persistenceId(), snapshot);
- long dataThreshold = totalMemory * context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
- boolean dataSizeThresholdExceeded = context.getReplicatedLog().dataSize() > dataThreshold;
+ final ConfigParams config = context.getConfigParams();
+ final long absoluteThreshold = config.getSnapshotDataThreshold();
+ final long dataThreshold = absoluteThreshold != 0 ? absoluteThreshold * ConfigParams.MEGABYTE
+ : totalMemory * config.getSnapshotDataThresholdPercentage() / 100;
- boolean logSizeExceededSnapshotBatchCount =
- context.getReplicatedLog().size() >= context.getConfigParams().getSnapshotBatchCount();
+ final boolean dataSizeThresholdExceeded = context.getReplicatedLog().dataSize() > dataThreshold;
+ final boolean logSizeExceededSnapshotBatchCount =
+ context.getReplicatedLog().size() >= config.getSnapshotBatchCount();
final RaftActorBehavior currentBehavior = context.getCurrentBehavior();
if (dataSizeThresholdExceeded || logSizeExceededSnapshotBatchCount || captureSnapshot.isMandatoryTrim()) {
} else if (logSizeExceededSnapshotBatchCount) {
log.debug("{}: log size {} exceeds the snapshot batch count {} - doing snapshotPreCommit with "
+ "index {}", context.getId(), context.getReplicatedLog().size(),
- context.getConfigParams().getSnapshotBatchCount(),
- captureSnapshot.getLastAppliedIndex());
+ config.getSnapshotBatchCount(), captureSnapshot.getLastAppliedIndex());
} else {
log.debug("{}: user triggered or root overwrite snapshot encountered, trimming log up to "
+ "last applied index {}", context.getId(), captureSnapshot.getLastAppliedIndex());
if (installSnapshotStream.isPresent()) {
if (context.getId().equals(currentBehavior.getLeaderId())) {
try {
- ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.get()).asByteSource();
+ ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.orElseThrow())
+ .asByteSource();
currentBehavior.handleMessage(context.getActor(),
new SendInstallSnapshot(snapshot, snapshotBytes));
} catch (IOException e) {
context.getId(), e);
}
} else {
- ((FileBackedOutputStream)installSnapshotStream.get()).cleanup();
+ ((FileBackedOutputStream)installSnapshotStream.orElseThrow()).cleanup();
}
}
captureSnapshot = null;
- SnapshotManager.this.currentState = PERSISTING;
+ currentState = PERSISTING;
}
@Override
}
- private class Persisting extends AbstractSnapshotState {
-
+ private final class Persisting extends AbstractSnapshotState {
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
public void commit(final long sequenceNumber, final long timeStamp) {
private void snapshotComplete() {
lastSequenceNumber = -1;
applySnapshot = null;
- SnapshotManager.this.currentState = IDLE;
+ currentState = IDLE;
context.getActor().tell(SnapshotComplete.INSTANCE, context.getActor());
}
long getTerm();
}
- static class LastAppliedTermInformationReader implements TermInformationReader {
+ static final class LastAppliedTermInformationReader implements TermInformationReader {
private long index;
private long term;
LastAppliedTermInformationReader init(final ReplicatedLog log, final long originalIndex,
final ReplicatedLogEntry lastLogEntry, final boolean hasFollowers) {
ReplicatedLogEntry entry = log.get(originalIndex);
- this.index = -1L;
- this.term = -1L;
+ index = -1L;
+ term = -1L;
if (!hasFollowers) {
if (lastLogEntry != null) {
// since we have persisted the last-log-entry to persistent journal before the capture,
@Override
public long getIndex() {
- return this.index;
+ return index;
}
@Override
public long getTerm() {
- return this.term;
+ return term;
}
}
- private static class ReplicatedToAllTermInformationReader implements TermInformationReader {
+ private static final class ReplicatedToAllTermInformationReader implements TermInformationReader {
private long index;
private long term;
ReplicatedToAllTermInformationReader init(final ReplicatedLog log, final long originalIndex) {
ReplicatedLogEntry entry = log.get(originalIndex);
- this.index = -1L;
- this.term = -1L;
+ index = -1L;
+ term = -1L;
if (entry != null) {
index = entry.getIndex();
@Override
public long getIndex() {
- return this.index;
+ return index;
}
@Override
public long getTerm() {
- return this.term;
+ return term;
}
}
}
import static java.util.Objects.requireNonNull;
import akka.actor.Cancellable;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import scala.concurrent.duration.FiniteDuration;
/**
private final Cancellable cancelTimer;
private boolean canRun = true;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+ justification = "https://github.com/spotbugs/spotbugs/issues/1867")
TimedRunnable(final FiniteDuration timeout, final RaftActor actor) {
cancelTimer = requireNonNull(actor).getContext().system().scheduler()
- .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel,
- actor.getContext().system().dispatcher(), actor.self());
+ .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel,
+ actor.getContext().system().dispatcher(), actor.self());
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2017 Inocybe Technologies and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Abstract base that implements Externalizable with no-op methods that is intended for classes that use the
- * externalizable proxy pattern but have no data to serialize and read-resolve to a static instance.
- *
- * @author Thomas Pantelis
- */
-public abstract class EmptyExternalizableProxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private final Object readResolveTo;
-
- protected EmptyExternalizableProxy(final Object readResolveTo) {
- this.readResolveTo = requireNonNull(readResolveTo);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) {
- }
-
- @Override
- public void readExternal(final ObjectInput in) {
- }
-
- protected Object readResolve() {
- return readResolveTo;
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.raft.base.messages;
import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.yangtools.concepts.Identifier;
-public class Replicate {
- private final ActorRef clientActor;
- private final Identifier identifier;
- private final ReplicatedLogEntry replicatedLogEntry;
- private final boolean sendImmediate;
-
- public Replicate(ActorRef clientActor, Identifier identifier, ReplicatedLogEntry replicatedLogEntry,
- boolean sendImmediate) {
- this.clientActor = clientActor;
- this.identifier = identifier;
- this.replicatedLogEntry = replicatedLogEntry;
- this.sendImmediate = sendImmediate;
- }
-
- public ActorRef getClientActor() {
- return clientActor;
- }
-
- public Identifier getIdentifier() {
- return identifier;
- }
-
- public ReplicatedLogEntry getReplicatedLogEntry() {
- return replicatedLogEntry;
- }
-
- public boolean isSendImmediate() {
- return sendImmediate;
- }
+public record Replicate(long logIndex, boolean sendImmediate, ActorRef clientActor, Identifier identifier) {
+ // Nothing else here
}
* @author Thomas Pantelis
*/
public final class TimeoutNow implements Serializable, ControlMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
public static final TimeoutNow INSTANCE = new TimeoutNow();
private TimeoutNow() {
// Hidden on purpose
}
- private Object writeReplace() {
- return new Proxy();
- }
-
- private static class Proxy extends EmptyExternalizableProxy {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- super(INSTANCE);
- }
+ @java.io.Serial
+ @SuppressWarnings("static-method")
+ private Object readResolve() {
+ return INSTANCE;
}
}
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
-import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
import org.opendaylight.controller.cluster.raft.PeerInfo;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.VotingState;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import scala.concurrent.duration.FiniteDuration;
/**
super(context, state);
appendEntriesMessageSlicer = MessageSlicer.builder().logContext(logName())
- .messageSliceSize(context.getConfigParams().getSnapshotChunkSize())
+ .messageSliceSize(context.getConfigParams().getMaximumMessageSliceSize())
.expireStateAfterInactivity(context.getConfigParams().getElectionTimeOutInterval().toMillis() * 3,
TimeUnit.MILLISECONDS).build();
followerToLog.remove(followerId);
}
- public void updateMinReplicaCount() {
+ public final void updateMinReplicaCount() {
int numVoting = 0;
for (PeerInfo peer: context.getPeers()) {
if (peer.isVoting()) {
return this;
}
+ final var followerRaftVersion = appendEntriesReply.getRaftVersion();
+ if (followerRaftVersion < RaftVersions.FLUORINE_VERSION) {
+ log.warn("{}: handleAppendEntriesReply - ignoring reply from follower {} raft version {}", logName(),
+ followerId, followerRaftVersion);
+ return this;
+ }
+
final long lastActivityNanos = followerLogInformation.nanosSinceLastActivity();
if (lastActivityNanos > context.getConfigParams().getElectionTimeOutInterval().toNanos()) {
log.warn("{} : handleAppendEntriesReply delayed beyond election timeout, "
followerLogInformation.markFollowerActive();
followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
- followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());
+ followerLogInformation.setRaftVersion(followerRaftVersion);
followerLogInformation.setNeedsLeaderAddress(appendEntriesReply.isNeedsLeaderAddress());
long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
* @return the ClientRequestTracker or null if none available
*/
private ClientRequestTracker removeClientRequestTracker(final long logIndex) {
- final Iterator<ClientRequestTracker> it = trackers.iterator();
+ final var it = trackers.iterator();
while (it.hasNext()) {
- final ClientRequestTracker t = it.next();
- if (t.getIndex() == logIndex) {
+ final var tracker = it.next();
+ if (tracker.logIndex() == logIndex) {
it.remove();
- return t;
+ return tracker;
}
}
-
return null;
}
// If it does that means the leader wasn't dropped before the transaction applied.
// That means that this transaction can be safely applied as a local transaction since we
// have the ClientRequestTracker.
- final ClientRequestTracker tracker = removeClientRequestTracker(entry.getIndex());
+ final var tracker = removeClientRequestTracker(entry.getIndex());
if (tracker != null) {
- return new ApplyState(tracker.getClientActor(), tracker.getIdentifier(), entry);
+ return new ApplyState(tracker.clientActor(), tracker.identifier(), entry);
}
// Tracker is missing, this means that we switched behaviours between replicate and applystate
// and became the leader again,. We still want to apply this as a local modification because
// we have resumed leadership with that log entry having been committed.
- final Payload payload = entry.getData();
- if (payload instanceof IdentifiablePayload) {
- return new ApplyState(null, ((IdentifiablePayload<?>) payload).getIdentifier(), entry);
+ if (entry.getData() instanceof IdentifiablePayload<?> identifiable) {
+ return new ApplyState(null, identifiable.getIdentifier(), entry);
}
return new ApplyState(null, null, entry);
return this;
}
- if (message instanceof RaftRPC) {
- RaftRPC rpc = (RaftRPC) message;
- // If RPC request or response contains term T > currentTerm:
- // set currentTerm = T, convert to follower (§5.1)
- // This applies to all RPC messages and responses
- if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
- log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
- logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
-
- context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
-
- // This is a special case. Normally when stepping down as leader we don't process and reply to the
- // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
- // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
- // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
- // state and starting a new election and grabbing leadership back before the other candidate node can
- // start a new election due to lack of responses. This case would only occur if there isn't a majority
- // of other nodes available that can elect the requesting candidate. Since we're transferring
- // leadership, we should make every effort to get the requesting node elected.
- if (rpc instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) {
- log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
- super.handleMessage(sender, rpc);
- }
-
- return internalSwitchBehavior(RaftState.Follower);
+ // If RPC request or response contains term T > currentTerm:
+ // set currentTerm = T, convert to follower (§5.1)
+ // This applies to all RPC messages and responses
+ if (message instanceof RaftRPC rpc && rpc.getTerm() > context.getTermInformation().getCurrentTerm()
+ && shouldUpdateTerm(rpc)) {
+
+ log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
+ context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+
+ // This is a special case. Normally when stepping down as leader we don't process and reply to the
+ // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
+ // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
+ // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
+ // state and starting a new election and grabbing leadership back before the other candidate node can
+ // start a new election due to lack of responses. This case would only occur if there isn't a majority
+ // of other nodes available that can elect the requesting candidate. Since we're transferring
+ // leadership, we should make every effort to get the requesting node elected.
+ if (rpc instanceof RequestVote requestVote && context.getRaftActorLeadershipTransferCohort() != null) {
+ log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
+ requestVote(sender, requestVote);
}
+
+ return internalSwitchBehavior(RaftState.Follower);
}
if (message instanceof SendHeartBeat) {
beforeSendHeartbeat();
sendHeartBeat();
scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
- } else if (message instanceof SendInstallSnapshot) {
- SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message;
+ } else if (message instanceof SendInstallSnapshot sendInstallSnapshot) {
setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(),
sendInstallSnapshot.getSnapshotBytes()));
sendInstallSnapshot();
- } else if (message instanceof Replicate) {
- replicate((Replicate) message);
- } else if (message instanceof InstallSnapshotReply) {
- handleInstallSnapshotReply((InstallSnapshotReply) message);
+ } else if (message instanceof Replicate replicate) {
+ replicate(replicate);
+ } else if (message instanceof InstallSnapshotReply installSnapshotReply) {
+ handleInstallSnapshotReply(installSnapshotReply);
} else if (message instanceof CheckConsensusReached) {
possiblyUpdateCommitIndex();
} else {
if (installSnapshotState.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
- long followerMatchIndex = snapshotHolder.get().getLastIncludedIndex();
+ long followerMatchIndex = snapshotHolder.orElseThrow().getLastIncludedIndex();
followerLogInformation.setMatchIndex(followerMatchIndex);
followerLogInformation.setNextIndex(followerMatchIndex + 1);
followerLogInformation.clearLeaderInstallSnapshotState();
}
private void replicate(final Replicate replicate) {
- long logIndex = replicate.getReplicatedLogEntry().getIndex();
+ final long logIndex = replicate.logIndex();
- log.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}, isSendImmediate: {}", logName(),
- replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass(),
- replicate.isSendImmediate());
+ log.debug("{}: Replicate message: identifier: {}, logIndex: {}, isSendImmediate: {}", logName(),
+ replicate.identifier(), logIndex, replicate.sendImmediate());
// Create a tracker entry we will use this later to notify the
// client actor
- if (replicate.getClientActor() != null) {
- trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(),
- logIndex));
+ final var clientActor = replicate.clientActor();
+ if (clientActor != null) {
+ trackers.add(new ClientRequestTracker(logIndex, clientActor, replicate.identifier()));
}
boolean applyModificationToState = !context.anyVotingPeers()
applyLogToStateMachine(logIndex);
}
- if (replicate.isSendImmediate() && !followerToLog.isEmpty()) {
+ if (replicate.sendImmediate() && !followerToLog.isEmpty()) {
sendAppendEntries(0, false);
}
}
// Try to get all the entries in the journal but not exceeding the max data size for a single AppendEntries
// message.
int maxEntries = (int) context.getReplicatedLog().size();
- final int maxDataSize = context.getConfigParams().getSnapshotChunkSize();
+ final int maxDataSize = context.getConfigParams().getMaximumMessageSliceSize();
final long followerNextIndex = followerLogInfo.getNextIndex();
List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex,
maxEntries, maxDataSize);
// If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
// that is the case, then we need to slice it into smaller chunks.
- if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
+ if (entries.size() != 1 || entries.get(0).getData().serializedSize() <= maxDataSize) {
// Don't need to slice.
return entries;
}
}
boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
- this.getReplicatedToAllIndex(), followerId);
+ getReplicatedToAllIndex(), followerId);
if (captureInitiated) {
followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState(
- context.getConfigParams().getSnapshotChunkSize(), logName()));
+ context.getConfigParams().getMaximumMessageSliceSize(), logName()));
}
return captureInitiated;
if (snapshotHolder.isPresent()) {
LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
if (installSnapshotState == null) {
- installSnapshotState = new LeaderInstallSnapshotState(context.getConfigParams().getSnapshotChunkSize(),
- logName());
+ installSnapshotState = new LeaderInstallSnapshotState(
+ context.getConfigParams().getMaximumMessageSliceSize(), logName());
followerLogInfo.setLeaderInstallSnapshotState(installSnapshotState);
}
try {
// Ensure the snapshot bytes are set - this is a no-op.
- installSnapshotState.setSnapshotBytes(snapshotHolder.get().getSnapshotBytes());
+ installSnapshotState.setSnapshotBytes(snapshotHolder.orElseThrow().getSnapshotBytes());
if (!installSnapshotState.canSendNextChunk()) {
return;
} catch (IOException e) {
log.warn("{}: Unable to send chunk: {}/{}. Reseting snapshot progress. Snapshot state: {}", logName(),
installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks(),
- installSnapshotState);
+ installSnapshotState, e);
installSnapshotState.reset();
}
}
installSnapshotState.startChunkTimer();
followerActor.tell(
new InstallSnapshot(currentTerm(), context.getId(),
- snapshotHolder.get().getLastIncludedIndex(),
- snapshotHolder.get().getLastIncludedTerm(),
+ snapshotHolder.orElseThrow().getLastIncludedIndex(),
+ snapshotHolder.orElseThrow().getLastIncludedTerm(),
snapshotChunk,
chunkIndex,
installSnapshotState.getTotalChunks(),
OptionalInt.of(installSnapshotState.getLastChunkHashCode()),
- serverConfig
- ).toSerializable(followerLogInfo.getRaftVersion()),
+ serverConfig,
+ followerLogInfo.getRaftVersion()),
actor()
);
}
private final ByteSource snapshotBytes;
SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) {
- this.lastIncludedTerm = snapshot.getLastAppliedTerm();
- this.lastIncludedIndex = snapshot.getLastAppliedIndex();
+ lastIncludedTerm = snapshot.getLastAppliedTerm();
+ lastIncludedIndex = snapshot.getLastAppliedIndex();
this.snapshotBytes = snapshotBytes;
}
import akka.cluster.Member;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
AbstractRaftActorBehavior(final RaftActorContext context, final RaftState state) {
this.context = requireNonNull(context);
this.state = requireNonNull(state);
- this.log = context.getLogger();
+ log = context.getLogger();
logName = String.format("%s (%s)", context.getId(), state);
}
public static RaftActorBehavior createBehavior(final RaftActorContext context, final RaftState state) {
- switch (state) {
- case Candidate:
- return new Candidate(context);
- case Follower:
- return new Follower(context);
- case IsolatedLeader:
- return new IsolatedLeader(context);
- case Leader:
- return new Leader(context);
- case PreLeader:
- return new PreLeader(context);
- default:
- throw new IllegalArgumentException("Unhandled state " + state);
- }
+ return switch (state) {
+ case Candidate -> new Candidate(context);
+ case Follower -> new Follower(context);
+ case IsolatedLeader -> new IsolatedLeader(context);
+ case Leader -> new Leader(context);
+ case PreLeader -> new PreLeader(context);
+ };
}
@Override
// the log with the later term is more up-to-date. If the logs
// end with the same term, then whichever log is longer is
// more up-to-date.
- if (requestVote.getLastLogTerm() > lastTerm()) {
- candidateLatest = true;
- } else if (requestVote.getLastLogTerm() == lastTerm()
- && requestVote.getLastLogIndex() >= lastIndex()) {
+ if (requestVote.getLastLogTerm() > lastTerm()
+ || requestVote.getLastLogTerm() == lastTerm() && requestVote.getLastLogIndex() >= lastIndex()) {
candidateLatest = true;
}
* @return a random election duration
*/
protected FiniteDuration electionDuration() {
- long variance = new Random().nextInt(context.getConfigParams().getElectionTimeVariance());
+ long variance = ThreadLocalRandom.current().nextInt(context.getConfigParams().getElectionTimeVariance());
return context.getConfigParams().getElectionTimeOutInterval().$plus(
new FiniteDuration(variance, TimeUnit.MILLISECONDS));
}
*
* @param interval the duration after which we should trigger a new election
*/
+ // Non-final for testing
protected void scheduleElection(final FiniteDuration interval) {
stopElection();
*
* @return the actor
*/
- protected ActorRef actor() {
+ protected final ActorRef actor() {
return context.getActor();
}
@Override
public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
- if (message instanceof AppendEntries) {
- return appendEntries(sender, (AppendEntries) message);
- } else if (message instanceof AppendEntriesReply) {
- return handleAppendEntriesReply(sender, (AppendEntriesReply) message);
- } else if (message instanceof RequestVote) {
- return requestVote(sender, (RequestVote) message);
- } else if (message instanceof RequestVoteReply) {
- return handleRequestVoteReply(sender, (RequestVoteReply) message);
+ if (message instanceof AppendEntries appendEntries) {
+ return appendEntries(sender, appendEntries);
+ } else if (message instanceof AppendEntriesReply appendEntriesReply) {
+ return handleAppendEntriesReply(sender, appendEntriesReply);
+ } else if (message instanceof RequestVote requestVote) {
+ return requestVote(sender, requestVote);
+ } else if (message instanceof RequestVoteReply requestVoteReply) {
+ return handleRequestVoteReply(sender, requestVoteReply);
} else {
return null;
}
return this;
}
- log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), this.state(),
+ log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), state(),
newBehavior.state(), context.getTermInformation().getCurrentTerm());
try {
close();
} catch (RuntimeException e) {
- log.error("{}: Failed to close behavior : {}", logName(), this.state(), e);
+ log.error("{}: Failed to close behavior : {}", logName(), state(), e);
}
return newBehavior;
}
}
}
- protected String getId() {
+ protected final String getId() {
return context.getId();
}
// Check whether we should update the term. In case of half-connected nodes, we want to ignore RequestVote
// messages, as the candidate is not able to receive our response.
protected boolean shouldUpdateTerm(final RaftRPC rpc) {
- if (!(rpc instanceof RequestVote)) {
+ if (!(rpc instanceof RequestVote requestVote)) {
return true;
}
- final RequestVote requestVote = (RequestVote) rpc;
log.debug("{}: Found higher term in RequestVote rpc, verifying whether it's safe to update term.", logName());
final Optional<Cluster> maybeCluster = context.getCluster();
if (!maybeCluster.isPresent()) {
return true;
}
- final Cluster cluster = maybeCluster.get();
+ final Cluster cluster = maybeCluster.orElseThrow();
final Set<Member> unreachable = cluster.state().getUnreachable();
log.debug("{}: Cluster state: {}", logName(), unreachable);
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import java.util.ArrayList;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
import org.opendaylight.controller.cluster.raft.PeerInfo;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
* <li> If election timeout elapses: start new election
* </ul>
*/
-public class Candidate extends AbstractRaftActorBehavior {
-
- private int voteCount;
-
+public final class Candidate extends AbstractRaftActorBehavior {
+ private final ImmutableList<String> votingPeers;
private final int votesRequired;
- private final Collection<String> votingPeers = new ArrayList<>();
+ private int voteCount;
public Candidate(final RaftActorContext context) {
super(context, RaftState.Candidate);
- for (PeerInfo peer: context.getPeers()) {
- if (peer.isVoting()) {
- votingPeers.add(peer.getId());
- }
- }
+ votingPeers = context.getPeers().stream()
+ .filter(PeerInfo::isVoting)
+ .map(PeerInfo::getId)
+ .collect(ImmutableList.toImmutableList());
log.debug("{}: Election: Candidate has following voting peers: {}", logName(), votingPeers);
}
@Override
- public final String getLeaderId() {
+ public String getLeaderId() {
return null;
}
@Override
- public final short getLeaderPayloadVersion() {
+ public short getLeaderPayloadVersion() {
return -1;
}
@Override
- final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+ ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
throw new IllegalStateException("A candidate should never attempt to apply " + entry);
}
return this;
}
- if (message instanceof RaftRPC) {
-
- RaftRPC rpc = (RaftRPC) message;
+ if (message instanceof RaftRPC rpc) {
log.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc,
context.getTermInformation().getCurrentTerm());
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FollowerIdentifier}.
+ */
+final class FI implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private String value;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public FI() {
+ // For Externalizable
+ }
+
+ FI(final String value) {
+ this.value = requireNonNull(value);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeObject(value);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ value = (String) in.readObject();
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return new FollowerIdentifier(value);
+ }
+}
import akka.cluster.MemberStatus;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
* convert to candidate
* </ul>
*/
+// Non-final for testing
public class Follower extends AbstractRaftActorBehavior {
private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;
this(context, null, (short)-1);
}
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+ justification = "electionDuration() is not final for Candidate override")
public Follower(final RaftActorContext context, final String initialLeaderId,
final short initialLeaderPayloadVersion) {
super(context, RaftState.Follower);
- this.leaderId = initialLeaderId;
- this.leaderPayloadVersion = initialLeaderPayloadVersion;
+ leaderId = initialLeaderId;
+ leaderPayloadVersion = initialLeaderPayloadVersion;
initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
.getSyncIndexThreshold());
leaderId = appendEntries.getLeaderId();
leaderPayloadVersion = appendEntries.getPayloadVersion();
- if (appendEntries.getLeaderAddress().isPresent()) {
- final String address = appendEntries.getLeaderAddress().get();
- log.debug("New leader address: {}", address);
-
- context.setPeerAddress(leaderId, address);
- context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, address);
+ final var leaderAddress = appendEntries.leaderAddress();
+ if (leaderAddress != null) {
+ log.debug("New leader address: {}", leaderAddress);
+ context.setPeerAddress(leaderId, leaderAddress);
+ context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, leaderAddress);
}
// First check if the logs are in sync or not
shouldCaptureSnapshot.compareAndSet(false,
context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));
- if (entry.getData() instanceof ServerConfigurationPayload) {
- context.updatePeerIds((ServerConfigurationPayload)entry.getData());
+ if (entry.getData() instanceof ServerConfigurationPayload serverConfiguration) {
+ context.updatePeerIds(serverConfiguration);
}
}
return this;
}
- if (!(message instanceof RaftRPC)) {
+ if (!(message instanceof RaftRPC rpc)) {
// The rest of the processing requires the message to be a RaftRPC
return null;
}
- final RaftRPC rpc = (RaftRPC) message;
// If RPC request or response contains term T > currentTerm:
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
}
- if (rpc instanceof InstallSnapshot) {
- handleInstallSnapshot(sender, (InstallSnapshot) rpc);
+ if (rpc instanceof InstallSnapshot installSnapshot) {
+ handleInstallSnapshot(sender, installSnapshot);
restartLastLeaderMessageTimer();
scheduleElection(electionDuration());
return this;
}
- if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
+ if (!(rpc instanceof RequestVote requestVote) || canGrantVote(requestVote)) {
restartLastLeaderMessageTimer();
scheduleElection(electionDuration());
}
Address leaderAddress = leaderActor.anchorPath().address();
- CurrentClusterState state = cluster.get().state();
+ CurrentClusterState state = cluster.orElseThrow().state();
Set<Member> unreachable = state.getUnreachable();
log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
return false;
}
- final Cluster cluster = maybeCluster.get();
+ final Cluster cluster = maybeCluster.orElseThrow();
final Member selfMember = cluster.selfMember();
final CurrentClusterState state = cluster.state();
+ "all members {} self member: {}", logName(), unreachable, members, selfMember);
// no unreachable peers means we cannot be isolated
- if (unreachable.size() == 0) {
+ if (unreachable.isEmpty()) {
return false;
}
membersToCheck.removeAll(unreachable);
// check if the only member not unreachable is us
- if (membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember)) {
- return true;
- }
-
- return false;
+ return membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember);
}
private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {
Snapshot snapshot = Snapshot.create(
context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
- new ArrayList<>(),
+ List.of(),
installSnapshot.getLastIncludedIndex(),
installSnapshot.getLastIncludedTerm(),
installSnapshot.getLastIncludedIndex(),
} catch (IOException e) {
log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
- sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
- -1, false), actor());
+ sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
closeSnapshotTracker();
}
*/
package org.opendaylight.controller.cluster.raft.behaviors;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import org.opendaylight.yangtools.util.AbstractStringIdentifier;
/**
*
* @author Thomas Pantelis
*/
-class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+final class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- FollowerIdentifier(String followerId) {
+ FollowerIdentifier(final String followerId) {
super(followerId);
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
- }
-
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private FollowerIdentifier identifier;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(FollowerIdentifier identifier) {
- this.identifier = identifier;
- }
-
- @Override
- public void writeExternal(ObjectOutput out) throws IOException {
- out.writeObject(identifier.getValue());
- }
-
- @Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- identifier = new FollowerIdentifier((String) in.readObject());
- }
-
- private Object readResolve() {
- return identifier;
- }
+ return new FI(getValue());
}
}
final Optional<String> requestedFollowerIdOptional
= leadershipTransferContext.transferCohort.getRequestedFollowerId();
- if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.get().equals(followerId)) {
+ if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.orElseThrow().equals(followerId)) {
// we want to transfer leadership to specific follower
return;
}
private final int snapshotChunkSize;
private final String logName;
private ByteSource snapshotBytes;
- private int offset = INITIAL_OFFSET;
+ private long offset = INITIAL_OFFSET;
// the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
- private int replyReceivedForOffset = -1;
+ private long replyReceivedForOffset = -1;
// if replyStatus is false, the previous chunk is attempted
private boolean replyStatus = false;
private int chunkIndex = FIRST_CHUNK_INDEX;
private int nextChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE;
private long snapshotSize;
private InputStream snapshotInputStream;
- private Stopwatch chunkTimer = Stopwatch.createUnstarted();
+ private final Stopwatch chunkTimer = Stopwatch.createUnstarted();
private byte[] currentChunk = null;
LeaderInstallSnapshotState(final int snapshotChunkSize, final String logName) {
chunkIndex = FIRST_CHUNK_INDEX;
}
- int incrementOffset() {
- // if offset is -1 doesnt matter whether it was the initial value or reset, move the offset to 0 to begin with
+ private long incrementOffset() {
+ // if offset is -1 doesn't matter whether it was the initial value or reset, move the offset to 0 to begin with
if (offset == INITIAL_OFFSET) {
offset = 0;
} else {
byte[] getNextChunk() throws IOException {
// increment offset to indicate next chunk is in flight, canSendNextChunk() wont let us hit this again until,
// markSendStatus() is called with either success or failure
- int start = incrementOffset();
+ final var start = incrementOffset();
if (replyStatus || currentChunk == null) {
int size = snapshotChunkSize;
if (snapshotChunkSize > snapshotSize) {
}
currentChunk = new byte[size];
- int numRead = snapshotInputStream.read(currentChunk);
+ final var numRead = snapshotInputStream.read(currentChunk);
if (numRead != size) {
throw new IOException(String.format(
- "The # of bytes read from the input stream, %d,"
- + "does not match the expected # %d", numRead, size));
+ "The # of bytes read from the input stream, %d, does not match the expected # %d",
+ numRead, size));
}
nextChunkHashCode = Arrays.hashCode(currentChunk);
try {
snapshotInputStream = snapshotBytes.openStream();
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
try {
snapshotInputStream.close();
} catch (IOException e) {
- LOG.warn("{}: Error closing snapshot stream", logName);
+ LOG.warn("{}: Error closing snapshot stream", logName, e);
}
snapshotInputStream = null;
* @param lastChunkHashCode the optional hash code for the chunk
* @return true if this is the last chunk is received
* @throws InvalidChunkException if the chunk index is invalid or out of order
+ * @throws IOException if there is a problem writing to the stream
*/
boolean addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt maybeLastChunkHashCode)
- throws InvalidChunkException, IOException {
+ throws IOException {
log.debug("addChunk: chunkIndex={}, lastChunkIndex={}, collectedChunks.size={}, lastChunkHashCode={}",
- chunkIndex, lastChunkIndex, count, this.lastChunkHashCode);
+ chunkIndex, lastChunkIndex, count, lastChunkHashCode);
if (sealed) {
throw new InvalidChunkException("Invalid chunk received with chunkIndex " + chunkIndex
throw new InvalidChunkException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex);
}
- if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.getAsInt() != this.lastChunkHashCode) {
+ if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.orElseThrow() != lastChunkHashCode) {
throw new InvalidChunkException("The hash code of the recorded last chunk does not match "
- + "the senders hash code, expected " + this.lastChunkHashCode + " was "
- + maybeLastChunkHashCode.getAsInt());
+ + "the senders hash code, expected " + lastChunkHashCode + " was "
+ + maybeLastChunkHashCode.orElseThrow());
}
bufferedStream.write(chunk);
count += chunk.length;
sealed = chunkIndex == totalChunks;
lastChunkIndex = chunkIndex;
- this.lastChunkHashCode = Arrays.hashCode(chunk);
+ lastChunkHashCode = Arrays.hashCode(chunk);
return sealed;
}
import java.util.List;
import java.util.Map;
import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
/**
* The response to a GetOnDemandRaftState message.
import akka.dispatch.ControlMessage;
import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizableProxy;
/**
* Message sent to a raft actor to shutdown gracefully. If it's the leader it will transfer leadership to a
* @author Thomas Pantelis
*/
public final class Shutdown implements Serializable, ControlMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+
public static final Shutdown INSTANCE = new Shutdown();
private Shutdown() {
// Hidden on purpose
}
- private Object writeReplace() {
- return new Proxy();
- }
-
- private static class Proxy extends EmptyExternalizableProxy {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- super(INSTANCE);
- }
+ @java.io.Serial
+ @SuppressWarnings("static-method")
+ private Object readResolve() {
+ return INSTANCE;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Argon serialization proxy for {@link AppendEntries}.
+ */
+final class AE implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private AppendEntries appendEntries;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public AE() {
+ // For Externalizable
+ }
+
+ AE(final AppendEntries appendEntries) {
+ this.appendEntries = requireNonNull(appendEntries);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeShort(appendEntries.getLeaderRaftVersion());
+ WritableObjects.writeLong(out, appendEntries.getTerm());
+ out.writeObject(appendEntries.getLeaderId());
+
+ WritableObjects.writeLongs(out, appendEntries.getPrevLogTerm(), appendEntries.getPrevLogIndex());
+ WritableObjects.writeLongs(out, appendEntries.getLeaderCommit(), appendEntries.getReplicatedToAllIndex());
+
+ out.writeShort(appendEntries.getPayloadVersion());
+
+ final var entries = appendEntries.getEntries();
+ out.writeInt(entries.size());
+ for (var e : entries) {
+ WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+ out.writeObject(e.getData());
+ }
+
+ out.writeObject(appendEntries.leaderAddress());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ short leaderRaftVersion = in.readShort();
+ long term = WritableObjects.readLong(in);
+ String leaderId = (String) in.readObject();
+
+ byte hdr = WritableObjects.readLongHeader(in);
+ long prevLogTerm = WritableObjects.readFirstLong(in, hdr);
+ long prevLogIndex = WritableObjects.readSecondLong(in, hdr);
+
+ hdr = WritableObjects.readLongHeader(in);
+ long leaderCommit = WritableObjects.readFirstLong(in, hdr);
+ long replicatedToAllIndex = WritableObjects.readSecondLong(in, hdr);
+ short payloadVersion = in.readShort();
+
+ int size = in.readInt();
+ var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ hdr = WritableObjects.readLongHeader(in);
+ entries.add(new SimpleReplicatedLogEntry(WritableObjects.readFirstLong(in, hdr),
+ WritableObjects.readSecondLong(in, hdr), (Payload) in.readObject()));
+ }
+
+ String leaderAddress = (String)in.readObject();
+
+ appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
+ replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
+ leaderAddress);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(appendEntries);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link AppendEntriesReply}.
+ */
+final class AR implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ // Flag bits
+ private static final int SUCCESS = 0x10;
+ private static final int FORCE_INSTALL_SNAPSHOT = 0x20;
+ private static final int NEEDS_LEADER_ADDRESS = 0x40;
+
+ private AppendEntriesReply appendEntriesReply;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public AR() {
+ // For Externalizable
+ }
+
+ AR(final AppendEntriesReply appendEntriesReply) {
+ this.appendEntriesReply = requireNonNull(appendEntriesReply);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeShort(appendEntriesReply.getRaftVersion());
+
+ int flags = 0;
+ if (appendEntriesReply.isSuccess()) {
+ flags |= SUCCESS;
+ }
+ if (appendEntriesReply.isForceInstallSnapshot()) {
+ flags |= FORCE_INSTALL_SNAPSHOT;
+ }
+ if (appendEntriesReply.isNeedsLeaderAddress()) {
+ flags |= NEEDS_LEADER_ADDRESS;
+ }
+ WritableObjects.writeLong(out, appendEntriesReply.getTerm(), flags);
+
+ out.writeObject(appendEntriesReply.getFollowerId());
+
+ WritableObjects.writeLongs(out, appendEntriesReply.getLogLastIndex(), appendEntriesReply.getLogLastTerm());
+
+ out.writeShort(appendEntriesReply.getPayloadVersion());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ short raftVersion = in.readShort();
+
+ byte hdr = WritableObjects.readLongHeader(in);
+ final int flags = WritableObjects.longHeaderFlags(hdr);
+
+ long term = WritableObjects.readLongBody(in, hdr);
+ String followerId = (String) in.readObject();
+
+ hdr = WritableObjects.readLongHeader(in);
+ long logLastIndex = WritableObjects.readFirstLong(in, hdr);
+ long logLastTerm = WritableObjects.readSecondLong(in, hdr);
+
+ short payloadVersion = in.readShort();
+
+ appendEntriesReply = new AppendEntriesReply(followerId, term, getFlag(flags, SUCCESS), logLastIndex,
+ logLastTerm, payloadVersion, getFlag(flags, FORCE_INSTALL_SNAPSHOT), getFlag(flags, NEEDS_LEADER_ADDRESS),
+ raftVersion, RaftVersions.CURRENT_VERSION);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(appendEntriesReply);
+ }
+
+ private static boolean getFlag(final int flags, final int bit) {
+ return (flags & bit) != 0;
+ }
+}
package org.opendaylight.controller.cluster.raft.messages;
public abstract class AbstractRaftRPC implements RaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = -6061342433962854822L;
// term
}
// All implementations must use Externalizable Proxy pattern
+ @java.io.Serial
abstract Object writeReplace();
}
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
-import java.util.ArrayList;
import java.util.List;
-import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.raft.RaftVersions;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
/**
* Invoked by leader to replicate log entries (§5.3); also used as heartbeat (§5.2).
*/
public final class AppendEntries extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
// So that follower can redirect clients
private final String leaderAddress;
- private AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex,
+ AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex,
final long prevLogTerm, @NonNull final List<ReplicatedLogEntry> entries, final long leaderCommit,
final long replicatedToAllIndex, final short payloadVersion, final short recipientRaftVersion,
final short leaderRaftVersion, @Nullable final String leaderAddress) {
return payloadVersion;
}
- public Optional<String> getLeaderAddress() {
- return Optional.ofNullable(leaderAddress);
+ public @Nullable String leaderAddress() {
+ return leaderAddress;
}
public short getLeaderRaftVersion() {
@Override
Object writeReplace() {
- return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new Proxy(this);
+ return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new AE(this);
}
/**
* Fluorine version that adds the leader address.
*/
private static class ProxyV2 implements Externalizable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private AppendEntries appendEntries;
short payloadVersion = in.readShort();
int size = in.readInt();
- List<ReplicatedLogEntry> entries = new ArrayList<>(size);
+ var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
for (int i = 0; i < size; i++) {
entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
}
String leaderAddress = (String)in.readObject();
- appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
+ appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
leaderAddress);
}
- private Object readResolve() {
- return appendEntries;
- }
- }
-
- /**
- * Pre-Fluorine version.
- */
- @Deprecated
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private AppendEntries appendEntries;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final AppendEntries appendEntries) {
- this.appendEntries = appendEntries;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(appendEntries.getTerm());
- out.writeObject(appendEntries.leaderId);
- out.writeLong(appendEntries.prevLogTerm);
- out.writeLong(appendEntries.prevLogIndex);
- out.writeLong(appendEntries.leaderCommit);
- out.writeLong(appendEntries.replicatedToAllIndex);
- out.writeShort(appendEntries.payloadVersion);
-
- out.writeInt(appendEntries.entries.size());
- for (ReplicatedLogEntry e: appendEntries.entries) {
- out.writeLong(e.getIndex());
- out.writeLong(e.getTerm());
- out.writeObject(e.getData());
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- long term = in.readLong();
- String leaderId = (String) in.readObject();
- long prevLogTerm = in.readLong();
- long prevLogIndex = in.readLong();
- long leaderCommit = in.readLong();
- long replicatedToAllIndex = in.readLong();
- short payloadVersion = in.readShort();
-
- int size = in.readInt();
- List<ReplicatedLogEntry> entries = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
- }
-
- appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
- replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, RaftVersions.BORON_VERSION, null);
- }
-
+ @java.io.Serial
private Object readResolve() {
return appendEntries;
}
* Reply for the AppendEntries message.
*/
public final class AppendEntriesReply extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = -7487547356392536683L;
// true if follower contained entry matching
needsLeaderAddress, RaftVersions.CURRENT_VERSION, recipientRaftVersion);
}
- private AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
+ AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
final long logLastTerm, final short payloadVersion, final boolean forceInstallSnapshot,
final boolean needsLeaderAddress, final short raftVersion, final short recipientRaftVersion) {
super(term);
@Override
Object writeReplace() {
- return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new Proxy(this);
+ return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new AR(this);
}
/**
* Fluorine version that adds the needsLeaderAddress flag.
*/
private static class Proxy2 implements Externalizable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private AppendEntriesReply appendEntriesReply;
RaftVersions.CURRENT_VERSION);
}
- private Object readResolve() {
- return appendEntriesReply;
- }
- }
-
- /**
- * Pre-Fluorine version.
- */
- @Deprecated
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private AppendEntriesReply appendEntriesReply;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final AppendEntriesReply appendEntriesReply) {
- this.appendEntriesReply = appendEntriesReply;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeShort(appendEntriesReply.raftVersion);
- out.writeLong(appendEntriesReply.getTerm());
- out.writeObject(appendEntriesReply.followerId);
- out.writeBoolean(appendEntriesReply.success);
- out.writeLong(appendEntriesReply.logLastIndex);
- out.writeLong(appendEntriesReply.logLastTerm);
- out.writeShort(appendEntriesReply.payloadVersion);
- out.writeBoolean(appendEntriesReply.forceInstallSnapshot);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- short raftVersion = in.readShort();
- long term = in.readLong();
- String followerId = (String) in.readObject();
- boolean success = in.readBoolean();
- long logLastIndex = in.readLong();
- long logLastTerm = in.readLong();
- short payloadVersion = in.readShort();
- boolean forceInstallSnapshot = in.readBoolean();
-
- appendEntriesReply = new AppendEntriesReply(followerId, term, success, logLastIndex, logLastTerm,
- payloadVersion, forceInstallSnapshot, false, raftVersion, RaftVersions.CURRENT_VERSION);
- }
-
+ @java.io.Serial
private Object readResolve() {
return appendEntriesReply;
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshotReply}.
+ */
+final class IR implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ // Flags
+ private static final int SUCCESS = 0x10;
+
+ private InstallSnapshotReply installSnapshotReply;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public IR() {
+ // For Externalizable
+ }
+
+ IR(final InstallSnapshotReply installSnapshotReply) {
+ this.installSnapshotReply = requireNonNull(installSnapshotReply);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, installSnapshotReply.getTerm(), installSnapshotReply.isSuccess() ? SUCCESS : 0);
+ out.writeObject(installSnapshotReply.getFollowerId());
+ out.writeInt(installSnapshotReply.getChunkIndex());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final byte hdr = WritableObjects.readLongHeader(in);
+ final int flags = WritableObjects.longHeaderFlags(hdr);
+
+ long term = WritableObjects.readLongBody(in, hdr);
+ String followerId = (String) in.readObject();
+ int chunkIndex = in.readInt();
+
+ installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, (flags & SUCCESS) != 0);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(installSnapshotReply);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshot}.
+ */
+final class IS implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ // Flags
+ private static final int LAST_CHUNK_HASHCODE = 0x10;
+ private static final int SERVER_CONFIG = 0x20;
+
+ private InstallSnapshot installSnapshot;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public IS() {
+ // For Externalizable
+ }
+
+ IS(final InstallSnapshot installSnapshot) {
+ this.installSnapshot = requireNonNull(installSnapshot);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ int flags = 0;
+ final var lastChunkHashCode = installSnapshot.getLastChunkHashCode();
+ if (lastChunkHashCode.isPresent()) {
+ flags |= LAST_CHUNK_HASHCODE;
+ }
+ final var serverConfig = installSnapshot.getServerConfig();
+ if (serverConfig.isPresent()) {
+ flags |= SERVER_CONFIG;
+ }
+
+ WritableObjects.writeLong(out, installSnapshot.getTerm(), flags);
+ out.writeObject(installSnapshot.getLeaderId());
+ WritableObjects.writeLongs(out, installSnapshot.getLastIncludedIndex(), installSnapshot.getLastIncludedTerm());
+ out.writeInt(installSnapshot.getChunkIndex());
+ out.writeInt(installSnapshot.getTotalChunks());
+
+ if (lastChunkHashCode.isPresent()) {
+ out.writeInt(lastChunkHashCode.orElseThrow());
+ }
+ if (serverConfig.isPresent()) {
+ out.writeObject(serverConfig.orElseThrow());
+ }
+
+ out.writeObject(installSnapshot.getData());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ byte hdr = WritableObjects.readLongHeader(in);
+ final int flags = WritableObjects.longHeaderFlags(hdr);
+
+ long term = WritableObjects.readLongBody(in, hdr);
+ String leaderId = (String) in.readObject();
+
+ hdr = WritableObjects.readLongHeader(in);
+ long lastIncludedIndex = WritableObjects.readFirstLong(in, hdr);
+ long lastIncludedTerm = WritableObjects.readSecondLong(in, hdr);
+ int chunkIndex = in.readInt();
+ int totalChunks = in.readInt();
+
+ OptionalInt lastChunkHashCode = getFlag(flags, LAST_CHUNK_HASHCODE) ? OptionalInt.of(in.readInt())
+ : OptionalInt.empty();
+ Optional<ServerConfigurationPayload> serverConfig = getFlag(flags, SERVER_CONFIG)
+ ? Optional.of((ServerConfigurationPayload)in.readObject()) : Optional.empty();
+
+ byte[] data = (byte[])in.readObject();
+
+ installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
+ chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(installSnapshot);
+ }
+
+ private static boolean getFlag(final int flags, final int bit) {
+ return (flags & bit) != 0;
+ }
+}
+
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+package org.opendaylight.controller.cluster.raft.messages;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.concepts.Identifier;
public abstract class IdentifiablePayload<T extends Identifier> extends Payload implements Identifiable<T> {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
}
*/
package org.opendaylight.controller.cluster.raft.messages;
+import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectOutput;
import java.util.Optional;
import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
/**
* Message sent from a leader to install a snapshot chunk on a follower.
*/
public final class InstallSnapshot extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final String leaderId;
private final OptionalInt lastChunkHashCode;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via writeReplace()")
private final Optional<ServerConfigurationPayload> serverConfig;
+ private final short recipientRaftVersion;
- @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] "
- + "object but this is OK since this class is merely a DTO and does not process byte[] internally. "
- + "Also it would be inefficient to create a copy as the byte[] could be large.")
+ @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = """
+ Stores a reference to an externally mutable byte[] object but this is OK since this class is merely a DTO and \
+ does not process byte[] internally. Also it would be inefficient to create a copy as the byte[] could be \
+ large.""")
public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
final long lastIncludedTerm, final byte[] data, final int chunkIndex, final int totalChunks,
- final OptionalInt lastChunkHashCode, final Optional<ServerConfigurationPayload> serverConfig) {
+ final OptionalInt lastChunkHashCode, final Optional<ServerConfigurationPayload> serverConfig,
+ final short recipientRaftVersion) {
super(term);
this.leaderId = leaderId;
this.lastIncludedIndex = lastIncludedIndex;
this.totalChunks = totalChunks;
this.lastChunkHashCode = lastChunkHashCode;
this.serverConfig = serverConfig;
+ this.recipientRaftVersion = recipientRaftVersion;
}
+ @VisibleForTesting
public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
final long lastIncludedTerm, final byte[] data, final int chunkIndex,
final int totalChunks) {
this(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, chunkIndex, totalChunks, OptionalInt.empty(),
- Optional.empty());
+ Optional.empty(), RaftVersions.CURRENT_VERSION);
}
public String getLeaderId() {
return lastIncludedTerm;
}
- @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
- + "this is OK since this class is merely a DTO and does not process the byte[] internally. "
- + "Also it would be inefficient to create a return copy as the byte[] could be large.")
+ @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = """
+ Exposes a mutable object stored in a field but this is OK since this class is merely a DTO and does not \
+ process the byte[] internally. Also it would be inefficient to create a return copy as the byte[] could be \
+ large.""")
public byte[] getData() {
return data;
}
return serverConfig;
}
- public <T> Object toSerializable(final short version) {
- return this;
- }
-
@Override
public String toString() {
return "InstallSnapshot [term=" + getTerm() + ", leaderId=" + leaderId + ", lastIncludedIndex="
@Override
Object writeReplace() {
- return new Proxy(this);
+ return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy(this) : new IS(this);
}
private static class Proxy implements Externalizable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private InstallSnapshot installSnapshot;
out.writeByte(installSnapshot.lastChunkHashCode.isPresent() ? 1 : 0);
if (installSnapshot.lastChunkHashCode.isPresent()) {
- out.writeInt(installSnapshot.lastChunkHashCode.getAsInt());
+ out.writeInt(installSnapshot.lastChunkHashCode.orElseThrow());
}
out.writeByte(installSnapshot.serverConfig.isPresent() ? 1 : 0);
if (installSnapshot.serverConfig.isPresent()) {
- out.writeObject(installSnapshot.serverConfig.get());
+ out.writeObject(installSnapshot.serverConfig.orElseThrow());
}
out.writeObject(installSnapshot.data);
byte[] data = (byte[])in.readObject();
installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
- chunkIndex, totalChunks, lastChunkHashCode, serverConfig);
+ chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
}
+ @java.io.Serial
private Object readResolve() {
return installSnapshot;
}
*/
package org.opendaylight.controller.cluster.raft.messages;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
public final class InstallSnapshotReply extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = 642227896390779503L;
// The followerId - this will be used to figure out which follower is
@Override
Object writeReplace() {
- return new Proxy(this);
- }
-
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private InstallSnapshotReply installSnapshotReply;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final InstallSnapshotReply installSnapshotReply) {
- this.installSnapshotReply = installSnapshotReply;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(installSnapshotReply.getTerm());
- out.writeObject(installSnapshotReply.followerId);
- out.writeInt(installSnapshotReply.chunkIndex);
- out.writeBoolean(installSnapshotReply.success);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- long term = in.readLong();
- String followerId = (String) in.readObject();
- int chunkIndex = in.readInt();
- boolean success = in.readBoolean();
-
- installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, success);
- }
-
- private Object readResolve() {
- return installSnapshotReply;
- }
+ return new IR(this);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import java.io.Serializable;
+
+/**
+ * An instance of a {@link Payload} class is meant to be used as the Payload for {@link AppendEntries}.
+ *
+ * <p>
+ * When an actor which is derived from RaftActor attempts to persistData it must pass an instance of the Payload class.
+ * Similarly when state needs to be applied to the derived RaftActor it will be passed an instance of the Payload class.
+ */
+public abstract class Payload implements Serializable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Return the estimate of in-memory size of this payload.
+ *
+ * @return An estimate of the in-memory size of this payload.
+ */
+ public abstract int size();
+
+ /**
+ * Return the estimate of serialized size of this payload when passed through serialization. The estimate needs to
+ * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of
+ * uncertainty.
+ *
+ * @return An estimate of serialized size.
+ */
+ public abstract int serializedSize();
+
+ /**
+ * Return the serialization proxy for this object.
+ *
+ * @return Serialization proxy
+ */
+ @java.io.Serial
+ protected abstract Object writeReplace();
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+package org.opendaylight.controller.cluster.raft.messages;
/**
* This is a tagging interface for a Payload implementation that needs to always be persisted regardless of
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVote}.
+ */
+final class RV implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private RequestVote requestVote;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public RV() {
+ // For Externalizable
+ }
+
+ RV(final RequestVote requestVote) {
+ this.requestVote = requireNonNull(requestVote);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, requestVote.getTerm());
+ out.writeObject(requestVote.getCandidateId());
+ WritableObjects.writeLongs(out, requestVote.getLastLogIndex(), requestVote.getLastLogTerm());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ long term = WritableObjects.readLong(in);
+ String candidateId = (String) in.readObject();
+
+ final byte hdr = WritableObjects.readLongHeader(in);
+ long lastLogIndex = WritableObjects.readFirstLong(in, hdr);
+ long lastLogTerm = WritableObjects.readSecondLong(in, hdr);
+
+ requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return requestVote;
+ }
+}
*/
package org.opendaylight.controller.cluster.raft.messages;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
/**
* Invoked by candidates to gather votes (§5.2).
*/
public final class RequestVote extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = -6967509186297108657L;
// candidate requesting vote
@Override
Object writeReplace() {
- return new Proxy(this);
- }
-
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private RequestVote requestVote;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final RequestVote requestVote) {
- this.requestVote = requestVote;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(requestVote.getTerm());
- out.writeObject(requestVote.candidateId);
- out.writeLong(requestVote.lastLogIndex);
- out.writeLong(requestVote.lastLogTerm);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- long term = in.readLong();
- String candidateId = (String) in.readObject();
- long lastLogIndex = in.readLong();
- long lastLogTerm = in.readLong();
-
- requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
- }
-
- private Object readResolve() {
- return requestVote;
- }
+ return new RV(this);
}
}
*/
package org.opendaylight.controller.cluster.raft.messages;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
public final class RequestVoteReply extends AbstractRaftRPC {
+ @java.io.Serial
private static final long serialVersionUID = 8427899326488775660L;
// true means candidate received vote
@Override
Object writeReplace() {
- return new Proxy(this);
- }
-
- private static class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private RequestVoteReply requestVoteReply;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final RequestVoteReply requestVoteReply) {
- this.requestVoteReply = requestVoteReply;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(requestVoteReply.getTerm());
- out.writeBoolean(requestVoteReply.voteGranted);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- long term = in.readLong();
- boolean voteGranted = in.readBoolean();
-
- requestVoteReply = new RequestVoteReply(term, voteGranted);
- }
-
- private Object readResolve() {
- return requestVoteReply;
- }
+ return new VR(this);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVoteReply}.
+ */
+final class VR implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ // Flags
+ private static final int VOTE_GRANTED = 0x10;
+
+ private RequestVoteReply requestVoteReply;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public VR() {
+ // For Externalizable
+ }
+
+ VR(final RequestVoteReply requestVoteReply) {
+ this.requestVoteReply = requireNonNull(requestVoteReply);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, requestVoteReply.getTerm(), requestVoteReply.isVoteGranted() ? VOTE_GRANTED : 0);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ final byte hdr = WritableObjects.readLongHeader(in);
+ requestVoteReply = new RequestVoteReply(WritableObjects.readLongBody(in, hdr),
+ (WritableObjects.longHeaderFlags(hdr) & VOTE_GRANTED) != 0);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(requestVoteReply);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ApplyJournalEntries}.
+ */
+final class AJE implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ApplyJournalEntries applyEntries;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public AJE() {
+ // For Externalizable
+ }
+
+ AJE(final ApplyJournalEntries applyEntries) {
+ this.applyEntries = requireNonNull(applyEntries);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, applyEntries.getToIndex());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ applyEntries = new ApplyJournalEntries(WritableObjects.readLong(in));
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(applyEntries);
+ }
+}
package org.opendaylight.controller.cluster.raft.persisted;
import akka.dispatch.ControlMessage;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
/**
*
* @author Thomas Pantelis
*/
-public class ApplyJournalEntries implements Serializable, ControlMessage {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private ApplyJournalEntries applyEntries;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ApplyJournalEntries applyEntries) {
- this.applyEntries = applyEntries;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(applyEntries.toIndex);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- applyEntries = new ApplyJournalEntries(in.readLong());
- }
-
- private Object readResolve() {
- return applyEntries;
- }
- }
-
+public final class ApplyJournalEntries implements Serializable, ControlMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final long toIndex;
return toIndex;
}
- private Object writeReplace() {
- return new Proxy(this);
- }
-
@Override
public String toString() {
return "ApplyJournalEntries [toIndex=" + toIndex + "]";
}
+
+ @java.io.Serial
+ private Object writeReplace() {
+ return new AJE(this);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link DeleteEntries}.
+ */
+final class DE implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private DeleteEntries deleteEntries;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DE() {
+ // For Externalizable
+ }
+
+ DE(final DeleteEntries deleteEntries) {
+ this.deleteEntries = requireNonNull(deleteEntries);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, deleteEntries.getFromIndex());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ deleteEntries = new DeleteEntries(WritableObjects.readLong(in));
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(deleteEntries);
+ }
+}
*/
package org.opendaylight.controller.cluster.raft.persisted;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
/**
*
* @author Thomas Pantelis
*/
-public class DeleteEntries implements Serializable {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private DeleteEntries deleteEntries;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final DeleteEntries deleteEntries) {
- this.deleteEntries = deleteEntries;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(deleteEntries.fromIndex);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- deleteEntries = new DeleteEntries(in.readLong());
- }
-
- private Object readResolve() {
- return deleteEntries;
- }
- }
-
+public final class DeleteEntries implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final long fromIndex;
return fromIndex;
}
- private Object writeReplace() {
- return new Proxy(this);
- }
-
@Override
public String toString() {
return "DeleteEntries [fromIndex=" + fromIndex + "]";
}
+
+ @java.io.Serial
+ private Object writeReplace() {
+ return new DE(this);
+ }
}
* @author Thomas Pantelis
*/
public final class EmptyState implements Snapshot.State {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public static final EmptyState INSTANCE = new EmptyState();
private EmptyState() {
+ // Hidden on purpose
}
+ @java.io.Serial
@SuppressWarnings("static-method")
private Object readResolve() {
return INSTANCE;
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link SimpleReplicatedLogEntry}.
+ */
+final class LE implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private long index;
+ private long term;
+ private Payload data;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public LE() {
+ // For Externalizable
+ }
+
+ // For size estimation only, use full bit size
+ LE(final Void dummy) {
+ index = Long.MIN_VALUE;
+ term = Long.MIN_VALUE;
+ data = null;
+ }
+
+ LE(final SimpleReplicatedLogEntry logEntry) {
+ index = logEntry.getIndex();
+ term = logEntry.getTerm();
+ data = logEntry.getData();
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLongs(out, index, term);
+ out.writeObject(data);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final byte hdr = WritableObjects.readLongHeader(in);
+ index = WritableObjects.readFirstLong(in, hdr);
+ term = WritableObjects.readSecondLong(in, hdr);
+ data = (Payload) in.readObject();
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return new SimpleReplicatedLogEntry(index, term, data);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+/**
+ * Marker interface for serializable objects which have been migrated. It implements {@link MigratedSerializable} and
+ * always returns {@code true} from {@link #isMigrated()}. This interface is marked as deprecated , as any of its users
+ * should also be marked as deprecated.
+ */
+@Deprecated
+public interface LegacySerializable extends MigratedSerializable {
+ @Override
+ @Deprecated(forRemoval = true)
+ default boolean isMigrated() {
+ return true;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Serializable;
+
+/**
+ * Serialization proxy for {@link NoopPayload}.
+ */
+// There is no need for Externalizable
+final class NP implements Serializable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ @java.io.Serial
+ private Object readResolve() {
+ return NoopPayload.INSTANCE;
+ }
+}
+
package org.opendaylight.controller.cluster.raft.persisted;
import akka.dispatch.ControlMessage;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
/**
* Payload used for no-op log entries that are put into the journal by the PreLeader in order to commit
*
* @author Thomas Pantelis
*/
-public final class NoopPayload extends Payload implements Serializable, ControlMessage {
- public static final NoopPayload INSTANCE = new NoopPayload();
-
- // There is no need for Externalizable
- private static final class Proxy implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private Object readResolve() {
- return INSTANCE;
- }
- }
-
+public final class NoopPayload extends Payload implements ControlMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private static final Proxy PROXY = new Proxy();
+ private static final @NonNull NP PROXY = new NP();
+ // Estimate to how big the proxy is. Note this includes object stream overhead, so it is a bit conservative
+ private static final int PROXY_SIZE = SerializationUtils.serialize(PROXY).length;
+
+ public static final @NonNull NoopPayload INSTANCE = new NoopPayload();
private NoopPayload() {
+ // Hidden on purpose
}
@Override
return 0;
}
- private Object writeReplace() {
+ @Override
+ public int serializedSize() {
+ return PROXY_SIZE;
+ }
+
+ @Override
+ protected Object writeReplace() {
return PROXY;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Externalizable proxy for {@link Snapshot}.
+ */
+final class SS implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private Snapshot snapshot;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public SS() {
+ // For Externalizable
+ }
+
+ SS(final Snapshot snapshot) {
+ this.snapshot = requireNonNull(snapshot);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLongs(out, snapshot.getLastIndex(), snapshot.getLastTerm());
+ WritableObjects.writeLongs(out, snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm());
+ WritableObjects.writeLong(out, snapshot.getElectionTerm());
+ out.writeObject(snapshot.getElectionVotedFor());
+ out.writeObject(snapshot.getServerConfiguration());
+
+ final var unAppliedEntries = snapshot.getUnAppliedEntries();
+ out.writeInt(unAppliedEntries.size());
+ for (var e : unAppliedEntries) {
+ WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+ out.writeObject(e.getData());
+ }
+
+ out.writeObject(snapshot.getState());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ byte hdr = WritableObjects.readLongHeader(in);
+ long lastIndex = WritableObjects.readFirstLong(in, hdr);
+ long lastTerm = WritableObjects.readSecondLong(in, hdr);
+
+ hdr = WritableObjects.readLongHeader(in);
+ long lastAppliedIndex = WritableObjects.readFirstLong(in, hdr);
+ long lastAppliedTerm = WritableObjects.readSecondLong(in, hdr);
+ long electionTerm = WritableObjects.readLong(in);
+ String electionVotedFor = (String) in.readObject();
+ ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
+
+ int size = in.readInt();
+ var unAppliedEntries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+ for (int i = 0; i < size; i++) {
+ hdr = WritableObjects.readLongHeader(in);
+ unAppliedEntries.add(new SimpleReplicatedLogEntry(
+ WritableObjects.readFirstLong(in, hdr), WritableObjects.readSecondLong(in, hdr),
+ (Payload) in.readObject()));
+ }
+
+ State state = (State) in.readObject();
+
+ snapshot = Snapshot.create(state, unAppliedEntries.build(), lastIndex, lastTerm, lastAppliedIndex,
+ lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(snapshot);
+ }
+}
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
import java.util.List;
import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* @author Thomas Pantelis
*/
-public final class ServerConfigurationPayload extends Payload implements PersistentPayload, Serializable {
+public final class ServerConfigurationPayload extends Payload implements PersistentPayload {
private static final class Proxy implements Externalizable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private List<ServerInfo> serverConfig;
}
Proxy(final ServerConfigurationPayload payload) {
- this.serverConfig = payload.getServerConfig();
+ serverConfig = payload.getServerConfig();
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
out.writeInt(serverConfig.size());
- for (ServerInfo i : serverConfig) {
- out.writeObject(i.getId());
- out.writeBoolean(i.isVoting());
+ for (var serverInfo : serverConfig) {
+ out.writeObject(serverInfo.peerId());
+ out.writeBoolean(serverInfo.isVoting());
}
}
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
final int size = in.readInt();
- serverConfig = new ArrayList<>(size);
+
+ final var builder = ImmutableList.<ServerInfo>builderWithExpectedSize(size);
for (int i = 0; i < size; ++i) {
final String id = (String) in.readObject();
final boolean voting = in.readBoolean();
- serverConfig.add(new ServerInfo(id, voting));
+ builder.add(new ServerInfo(id, voting));
}
+ serverConfig = builder.build();
}
+ @java.io.Serial
private Object readResolve() {
return new ServerConfigurationPayload(serverConfig);
}
}
private static final Logger LOG = LoggerFactory.getLogger(ServerConfigurationPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@Override
public int size() {
+ return serializedSize();
+ }
+
+ @Override
+ public int serializedSize() {
if (serializedSize < 0) {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
}
@Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
-
- if (obj == null) {
- return false;
- }
-
- if (getClass() != obj.getClass()) {
- return false;
- }
-
- ServerConfigurationPayload other = (ServerConfigurationPayload) obj;
- return serverConfig.equals(other.serverConfig);
+ public boolean equals(final Object obj) {
+ return this == obj || obj instanceof ServerConfigurationPayload other
+ && serverConfig.equals(other.serverConfig);
}
@Override
return "ServerConfigurationPayload [serverConfig=" + serverConfig + "]";
}
- private Object writeReplace() {
+ @Override
+ protected Object writeReplace() {
return new Proxy(this);
}
}
*
* @author Thomas Pantelis
*/
-public final class ServerInfo {
- private final String id;
- private final boolean isVoting;
-
- public ServerInfo(@NonNull String id, boolean isVoting) {
- this.id = requireNonNull(id);
- this.isVoting = isVoting;
- }
-
- public @NonNull String getId() {
- return id;
- }
-
- public boolean isVoting() {
- return isVoting;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + Boolean.hashCode(isVoting);
- result = prime * result + id.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof ServerInfo)) {
- return false;
- }
-
- final ServerInfo other = (ServerInfo) obj;
- return isVoting == other.isVoting && id.equals(other.id);
- }
-
- @Override
- public String toString() {
- return "ServerInfo [id=" + id + ", isVoting=" + isVoting + "]";
+public record ServerInfo(@NonNull String peerId, boolean isVoting) {
+ public ServerInfo {
+ requireNonNull(peerId);
}
}
\ No newline at end of file
import static java.util.Objects.requireNonNull;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
+import org.apache.commons.lang3.SerializationUtils;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
/**
* A {@link ReplicatedLogEntry} implementation.
* @author Thomas Pantelis
*/
public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Serializable {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private ReplicatedLogEntry replicatedLogEntry;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ReplicatedLogEntry replicatedLogEntry) {
- this.replicatedLogEntry = replicatedLogEntry;
- }
-
- static int estimatedSerializedSize(final ReplicatedLogEntry replicatedLogEntry) {
- return 8 /* index */ + 8 /* term */ + replicatedLogEntry.getData().size()
- + 400 /* estimated extra padding for class info */;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(replicatedLogEntry.getIndex());
- out.writeLong(replicatedLogEntry.getTerm());
- out.writeObject(replicatedLogEntry.getData());
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- replicatedLogEntry = new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject());
- }
-
- private Object readResolve() {
- return replicatedLogEntry;
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ // Estimate to how big the proxy is. Note this includes object stream overhead, so it is a bit conservative.
+ private static final int PROXY_SIZE = SerializationUtils.serialize(new LE((Void) null)).length;
private final long index;
private final long term;
@Override
public int size() {
- return getData().size();
+ return payload.size();
+ }
+
+ @Override
+ public int serializedSize() {
+ return PROXY_SIZE + payload.serializedSize();
}
@Override
persistencePending = pending;
}
- private Object writeReplace() {
- return new Proxy(this);
- }
-
- public int estimatedSerializedSize() {
- return Proxy.estimatedSerializedSize(this);
- }
-
@Override
public int hashCode() {
final int prime = 31;
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
-
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
-
- SimpleReplicatedLogEntry other = (SimpleReplicatedLogEntry) obj;
- return index == other.index && term == other.term && payload.equals(other.payload);
+ return this == obj || obj instanceof SimpleReplicatedLogEntry other && index == other.index
+ && term == other.term && payload.equals(other.payload);
}
@Override
public String toString() {
return "SimpleReplicatedLogEntry [index=" + index + ", term=" + term + ", payload=" + payload + "]";
}
+
+ @java.io.Serial
+ private Object writeReplace() {
+ return new LE(this);
+ }
}
*/
package org.opendaylight.controller.cluster.raft.persisted;
-import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import akka.actor.ExtendedActorSystem;
}
@Override
- public byte[] toBinary(Object obj) {
- checkArgument(obj instanceof SimpleReplicatedLogEntry, "Unsupported object type %s", obj.getClass());
+ public byte[] toBinary(final Object obj) {
+ if (!(obj instanceof SimpleReplicatedLogEntry replicatedLogEntry)) {
+ throw new IllegalArgumentException("Unsupported object type " + obj.getClass());
+ }
- SimpleReplicatedLogEntry replicatedLogEntry = (SimpleReplicatedLogEntry)obj;
- final int estimatedSerializedSize = replicatedLogEntry.estimatedSerializedSize();
+ final int estimatedSerializedSize = replicatedLogEntry.serializedSize();
final ByteArrayOutputStream bos = new ByteArrayOutputStream(estimatedSerializedSize);
SerializationUtils.serialize(replicatedLogEntry, bos);
}
@Override
- public Object fromBinaryJava(byte[] bytes, Class<?> manifest) {
+ public Object fromBinaryJava(final byte[] bytes, final Class<?> manifest) {
try (ClassLoaderObjectInputStream is = new ClassLoaderObjectInputStream(system.dynamicAccess().classLoader(),
new ByteArrayInputStream(bytes))) {
return is.readObject();
*/
package org.opendaylight.controller.cluster.raft.persisted;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
-import java.util.ArrayList;
import java.util.List;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
/**
* Represents a snapshot of the raft data.
*
* @author Thomas Pantelis
*/
-// Not final for mocking
-public class Snapshot implements Serializable {
-
+public final class Snapshot implements Serializable {
/**
* Implementations of this interface are used as the state payload for a snapshot.
*
}
}
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private Snapshot snapshot;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final Snapshot snapshot) {
- this.snapshot = snapshot;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(snapshot.lastIndex);
- out.writeLong(snapshot.lastTerm);
- out.writeLong(snapshot.lastAppliedIndex);
- out.writeLong(snapshot.lastAppliedTerm);
- out.writeLong(snapshot.electionTerm);
- out.writeObject(snapshot.electionVotedFor);
- out.writeObject(snapshot.serverConfig);
-
- out.writeInt(snapshot.unAppliedEntries.size());
- for (ReplicatedLogEntry e: snapshot.unAppliedEntries) {
- out.writeLong(e.getIndex());
- out.writeLong(e.getTerm());
- out.writeObject(e.getData());
- }
-
- out.writeObject(snapshot.state);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- long lastIndex = in.readLong();
- long lastTerm = in.readLong();
- long lastAppliedIndex = in.readLong();
- long lastAppliedTerm = in.readLong();
- long electionTerm = in.readLong();
- String electionVotedFor = (String) in.readObject();
- ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
-
- int size = in.readInt();
- List<ReplicatedLogEntry> unAppliedEntries = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- unAppliedEntries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(),
- (Payload) in.readObject()));
- }
-
- State state = (State) in.readObject();
-
- snapshot = Snapshot.create(state, unAppliedEntries, lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm,
- electionTerm, electionVotedFor, serverConfig);
- }
-
- private Object readResolve() {
- return snapshot;
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final State state;
private final String electionVotedFor;
private final ServerConfigurationPayload serverConfig;
- Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
+ private Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
final long lastTerm, final long lastAppliedIndex, final long lastAppliedTerm, final long electionTerm,
final String electionVotedFor, final ServerConfigurationPayload serverConfig) {
this.state = state;
}
public long getLastIndex() {
- return this.lastIndex;
+ return lastIndex;
}
public long getElectionTerm() {
return serverConfig;
}
- private Object writeReplace() {
- return new Proxy(this);
- }
-
@Override
public String toString() {
return "Snapshot [lastIndex=" + lastIndex + ", lastTerm=" + lastTerm + ", lastAppliedIndex=" + lastAppliedIndex
+ ", state=" + state + ", electionTerm=" + electionTerm + ", electionVotedFor="
+ electionVotedFor + ", ServerConfigPayload=" + serverConfig + "]";
}
+
+ @java.io.Serial
+ private Object writeReplace() {
+ return new SS(this);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link UpdateElectionTerm}.
+ */
+final class UT implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private UpdateElectionTerm updateElectionTerm;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public UT() {
+ // For Externalizable
+ }
+
+ UT(final UpdateElectionTerm updateElectionTerm) {
+ this.updateElectionTerm = requireNonNull(updateElectionTerm);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ WritableObjects.writeLong(out, updateElectionTerm.getCurrentTerm());
+ out.writeObject(updateElectionTerm.getVotedFor());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ updateElectionTerm = new UpdateElectionTerm(WritableObjects.readLong(in), (String) in.readObject());
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(updateElectionTerm);
+ }
+}
*/
package org.opendaylight.controller.cluster.raft.persisted;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
/**
* Message class to persist election term information.
*/
-public class UpdateElectionTerm implements Serializable {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private UpdateElectionTerm updateElectionTerm;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final UpdateElectionTerm updateElectionTerm) {
- this.updateElectionTerm = updateElectionTerm;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeLong(updateElectionTerm.currentTerm);
- out.writeObject(updateElectionTerm.votedFor);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- updateElectionTerm = new UpdateElectionTerm(in.readLong(), (String) in.readObject());
- }
-
- private Object readResolve() {
- return updateElectionTerm;
- }
- }
-
+public final class UpdateElectionTerm implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final long currentTerm;
return votedFor;
}
- private Object writeReplace() {
- return new Proxy(this);
- }
-
@Override
public String toString() {
return "UpdateElectionTerm [currentTerm=" + currentTerm + ", votedFor=" + votedFor + "]";
}
+
+ @java.io.Serial
+ private Object writeReplace() {
+ return new UT(this);
+ }
}
*/
package org.opendaylight.controller.cluster.raft;
-import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import akka.actor.Terminated;
import akka.dispatch.Dispatchers;
import akka.dispatch.Mailboxes;
+import akka.pattern.Patterns;
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
import akka.util.Timeout;
import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.OutputStream;
import java.time.Duration;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
TestRaftActor(final Builder builder) {
super(builder);
- this.collectorActor = builder.collectorActor;
+ collectorActor = builder.collectorActor;
}
public void startDropMessages(final Class<?> msgClass) {
@SuppressWarnings({ "rawtypes", "unchecked", "checkstyle:IllegalCatch" })
@Override
public void handleCommand(final Object message) {
- if (message instanceof MockPayload) {
- MockPayload payload = (MockPayload) message;
+ if (message instanceof MockPayload payload) {
super.persistData(collectorActor, new MockIdentifier(payload.toString()), payload, false);
return;
}
- if (message instanceof ServerConfigurationPayload) {
- super.persistData(collectorActor, new MockIdentifier("serverConfig"), (Payload) message, false);
+ if (message instanceof ServerConfigurationPayload payload) {
+ super.persistData(collectorActor, new MockIdentifier("serverConfig"), payload, false);
return;
}
- if (message instanceof SetPeerAddress) {
- setPeerAddress(((SetPeerAddress) message).getPeerId(),
- ((SetPeerAddress) message).getPeerAddress());
+ if (message instanceof SetPeerAddress setPeerAddress) {
+ setPeerAddress(setPeerAddress.getPeerId(), setPeerAddress.getPeerAddress());
return;
}
- if (message instanceof TestPersist) {
- persistData(((TestPersist) message).getActorRef(), ((TestPersist) message).getIdentifier(),
- ((TestPersist) message).getPayload(), false);
+ if (message instanceof TestPersist testPersist) {
+ persistData(testPersist.getActorRef(), testPersist.getIdentifier(), testPersist.getPayload(), false);
return;
}
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
- MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+ MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
if (installSnapshotStream.isPresent()) {
- SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+ SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
}
actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
}
public Builder collectorActor(final ActorRef newCollectorActor) {
- this.collectorActor = newCollectorActor;
+ collectorActor = newCollectorActor;
return this;
}
}
}
- protected static final int SNAPSHOT_CHUNK_SIZE = 100;
+ // FIXME: this is an arbitrary limit. Document interactions and/or improve them to improve maintainability
+ protected static final int MAXIMUM_MESSAGE_SLICE_SIZE = 700;
protected final Logger testLog = LoggerFactory.getLogger(getClass());
protected String follower2Id = factory.generateActorId("follower");
protected TestActorRef<TestRaftActor> follower2Actor;
protected ActorRef follower2CollectorActor;
- protected RaftActorBehavior follower2;
+ protected RaftActorBehavior follower2;
protected RaftActorContext follower2Context;
- protected ImmutableMap<String, String> peerAddresses;
+ protected Map<String, String> peerAddresses;
protected long initialTerm = 5;
protected long currentTerm;
protected int snapshotBatchCount = 4;
- protected int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+ protected int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
protected List<MockPayload> expSnapshotState = new ArrayList<>();
configParams.setSnapshotBatchCount(snapshotBatchCount);
configParams.setSnapshotDataThresholdPercentage(70);
configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
- configParams.setSnapshotChunkSize(snapshotChunkSize);
+ configParams.setMaximumMessageSliceSize(maximumMessageSliceSize);
return configParams;
}
protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final Map<String, String> newPeerAddresses,
final ConfigParams configParams) {
return newTestRaftActor(id, TestRaftActor.newBuilder().peerAddresses(newPeerAddresses != null
- ? newPeerAddresses : Collections.<String, String>emptyMap()).config(configParams));
+ ? newPeerAddresses : Map.of()).config(configParams));
}
protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final TestRaftActor.Builder builder) {
Stopwatch sw = Stopwatch.createStarted();
while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
try {
- OnDemandRaftState raftState = (OnDemandRaftState)Await.result(ask(raftActor,
+ OnDemandRaftState raftState = (OnDemandRaftState)Await.result(Patterns.ask(raftActor,
GetOnDemandRaftState.INSTANCE, timeout), timeout.duration());
verifier.accept(raftState);
return;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
assertEquals("lastTerm", -1, replicatedLogImpl.lastTerm());
assertEquals("isPresent", false, replicatedLogImpl.isPresent(0));
assertEquals("isInSnapshot", false, replicatedLogImpl.isInSnapshot(0));
- Assert.assertNull("get(0)", replicatedLogImpl.get(0));
- Assert.assertNull("last", replicatedLogImpl.last());
+ assertNull("get(0)", replicatedLogImpl.get(0));
+ assertNull("last", replicatedLogImpl.last());
List<ReplicatedLogEntry> list = replicatedLogImpl.getFrom(0, 1, ReplicatedLog.NO_MAX_SIZE);
assertEquals("getFrom size", 0, list.size());
@Test
public void testGetFromWithMax() {
List<ReplicatedLogEntry> from = replicatedLogImpl.getFrom(0, 1, ReplicatedLog.NO_MAX_SIZE);
- Assert.assertEquals(1, from.size());
- Assert.assertEquals("A", from.get(0).getData().toString());
+ assertEquals(1, from.size());
+ assertEquals("A", from.get(0).getData().toString());
from = replicatedLogImpl.getFrom(0, 20, ReplicatedLog.NO_MAX_SIZE);
- Assert.assertEquals(4, from.size());
- Assert.assertEquals("A", from.get(0).getData().toString());
- Assert.assertEquals("D", from.get(3).getData().toString());
+ assertEquals(4, from.size());
+ assertEquals("A", from.get(0).getData().toString());
+ assertEquals("B", from.get(1).getData().toString());
+ assertEquals("C", from.get(2).getData().toString());
+ assertEquals("D", from.get(3).getData().toString());
+
+ // Pre-calculate sizing information for use with capping
+ final int sizeB = from.get(1).serializedSize();
+ final int sizeC = from.get(2).serializedSize();
+ final int sizeD = from.get(3).serializedSize();
from = replicatedLogImpl.getFrom(1, 2, ReplicatedLog.NO_MAX_SIZE);
- Assert.assertEquals(2, from.size());
- Assert.assertEquals("B", from.get(0).getData().toString());
- Assert.assertEquals("C", from.get(1).getData().toString());
-
- from = replicatedLogImpl.getFrom(1, 3, 2);
- Assert.assertEquals(2, from.size());
- Assert.assertEquals("B", from.get(0).getData().toString());
- Assert.assertEquals("C", from.get(1).getData().toString());
-
- from = replicatedLogImpl.getFrom(1, 3, 3);
- Assert.assertEquals(3, from.size());
- Assert.assertEquals("B", from.get(0).getData().toString());
- Assert.assertEquals("C", from.get(1).getData().toString());
- Assert.assertEquals("D", from.get(2).getData().toString());
-
- from = replicatedLogImpl.getFrom(1, 2, 3);
- Assert.assertEquals(2, from.size());
- Assert.assertEquals("B", from.get(0).getData().toString());
- Assert.assertEquals("C", from.get(1).getData().toString());
+ assertEquals(2, from.size());
+ assertEquals("B", from.get(0).getData().toString());
+ assertEquals("C", from.get(1).getData().toString());
+
+ from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC);
+ assertEquals(2, from.size());
+ assertEquals("B", from.get(0).getData().toString());
+ assertEquals("C", from.get(1).getData().toString());
+
+ from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC + sizeD);
+ assertEquals(3, from.size());
+ assertEquals("B", from.get(0).getData().toString());
+ assertEquals("C", from.get(1).getData().toString());
+ assertEquals("D", from.get(2).getData().toString());
+
+ from = replicatedLogImpl.getFrom(1, 2, sizeB + sizeC + sizeD);
+ assertEquals(2, from.size());
+ assertEquals("B", from.get(0).getData().toString());
+ assertEquals("C", from.get(1).getData().toString());
replicatedLogImpl.append(new SimpleReplicatedLogEntry(4, 2, new MockPayload("12345")));
from = replicatedLogImpl.getFrom(4, 2, 2);
- Assert.assertEquals(1, from.size());
- Assert.assertEquals("12345", from.get(0).getData().toString());
+ assertEquals(1, from.size());
+ assertEquals("12345", from.get(0).getData().toString());
}
@Test
assertEquals("lastIndex", 3, replicatedLogImpl.lastIndex());
assertEquals("lastTerm", 2, replicatedLogImpl.lastTerm());
- Assert.assertNull("get(0)", replicatedLogImpl.get(0));
- Assert.assertNull("get(1)", replicatedLogImpl.get(1));
- Assert.assertNotNull("get(2)", replicatedLogImpl.get(2));
- Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+ assertNull("get(0)", replicatedLogImpl.get(0));
+ assertNull("get(1)", replicatedLogImpl.get(1));
+ assertNotNull("get(2)", replicatedLogImpl.get(2));
+ assertNotNull("get(3)", replicatedLogImpl.get(3));
}
@Test
assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
- Assert.assertNotNull("get(0)", replicatedLogImpl.get(0));
- Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+ assertNotNull("get(0)", replicatedLogImpl.get(0));
+ assertNotNull("get(3)", replicatedLogImpl.get(3));
}
@Test
}
- class MockAbstractReplicatedLogImpl extends AbstractReplicatedLogImpl {
+ static class MockAbstractReplicatedLogImpl extends AbstractReplicatedLogImpl {
@Override
public boolean removeFromAndPersist(final long index) {
return true;
}
@Override
- public boolean appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Consumer<ReplicatedLogEntry> callback,
- boolean doAsync) {
+ public boolean appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+ final Consumer<ReplicatedLogEntry> callback, final boolean doAsync) {
if (callback != null) {
callback.accept(replicatedLogEntry);
}
}
@Override
- public void captureSnapshotIfReady(ReplicatedLogEntry replicatedLogEntry) {
+ public void captureSnapshotIfReady(final ReplicatedLogEntry replicatedLogEntry) {
+ // No-op
}
@Override
- public boolean shouldCaptureSnapshot(long logIndex) {
+ public boolean shouldCaptureSnapshot(final long logIndex) {
return false;
}
}
import static org.mockito.Mockito.verify;
import akka.japi.Procedure;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
import org.slf4j.Logger;
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ElectionTermImplTest {
private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
@Mock
private DataPersistenceProvider mockPersistence;
- @Before
- public void setup() {
- MockitoAnnotations.initMocks(this);
- }
-
- @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public void testUpdateAndPersist() throws Exception {
ElectionTermImpl impl = new ElectionTermImpl(mockPersistence, "test", LOG);
import akka.pattern.Patterns;
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
private void createRaftActors() {
testLog.info("createRaftActors starting");
- final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), -1, -1, -1, -1,
+ final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(), -1, -1, -1, -1,
1, null, new org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload(
- Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
+ List.of(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
new ServerInfo(follower2Id, true), new ServerInfo(follower3Id, false))));
InMemorySnapshotStore.addSnapshot(leaderId, snapshot);
follower1NotifierActor = factory.createActor(MessageCollectorActor.props(),
factory.generateActorId(follower1Id + "-notifier"));
follower1Actor = newTestRaftActor(follower1Id, TestRaftActor.newBuilder().peerAddresses(
- ImmutableMap.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
+ Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
follower3Id, testActorPath(follower3Id)))
.config(newFollowerConfigParams()).roleChangeNotifier(follower1NotifierActor));
follower2NotifierActor = factory.createActor(MessageCollectorActor.props(),
factory.generateActorId(follower2Id + "-notifier"));
follower2Actor = newTestRaftActor(follower2Id,TestRaftActor.newBuilder().peerAddresses(
- ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+ Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
follower3Id, testActorPath(follower3Id)))
.config(newFollowerConfigParams()).roleChangeNotifier(follower2NotifierActor));
follower3NotifierActor = factory.createActor(MessageCollectorActor.props(),
factory.generateActorId(follower3Id + "-notifier"));
follower3Actor = newTestRaftActor(follower3Id,TestRaftActor.newBuilder().peerAddresses(
- ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+ Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
follower2Id, follower2Actor.path().toString()))
.config(newFollowerConfigParams()).roleChangeNotifier(follower3NotifierActor));
- peerAddresses = ImmutableMap.<String, String>builder()
- .put(follower1Id, follower1Actor.path().toString())
- .put(follower2Id, follower2Actor.path().toString())
- .put(follower3Id, follower3Actor.path().toString()).build();
+ peerAddresses = Map.of(
+ follower1Id, follower1Actor.path().toString(),
+ follower2Id, follower2Actor.path().toString(),
+ follower3Id, follower3Actor.path().toString());
leaderConfigParams = newLeaderConfigParams();
leaderConfigParams.setElectionTimeoutFactor(3);
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.SerializationUtils;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
super(builder.id, builder.peerAddresses != null ? builder.peerAddresses :
Collections.emptyMap(), Optional.ofNullable(builder.config), PAYLOAD_VERSION);
state = Collections.synchronizedList(new ArrayList<>());
- this.actorDelegate = mock(RaftActor.class);
- this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+ actorDelegate = mock(RaftActor.class);
+ recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
- this.snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
+ snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
mock(RaftActorSnapshotCohort.class);
if (builder.dataPersistenceProvider == null) {
- setPersistence(builder.persistent.isPresent() ? builder.persistent.get() : true);
+ setPersistence(builder.persistent.isPresent() ? builder.persistent.orElseThrow() : true);
} else {
setPersistence(builder.dataPersistenceProvider);
}
}
private void applySnapshotState(final Snapshot.State newState) {
- if (newState instanceof MockSnapshotState) {
+ if (newState instanceof MockSnapshotState mockState) {
state.clear();
- state.addAll(((MockSnapshotState)newState).getState());
+ state.addAll(mockState.getState());
}
}
}
@Override public String persistenceId() {
- return this.getId();
+ return getId();
}
protected void newBehavior(final RaftActorBehavior newBehavior) {
}
public static List<Object> fromState(final Snapshot.State from) {
- if (from instanceof MockSnapshotState) {
- return ((MockSnapshotState)from).getState();
+ if (from instanceof MockSnapshotState mockState) {
+ return mockState.getState();
}
throw new IllegalStateException("Unexpected snapshot State: " + from);
}
public ReplicatedLog getReplicatedLog() {
- return this.getRaftActorContext().getReplicatedLog();
+ return getRaftActorContext().getReplicatedLog();
}
@Override
}
public T id(final String newId) {
- this.id = newId;
+ id = newId;
return self();
}
public T peerAddresses(final Map<String, String> newPeerAddresses) {
- this.peerAddresses = newPeerAddresses;
+ peerAddresses = newPeerAddresses;
return self();
}
public T config(final ConfigParams newConfig) {
- this.config = newConfig;
+ config = newConfig;
return self();
}
public T dataPersistenceProvider(final DataPersistenceProvider newDataPersistenceProvider) {
- this.dataPersistenceProvider = newDataPersistenceProvider;
+ dataPersistenceProvider = newDataPersistenceProvider;
return self();
}
public T roleChangeNotifier(final ActorRef newRoleChangeNotifier) {
- this.roleChangeNotifier = newRoleChangeNotifier;
+ roleChangeNotifier = newRoleChangeNotifier;
return self();
}
public T snapshotMessageSupport(final RaftActorSnapshotMessageSupport newSnapshotMessageSupport) {
- this.snapshotMessageSupport = newSnapshotMessageSupport;
+ snapshotMessageSupport = newSnapshotMessageSupport;
return self();
}
public T restoreFromSnapshot(final Snapshot newRestoreFromSnapshot) {
- this.restoreFromSnapshot = newRestoreFromSnapshot;
+ restoreFromSnapshot = newRestoreFromSnapshot;
return self();
}
public T persistent(final Optional<Boolean> newPersistent) {
- this.persistent = newPersistent;
+ persistent = newPersistent;
return self();
}
public T pauseLeaderFunction(final Function<Runnable, Void> newPauseLeaderFunction) {
- this.pauseLeaderFunction = newPauseLeaderFunction;
+ pauseLeaderFunction = newPauseLeaderFunction;
return self();
}
public T snapshotCohort(final RaftActorSnapshotCohort newSnapshotCohort) {
- this.snapshotCohort = newSnapshotCohort;
+ snapshotCohort = newSnapshotCohort;
return self();
}
@Override
public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + (state == null ? 0 : state.hashCode());
- return result;
+ return Objects.hash(state);
}
@Override
return false;
}
MockSnapshotState other = (MockSnapshotState) obj;
- if (state == null) {
- if (other.state != null) {
- return false;
- }
- } else if (!state.equals(other.state)) {
+ if (!Objects.equals(state, other.state)) {
return false;
}
return true;
package org.opendaylight.controller.cluster.raft;
+import static java.util.Objects.requireNonNull;
+
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.NonPersistentDataProvider;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.ByteState;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
public void update(final long newTerm, final String newVotedFor) {
- this.currentTerm = newTerm;
- this.votedFor = newVotedFor;
+ currentTerm = newTerm;
+ votedFor = newVotedFor;
// TODO : Write to some persistent state
}
}
@Override public ActorSystem getActorSystem() {
- return this.system;
+ return system;
}
@Override public ActorSelection getPeerActorSelection(final String peerId) {
}
}
- public static class MockPayload extends Payload implements Serializable {
+ public static final class MockPayload extends Payload {
private static final long serialVersionUID = 3121380393130864247L;
- private String value = "";
- private int size;
+
+ private final String data;
+ private final int size;
public MockPayload() {
+ this("");
}
public MockPayload(final String data) {
- this.value = data;
- size = value.length();
+ this(data, data.length());
}
public MockPayload(final String data, final int size) {
- this(data);
+ this.data = requireNonNull(data);
this.size = size;
}
return size;
}
+ @Override
+ public int serializedSize() {
+ return size;
+ }
+
@Override
public String toString() {
- return value;
+ return data;
}
@Override
public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + (value == null ? 0 : value.hashCode());
- return result;
+ return data.hashCode();
}
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- MockPayload other = (MockPayload) obj;
- if (value == null) {
- if (other.value != null) {
- return false;
- }
- } else if (!value.equals(other.value)) {
- return false;
- }
- return true;
+ return this == obj || obj instanceof MockPayload other && Objects.equals(data, other.data)
+ && size == other.size;
+ }
+
+ @Override
+ protected Object writeReplace() {
+ return new MockPayloadProxy(data, size);
+ }
+ }
+
+ private static final class MockPayloadProxy implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String value;
+ private final int size;
+
+ MockPayloadProxy(String value, int size) {
+ this.value = value;
+ this.size = size;
+ }
+
+ Object readResolve() {
+ return new MockPayload(value, size);
}
}
public MockReplicatedLogBuilder createEntries(final int start, final int end, final int term) {
for (int i = start; i < end; i++) {
- this.mockLog.append(new SimpleReplicatedLogEntry(i, term,
+ mockLog.append(new SimpleReplicatedLogEntry(i, term,
new MockRaftActorContext.MockPayload(Integer.toString(i))));
}
return this;
}
public MockReplicatedLogBuilder addEntry(final int index, final int term, final MockPayload payload) {
- this.mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
+ mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
return this;
}
public ReplicatedLog build() {
- return this.mockLog;
+ return mockLog;
}
}
import static org.junit.Assert.assertEquals;
import akka.actor.ActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
//
// We also add another voting follower actor into the mix even though it shoildn't affect the
// outcome.
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false),
new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm,
DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams();
follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(
- ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
+ Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
.config(follower2ConfigParams).persistent(Optional.of(false)));
TestRaftActor follower2Instance = follower2Actor.underlyingActor();
follower2Instance.waitForRecoveryComplete();
follower2CollectorActor = follower2Instance.collectorActor();
- peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(),
+ peerAddresses = Map.of(follower1Id, follower1Actor.path().toString(),
follower2Id, follower2Actor.path().toString());
createNewLeaderActor();
// Set up a persisted ServerConfigurationPayload with the leader voting and the follower non-voting.
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, persistedTerm,
persistedServerConfig);
DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
follower1Actor = newTestRaftActor(follower1Id, follower1Builder.peerAddresses(
- ImmutableMap.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
+ Map.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
.persistent(Optional.of(false)));
- peerAddresses = ImmutableMap.<String, String>builder()
- .put(follower1Id, follower1Actor.path().toString()).build();
+ peerAddresses = Map.of(follower1Id, follower1Actor.path().toString());
leaderConfigParams = newLeaderConfigParams();
leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses)
currentTerm = persistedTerm + 1;
assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
- assertEquals("Leader server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
- Sets.newHashSet(leaderContext.getPeerServerInfo(true).getServerConfig()));
+ assertEquals("Leader server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+ Set.copyOf(leaderContext.getPeerServerInfo(true).getServerConfig()));
assertEquals("Leader isVotingMember", true, leaderContext.isVotingMember());
// Verify follower's context after startup
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
- assertEquals("Follower server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
- Sets.newHashSet(follower1Context.getPeerServerInfo(true).getServerConfig()));
+ assertEquals("Follower server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+ Set.copyOf(follower1Context.getPeerServerInfo(true).getServerConfig()));
assertEquals("FollowerisVotingMember", false, follower1Context.isVotingMember());
}
}
import akka.actor.Props;
import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Arrays;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.junit.After;
import org.junit.Test;
DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
"test", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
- new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")), configParams,
+ Map.of("peer1", "peerAddress1"), configParams,
createProvider(), applyState -> { }, LOG, MoreExecutors.directExecutor());
context.setPeerAddress("peer1", "peerAddress1_1");
public void testUpdatePeerIds() {
RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
"self", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
- new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")),
+ Map.of("peer1", "peerAddress1"),
new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG,
MoreExecutors.directExecutor());
- context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", false),
+ context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", false),
new ServerInfo("peer2", true), new ServerInfo("peer3", false))));
verifyPeerInfo(context, "peer1", null);
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", false);
assertEquals("isVotingMember", false, context.isVotingMember());
- context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", true),
+ context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", true),
new ServerInfo("peer2", true), new ServerInfo("peer3", true))));
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", true);
assertEquals("isVotingMember", true, context.isVotingMember());
- context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("peer2", true),
+ context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("peer2", true),
new ServerInfo("peer3", true))));
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", true);
PeerInfo peerInfo = context.getPeerInfo(peerId);
if (voting != null) {
assertNotNull("Expected peer " + peerId, peerInfo);
- assertEquals("getVotingState for " + peerId, voting.booleanValue()
+ assertEquals("getVotingState for " + peerId, voting
? VotingState.VOTING : VotingState.NON_VOTING, peerInfo.getVotingState());
} else {
assertNull("Unexpected peer " + peerId, peerInfo);
import akka.japi.Procedure;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
/**
* Unit tests for RaftActorDelegatingPersistentDataProvider.
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RaftActorDelegatingPersistentDataProviderTest {
private static final Payload PERSISTENT_PAYLOAD = new TestPersistentPayload();
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
doReturn(PERSISTENT_PAYLOAD).when(mockPersistentLogEntry).getData();
doReturn(NON_PERSISTENT_PAYLOAD).when(mockNonPersistentLogEntry).getData();
provider = new RaftActorDelegatingPersistentDataProvider(mockDelegateProvider, mockPersistentProvider);
}
static class TestNonPersistentPayload extends Payload {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
@Override
public int size() {
return 0;
}
+
+ @Override
+ public int serializedSize() {
+ return 0;
+ }
+
+ @Override
+ protected Object writeReplace() {
+ // Not needed
+ throw new UnsupportedOperationException();
+ }
}
static class TestPersistentPayload extends TestNonPersistentPayload implements PersistentPayload {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
}
}
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SnapshotMetadata;
import akka.persistence.SnapshotOffer;
-import com.google.common.collect.Sets;
+import akka.testkit.javadsl.TestKit;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Collections;
+import java.util.List;
+import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
+import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentMatchers;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.raft.MockRaftActor.MockSnapshotState;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RaftActorRecoverySupportTest {
-
private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
@Mock
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
mockActorSystem = ActorSystem.create();
mockActorRef = mockActorSystem.actorOf(Props.create(DoNothingActor.class));
context = new RaftActorContextImpl(mockActorRef, null, localId,
new ElectionTermImpl(mockPersistentProvider, "test", LOG), -1, -1,
- Collections.<String, String>emptyMap(), configParams, mockPersistence, applyState -> {
+ Map.of(), configParams, mockPersistence, applyState -> {
}, LOG, MoreExecutors.directExecutor());
support = new RaftActorRecoverySupport(context, mockCohort);
context.setReplicatedLog(ReplicatedLogImpl.newInstance(context));
}
+ @After
+ public void tearDown() {
+ TestKit.shutdownActorSystem(mockActorSystem);
+ }
+
private void sendMessageToSupport(final Object message) {
sendMessageToSupport(message, false);
}
long electionTerm = 2;
String electionVotedFor = "member-2";
- MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(new MockPayload("1")));
+ MockSnapshotState snapshotState = new MockSnapshotState(List.of(new MockPayload("1")));
Snapshot snapshot = Snapshot.create(snapshotState,
- Arrays.asList(unAppliedEntry1, unAppliedEntry2), lastIndexDuringSnapshotCapture, 1,
+ List.of(unAppliedEntry1, unAppliedEntry2), lastIndexDuringSnapshotCapture, 1,
lastAppliedDuringSnapshotCapture, 1, electionTerm, electionVotedFor, null);
SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
@Test
public void testDataRecoveredWithPersistenceDisabled() {
- doNothing().when(mockCohort).applyRecoverySnapshot(any());
doReturn(false).when(mockPersistence).isRecoveryApplicable();
doReturn(10L).when(mockPersistentProvider).getLastSequenceNumber();
- Snapshot snapshot = Snapshot.create(new MockSnapshotState(Arrays.asList(new MockPayload("1"))),
- Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1, -1, null, null);
+ Snapshot snapshot = Snapshot.create(new MockSnapshotState(List.of(new MockPayload("1"))),
+ List.of(), 3, 1, 3, 1, -1, null, null);
SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
sendMessageToSupport(snapshotOffer);
@Test
public void testNoDataRecoveredWithPersistenceDisabled() {
- doReturn(false).when(mockPersistence).isRecoveryApplicable();
-
sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
assertEquals("Current term", 5, context.getTermInformation().getCurrentTerm());
context.addToPeers(follower2, null, VotingState.VOTING);
//add new Server
- ServerConfigurationPayload obj = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload obj = new ServerConfigurationPayload(List.of(
new ServerInfo(localId, true),
new ServerInfo(follower1, true),
new ServerInfo(follower2, false),
//verify new peers
assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
- assertEquals("New peer Ids", Sets.newHashSet(follower1, follower2, follower3),
- Sets.newHashSet(context.getPeerIds()));
+ assertEquals("New peer Ids", Set.of(follower1, follower2, follower3), Set.copyOf(context.getPeerIds()));
assertEquals("follower1 isVoting", true, context.getPeerInfo(follower1).isVoting());
assertEquals("follower2 isVoting", false, context.getPeerInfo(follower2).isVoting());
assertEquals("follower3 isVoting", true, context.getPeerInfo(follower3).isVoting());
verify(mockCohort, never()).appendRecoveredLogEntry(any(Payload.class));
//remove existing follower1
- obj = new ServerConfigurationPayload(Arrays.asList(
+ obj = new ServerConfigurationPayload(List.of(
new ServerInfo(localId, true),
new ServerInfo("follower2", true),
new ServerInfo("follower3", true)));
//verify new peers
assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
- assertEquals("New peer Ids", Sets.newHashSet(follower2, follower3), Sets.newHashSet(context.getPeerIds()));
+ assertEquals("New peer Ids", Set.of(follower2, follower3), Set.copyOf(context.getPeerIds()));
}
@Test
doReturn(false).when(mockPersistence).isRecoveryApplicable();
String follower = "follower";
- ServerConfigurationPayload obj = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload obj = new ServerConfigurationPayload(List.of(
new ServerInfo(localId, true), new ServerInfo(follower, true)));
sendMessageToSupport(new SimpleReplicatedLogEntry(0, 1, obj));
//verify new peers
- assertEquals("New peer Ids", Sets.newHashSet(follower), Sets.newHashSet(context.getPeerIds()));
+ assertEquals("New peer Ids", Set.of(follower), Set.copyOf(context.getPeerIds()));
}
@Test
public void testOnSnapshotOfferWithServerConfiguration() {
long electionTerm = 2;
String electionVotedFor = "member-2";
- ServerConfigurationPayload serverPayload = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload serverPayload = new ServerConfigurationPayload(List.of(
new ServerInfo(localId, true),
new ServerInfo("follower1", true),
new ServerInfo("follower2", true)));
- MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(new MockPayload("1")));
- Snapshot snapshot = Snapshot.create(snapshotState, Collections.<ReplicatedLogEntry>emptyList(),
+ MockSnapshotState snapshotState = new MockSnapshotState(List.of(new MockPayload("1")));
+ Snapshot snapshot = Snapshot.create(snapshotState, List.of(),
-1, -1, -1, -1, electionTerm, electionVotedFor, serverPayload);
SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
assertEquals("Election term", electionTerm, context.getTermInformation().getCurrentTerm());
assertEquals("Election votedFor", electionVotedFor, context.getTermInformation().getVotedFor());
assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
- assertEquals("Peer List", Sets.newHashSet("follower1", "follower2"),
- Sets.newHashSet(context.getPeerIds()));
+ assertEquals("Peer List", Set.of("follower1", "follower2"), Set.copyOf(context.getPeerIds()));
}
}
\ No newline at end of file
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteSource;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.OutputStream;
import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
followerActorContext.setCurrentBehavior(follower);
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
followerActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
- assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+ assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
// Verify ServerConfigurationPayload entry in leader's log
// Verify new server config was applied in both followers
- assertEquals("Follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
+ assertEquals("Follower peers", Set.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
- assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, FOLLOWER_ID),
- newFollowerActorContext.getPeerIds());
+ assertEquals("New follower peers", Set.of(LEADER_ID, FOLLOWER_ID), newFollowerActorContext.getPeerIds());
assertEquals("Follower commit index", 3, followerActorContext.getCommitIndex());
assertEquals("Follower last applied index", 3, followerActorContext.getLastApplied());
0, 2, 1).build());
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
- assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+ assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
// Verify ServerConfigurationPayload entry in leader's log
// Verify new server config was applied in the new follower
- assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds());
+ assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
LOG.info("testAddServerWithNoExistingFollower ending");
}
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
- assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+ assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
// Verify ServerConfigurationPayload entry in leader's log
// Verify new server config was applied in the new follower
- assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds());
+ assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
assertNoneMatching(newFollowerCollectorActor, InstallSnapshot.class, 500);
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
// Verify ServerConfigurationPayload entry in the new follower
expectMatching(newFollowerCollectorActor, ApplyState.class, 2);
- assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID2),
- newFollowerActorContext.getPeerIds());
+ assertEquals("New follower peers", Set.of(LEADER_ID, NEW_SERVER_ID2), newFollowerActorContext.getPeerIds());
LOG.info("testAddServerWithOperationInProgress ending");
}
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
- assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+ assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
expectFirstMatching(newFollowerCollectorActor, ApplySnapshot.class);
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
- MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+ MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
- initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ MockLeaderRaftActor.props(Map.of(), initialActorContext)
+ .withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
// The first AddServer should succeed with OK even though consensus wasn't reached
AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
- assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+ assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
// Verify ServerConfigurationPayload entry in leader's log
verifyServerConfigurationPayloadEntry(leaderActorContext.getReplicatedLog(), votingServer(LEADER_ID),
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
- MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+ MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(FOLLOWER_ID));
followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
- followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
- -1, -1, (short)0), leaderActor);
+ followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
followerRaftActor.tell(new AddServer(NEW_SERVER_ID, newFollowerRaftActor.path().toString(), true),
testKit.getRef());
DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
- MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+ MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
noLeaderActor.underlyingActor());
ReplicatedLogEntry serverConfigEntry = new SimpleReplicatedLogEntry(1, 1,
- new ServerConfigurationPayload(Collections.<ServerInfo>emptyList()));
+ new ServerConfigurationPayload(List.of()));
boolean handled = support.handleMessage(new ApplyState(null, null, serverConfigEntry), ActorRef.noSender());
assertEquals("Message handled", true, handled);
configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> leaderActor = actorFactory.createTestActor(
- MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+ MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
- MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+ MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(FOLLOWER_ID));
followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
- followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
- -1, -1, (short)0), leaderActor);
+ followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
followerRaftActor.tell(new RemoveServer(FOLLOWER_ID), testKit.getRef());
expectFirstMatching(leaderActor, RemoveServer.class);
final String downNodeId = "downNode";
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(MockLeaderRaftActor.props(
- ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
+ Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
ActorRef follower1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower1Actor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
follower1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
ActorRef follower2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower2Actor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID, follower1ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
follower2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
RaftActorContext initialActorContext = new MockRaftActorContext();
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActorPath),
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActorPath),
initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
final ActorRef followerCollector =
actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString()),
+ CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString()),
configParams, NO_PERSISTENCE, followerCollector)
.withDispatcher(Dispatchers.DefaultDispatcherId()),
followerActorId);
LOG.info("testRemoveServerLeaderWithNoFollowers starting");
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(Collections.<String, String>emptyMap(),
+ MockLeaderRaftActor.props(Map.of(),
new MockRaftActorContext()).withDispatcher(Dispatchers.DefaultDispatcherId()),
actorFactory.generateActorId(LEADER_ID));
final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
.withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
ActorRef follower1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
.withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
ActorRef follower2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
.withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
// Send first ChangeServersVotingStatus message
- leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
+ leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
// Send second ChangeServersVotingStatus message
- leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, true)), testKit.getRef());
+ leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, true)), testKit.getRef());
reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+ MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
.withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
ActorRef follower1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
.withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
ActorRef follower2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
- CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+ CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
.withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
// Send ChangeServersVotingStatus message
- leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+ leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
LOG.info("testChangeLeaderToNonVotingInSingleNode starting");
TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
- MockLeaderRaftActor.props(ImmutableMap.of(), new MockRaftActorContext())
+ MockLeaderRaftActor.props(Map.of(), new MockRaftActorContext())
.withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
- leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+ leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.INVALID_REQUEST, reply.getStatus());
// via the server config. The server config will also contain 2 voting peers that are down (ie no
// actors created).
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(node1ID, false), new ServerInfo(node2ID, false),
new ServerInfo("downNode1", true), new ServerInfo("downNode2", true)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
ActorRef node1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
ActorRef node2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
// First send the message such that node1 has no peer address for node2 - should fail.
- ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+ ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
node2ID, true, "downNode1", false, "downNode2", false));
node1RaftActorRef.tell(changeServers, testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
long term = node1RaftActor.getRaftActorContext().getTermInformation().getCurrentTerm();
node1RaftActorRef.tell(new AppendEntries(term, "downNode1", -1L, -1L,
- Collections.<ReplicatedLogEntry>emptyList(), 0, -1, (short)1), ActorRef.noSender());
+ List.of(), 0, -1, (short)1), ActorRef.noSender());
// Wait for the ElectionTimeout to clear the leaderId. The leaderId must be null so on the next
// ChangeServersVotingStatus message, it will try to elect a leader.
? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
? actorFactory.createTestActorPath(node2ID) : null;
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
ActorRef node1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams1,
+ CollectingMockRaftActor.props(node1ID, Map.of(), configParams1,
PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
ActorRef node2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams2,
+ CollectingMockRaftActor.props(node2ID, Map.of(), configParams2,
PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
node2RaftActor.setDropMessageOfType(RequestVote.class);
- ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true));
+ ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true));
node1RaftActorRef.tell(changeServers, testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.NO_LEADER, reply.getStatus());
- assertEquals("Server config", ImmutableSet.of(nonVotingServer(node1ID), votingServer(node2ID)),
- new HashSet<>(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
+ assertEquals("Server config", Set.of(nonVotingServer(node1ID), votingServer(node2ID)),
+ Set.copyOf(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
assertEquals("getRaftState", RaftState.Follower, node1RaftActor.getRaftState());
LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout ending");
configParams.setElectionTimeoutFactor(3);
configParams.setPeerAddressResolver(peerAddressResolver);
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(node1ID, false), new ServerInfo(node2ID, false)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
ActorRef node1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
ActorRef node2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
final CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
// forward the request to node2.
ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(
- ImmutableMap.of(node1ID, true, node2ID, true));
+ Map.of(node1ID, true, node2ID, true));
node1RaftActorRef.tell(changeServers, testKit.getRef());
ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
? actorFactory.createTestActorPath(node2ID) : null);
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
ActorRef node1Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
ActorRef node2Collector = actorFactory.createActor(
MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
- CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+ CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
node2RaftActor.setDropMessageOfType(RequestVote.class);
- ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+ ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
node2ID, true));
node1RaftActorRef.tell(changeServers, testKit.getRef());
ReplicatedLogEntry logEntry = log.get(log.lastIndex());
assertEquals("Last log entry payload class", ServerConfigurationPayload.class, logEntry.getData().getClass());
ServerConfigurationPayload payload = (ServerConfigurationPayload)logEntry.getData();
- assertEquals("Server config", ImmutableSet.copyOf(expected), new HashSet<>(payload.getServerConfig()));
+ assertEquals("Server config", Set.of(expected), Set.copyOf(payload.getServerConfig()));
}
private static RaftActorContextImpl newFollowerContext(final String id,
ElectionTermImpl termInfo = new ElectionTermImpl(noPersistence, id, LOG);
termInfo.update(1, LEADER_ID);
return new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
- id, termInfo, -1, -1, ImmutableMap.of(LEADER_ID, ""), configParams,
+ id, termInfo, -1, -1, Map.of(LEADER_ID, ""), configParams,
noPersistence, applyState -> actor.tell(applyState, actor), LOG, MoreExecutors.directExecutor());
}
AbstractMockRaftActor(final String id, final Map<String, String> peerAddresses,
final Optional<ConfigParams> config, final boolean persistent, final ActorRef collectorActor) {
- super(builder().id(id).peerAddresses(peerAddresses).config(config.get())
+ super(builder().id(id).peerAddresses(peerAddresses).config(config.orElseThrow())
.persistent(Optional.of(persistent)));
this.collectorActor = collectorActor;
}
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
- MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+ MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
if (installSnapshotStream.isPresent()) {
- SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+ SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
}
actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
public static class MockNewFollowerRaftActor extends AbstractMockRaftActor {
public MockNewFollowerRaftActor(final ConfigParams config, final ActorRef collectorActor) {
- super(NEW_SERVER_ID, new HashMap<>(), Optional.of(config), NO_PERSISTENCE, collectorActor);
+ super(NEW_SERVER_ID, Map.of(), Optional.of(config), NO_PERSISTENCE, collectorActor);
setPersistence(false);
}
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import akka.persistence.SnapshotMetadata;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.OutputStream;
-import java.util.Collections;
+import java.util.List;
+import java.util.Map;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RaftActorSnapshotMessageSupportTest {
-
private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
@Mock
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
context = new RaftActorContextImpl(mockRaftActorRef, null, "test",
- new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.<String,String>emptyMap(),
+ new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(),
configParams, mockPersistence, applyState -> { }, LOG, MoreExecutors.directExecutor()) {
@Override
public SnapshotManager getSnapshotManager() {
support = new RaftActorSnapshotMessageSupport(context, mockCohort);
- doReturn(true).when(mockPersistence).isRecoveryApplicable();
-
context.setReplicatedLog(ReplicatedLogImpl.newInstance(context));
}
- private void sendMessageToSupport(Object message) {
+ private void sendMessageToSupport(final Object message) {
sendMessageToSupport(message, true);
}
- private void sendMessageToSupport(Object message, boolean expHandled) {
+ private void sendMessageToSupport(final Object message, final boolean expHandled) {
boolean handled = support.handleSnapshotMessage(message, mockRaftActorRef);
assertEquals("complete", expHandled, handled);
}
long lastIndexDuringSnapshotCapture = 2;
byte[] snapshotBytes = {1,2,3,4,5};
- Snapshot snapshot = Snapshot.create(ByteState.of(snapshotBytes), Collections.<ReplicatedLogEntry>emptyList(),
+ Snapshot snapshot = Snapshot.create(ByteState.of(snapshotBytes), List.of(),
lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1, -1, null, null);
ApplySnapshot applySnapshot = new ApplySnapshot(snapshot);
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.protobuf.ByteString;
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
// log entry.
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- ImmutableMap<String, String> peerAddresses = ImmutableMap.<String, String>builder()
- .put("member1", "address").build();
+ Map<String, String> peerAddresses = Map.of("member1", "address");
ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
peerAddresses, config), persistenceId);
kit.watch(followerActor);
- List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
- ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"));
- snapshotUnappliedEntries.add(entry1);
+ List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+ new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
int lastAppliedDuringSnapshotCapture = 3;
int lastIndexDuringSnapshotCapture = 4;
// 4 messages as part of snapshot, which are applied to state
- MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+ MockSnapshotState snapshotState = new MockSnapshotState(List.of(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
InMemorySnapshotStore.addSnapshot(persistenceId, snapshot);
// add more entries after snapshot is taken
- List<ReplicatedLogEntry> entries = new ArrayList<>();
ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(5, 1, new MockRaftActorContext.MockPayload("F", 2));
ReplicatedLogEntry entry3 = new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("G", 3));
ReplicatedLogEntry entry4 = new SimpleReplicatedLogEntry(7, 1, new MockRaftActorContext.MockPayload("H", 4));
- entries.add(entry2);
- entries.add(entry3);
- entries.add(entry4);
final int lastAppliedToState = 5;
final int lastIndex = 7;
mockRaftActor.waitForRecoveryComplete();
RaftActorContext context = mockRaftActor.getRaftActorContext();
- assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+ assertEquals("Journal log size", snapshotUnappliedEntries.size() + 3,
context.getReplicatedLog().size());
assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
- ImmutableMap.<String, String>builder().put("member1", "address").build(),
- config, createProvider()), persistenceId);
+ Map.of("member1", "address"), config, createProvider()), persistenceId);
MockRaftActor mockRaftActor = ref.underlyingActor();
InMemoryJournal.addWriteMessagesCompleteLatch(persistenceId, 1);
TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
- ImmutableMap.<String, String>builder().put("member1", "address").build(),
- config, createProvider())
+ Map.of("member1", "address"), config, createProvider())
.withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
factory.killActor(ref, kit);
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- ref = factory.createTestActor(MockRaftActor.props(persistenceId,
- ImmutableMap.<String, String>builder().put("member1", "address").build(), config,
+ ref = factory.createTestActor(MockRaftActor.props(persistenceId, Map.of("member1", "address"), config,
createProvider()).withDispatcher(Dispatchers.DefaultDispatcherId()),
factory.generateActorId("follower-"));
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), config), persistenceId);
+ Map.of(), config), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
mockRaftActor.setRaftActorRecoverySupport(mockSupport);
Snapshot snapshot = Snapshot.create(ByteState.of(new byte[]{1}),
- Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1, -1, null, null);
+ List.of(), 3, 1, 3, 1, -1, null, null);
SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
mockRaftActor.handleRecover(snapshotOffer);
// Wait for akka's recovery to complete so it doesn't interfere.
mockRaftActor.waitForRecoveryComplete();
- ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class));
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
+ ApplySnapshot applySnapshot = new ApplySnapshot(
+ Snapshot.create(null, null, 0, 0, 0, 0, 0, persistenceId, null));
+ when(mockSupport.handleSnapshotMessage(same(applySnapshot), any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(applySnapshot);
CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(ByteState.empty(), Optional.empty());
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class));
+ when(mockSupport.handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(captureSnapshotReply);
SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(new SnapshotMetadata("", 0L, 0L));
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class));
+ when(mockSupport.handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(saveSnapshotSuccess);
SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(new SnapshotMetadata("", 0L, 0L),
new Throwable());
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class));
+ when(mockSupport.handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(saveSnapshotFailure);
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
- any(ActorRef.class));
+ when(mockSupport.handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
+ any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
- doReturn(true).when(mockSupport).handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class));
+ when(mockSupport.handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class))).thenReturn(true);
mockRaftActor.handleCommand(GetSnapshot.INSTANCE);
verify(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+ Map.of(), config, dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+ Map.of(), config, dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
String persistenceId = factory.generateActorId("notifier-");
factory.createActor(MockRaftActor.builder().id(persistenceId)
- .peerAddresses(ImmutableMap.of("leader", "fake/path"))
+ .peerAddresses(Map.of("leader", "fake/path"))
.config(config).roleChangeNotifier(notifierActor).props());
List<RoleChanged> matches = null;
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(follower1Id, followerActor1.path().toString());
+ Map<String, String> peerAddresses = Map.of(follower1Id, followerActor1.path().toString());
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
assertEquals(8, leaderActor.getReplicatedLog().size());
- MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+ MockSnapshotState snapshotState = new MockSnapshotState(List.of(
new MockRaftActorContext.MockPayload("foo-0"),
new MockRaftActorContext.MockPayload("foo-1"),
new MockRaftActorContext.MockPayload("foo-2"),
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(leaderId, leaderActor1.path().toString());
+ Map<String, String> peerAddresses = Map.of(leaderId, leaderActor1.path().toString());
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
assertEquals(6, followerActor.getReplicatedLog().size());
//fake snapshot on index 6
- List<ReplicatedLogEntry> entries = Arrays.asList(
- (ReplicatedLogEntry) new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
+ List<ReplicatedLogEntry> entries = List.of(
+ new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
followerActor.handleCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5, (short)0));
assertEquals(7, followerActor.getReplicatedLog().size());
//fake snapshot on index 7
assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
- entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(7, 1,
+ entries = List.of(new SimpleReplicatedLogEntry(7, 1,
new MockRaftActorContext.MockPayload("foo-7")));
followerActor.handleCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6, (short) 0));
assertEquals(8, followerActor.getReplicatedLog().size());
assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ ByteString snapshotBytes = fromObject(List.of(
new MockRaftActorContext.MockPayload("foo-0"),
new MockRaftActorContext.MockPayload("foo-1"),
new MockRaftActorContext.MockPayload("foo-2"),
assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
assertEquals(7, followerActor.getReplicatedLog().lastIndex());
- entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(8, 1,
- new MockRaftActorContext.MockPayload("foo-7")));
+ entries = List.of(new SimpleReplicatedLogEntry(8, 1, new MockRaftActorContext.MockPayload("foo-7")));
// send an additional entry 8 with leaderCommit = 7
followerActor.handleCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7, (short) 0));
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(follower1Id, followerActor1.path().toString());
- peerAddresses.put(follower2Id, followerActor2.path().toString());
+ Map<String, String> peerAddresses = Map.of(
+ follower1Id, followerActor1.path().toString(),
+ follower2Id, followerActor2.path().toString());
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
assertEquals("Fake snapshot should not happen when Initiate is in progress", 5,
leaderActor.getReplicatedLog().size());
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ ByteString snapshotBytes = fromObject(List.of(
new MockRaftActorContext.MockPayload("foo-0"),
new MockRaftActorContext.MockPayload("foo-1"),
new MockRaftActorContext.MockPayload("foo-2"),
DataPersistenceProvider dataPersistenceProvider = createProvider();
- Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+ Map<String, String> peerAddresses = Map.of("member1", "address");
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
DataPersistenceProvider dataPersistenceProvider = createProvider();
- Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+ Map<String, String> peerAddresses = Map.of("member1", "address");
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
DataPersistenceProvider dataPersistenceProvider = createProvider();
- Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().build();
+ Map<String, String> peerAddresses = Map.of();
TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
public void testUpdateConfigParam() {
DefaultConfigParamsImpl emptyConfig = new DefaultConfigParamsImpl();
String persistenceId = factory.generateActorId("follower-");
- ImmutableMap<String, String> peerAddresses =
- ImmutableMap.<String, String>builder().put("member1", "address").build();
+ Map<String, String> peerAddresses = Map.of("member1", "address");
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
TestActorRef<MockRaftActor> actorRef = factory.createTestActor(
new MockRaftActorContext.MockPayload("C")));
TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- ImmutableMap.<String, String>builder().put("member1", "address").build(), config)
+ Map.of("member1", "address"), config)
.withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
MockRaftActor mockRaftActor = raftActorRef.underlyingActor();
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
- List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
- snapshotUnappliedEntries.add(new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
+ List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+ new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
int snapshotLastApplied = 3;
int snapshotLastIndex = 4;
- MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+ MockSnapshotState snapshotState = new MockSnapshotState(List.of(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
// Test with data persistence disabled
- snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.<ReplicatedLogEntry>emptyList(),
+ snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(),
-1, -1, -1, -1, 5, "member-1", null);
persistenceId = factory.generateActorId("test-actor-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
- List<MockPayload> state = Arrays.asList(new MockRaftActorContext.MockPayload("A"));
+ List<MockPayload> state = List.of(new MockRaftActorContext.MockPayload("A"));
Snapshot snapshot = Snapshot.create(ByteState.of(fromObject(state).toByteArray()),
- Arrays.<ReplicatedLogEntry>asList(), 5, 2, 5, 2, 2, "member-1", null);
+ List.of(), 5, 2, 5, 2, 2, "member-1", null);
InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1,
new MockRaftActorContext.MockPayload("B")));
String persistenceId = factory.generateActorId("test-actor-");
InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1,
- new ServerConfigurationPayload(Arrays.asList(new ServerInfo(persistenceId, false)))));
+ new ServerConfigurationPayload(List.of(new ServerInfo(persistenceId, false)))));
TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.builder().id(persistenceId)
.config(config).props().withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
mockRaftActor.waitForInitializeBehaviorComplete();
- raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, Collections.<ReplicatedLogEntry>emptyList(),
+ raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, List.of(),
0L, -1L, (short)1), ActorRef.noSender());
LeaderStateChanged leaderStateChange = MessageCollectorActor.expectFirstMatching(
notifierActor, LeaderStateChanged.class);
doReturn(true).when(mockPersistenceProvider).isRecoveryApplicable();
TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
- MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config,
+ MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config,
mockPersistenceProvider), leaderId);
MockRaftActor leaderActor = leaderActorRef.underlyingActor();
leaderActor.waitForInitializeBehaviorComplete();
config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
- MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config),
+ MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config),
leaderId);
MockRaftActor leaderActor = leaderActorRef.underlyingActor();
leaderActor.waitForInitializeBehaviorComplete();
TestRaftActor.Builder builder = TestRaftActor.newBuilder()
.id(leaderId)
- .peerAddresses(ImmutableMap.of(followerId,
- mockFollowerActorRef.path().toString()))
+ .peerAddresses(Map.of(followerId, mockFollowerActorRef.path().toString()))
.config(config)
.collectorActor(factory.createActor(
MessageCollectorActor.props(), factory.generateActorId(leaderId + "-collector")));
*/
package org.opendaylight.controller.cluster.raft;
+import static org.junit.Assert.fail;
+
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.slf4j.Logger;
public RaftActorTestKit(final ActorSystem actorSystem, final String actorName) {
super(actorSystem);
- raftActor = this.getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
+ raftActor = getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
}
public ActorRef getRaftActor() {
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
- Assert.fail("Leader not found for actorRef " + actorRef.path());
+ fail("Leader not found for actorRef " + actorRef.path());
}
}
import akka.actor.ActorRef;
import akka.persistence.SaveSnapshotSuccess;
import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
import java.util.List;
+import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
String persistenceId = factory.generateActorId("singleNode");
TestActorRef<AbstractRaftActorIntegrationTest.TestRaftActor> singleNodeActorRef =
- newTestRaftActor(persistenceId, ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+ newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
waitUntilLeader(singleNodeActorRef);
assertEquals("Last applied", 5, singleNodeContext.getLastApplied());
- assertEquals("Incorrect State after snapshot success is received ", Lists.newArrayList(payload0, payload1,
- payload2, payload3, payload4, payload5), singleNodeActorRef.underlyingActor().getState());
+ assertEquals("Incorrect State after snapshot success is received ",
+ List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+ singleNodeActorRef.underlyingActor().getState());
InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
assertEquals(1, persistedSnapshots.size());
List<Object> snapshottedState = MockRaftActor.fromState(persistedSnapshots.get(0).getState());
- assertEquals("Incorrect Snapshot", Lists.newArrayList(payload0, payload1, payload2, payload3),
- snapshottedState);
+ assertEquals("Incorrect Snapshot", List.of(payload0, payload1, payload2, payload3), snapshottedState);
//recovery logic starts
killActor(singleNodeActorRef);
- singleNodeActorRef = newTestRaftActor(persistenceId,
- ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+ singleNodeActorRef = newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
singleNodeActorRef.underlyingActor().waitForRecoveryComplete();
- assertEquals("Incorrect State after Recovery ", Lists.newArrayList(payload0, payload1, payload2, payload3,
- payload4, payload5), singleNodeActorRef.underlyingActor().getState());
-
+ assertEquals("Incorrect State after Recovery ",
+ List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+ singleNodeActorRef.underlyingActor().getState());
}
}
import akka.actor.ActorRef;
import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
@Before
public void setup() {
- follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+ follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId)),
newFollowerConfigParams());
- Map<String, String> leaderPeerAddresses = new HashMap<>();
- leaderPeerAddresses.put(follower1Id, follower1Actor.path().toString());
- leaderPeerAddresses.put(follower2Id, "");
-
leaderConfigParams = newLeaderConfigParams();
- leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
+ leaderActor = newTestRaftActor(leaderId, Map.of(follower1Id, follower1Actor.path().toString(), follower2Id, ""),
+ leaderConfigParams);
follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
- assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+ assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
leaderActor.underlyingActor().getState());
}
assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
- assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+ assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
leaderActor.underlyingActor().getState());
}
leader = leaderActor.underlyingActor().getCurrentBehavior();
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
- newFollowerConfigParams());
+ follower2Actor = newTestRaftActor(follower2Id,
+ Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
leaderActor.tell(new SetPeerAddress(follower2Id, follower2Actor.path().toString()), ActorRef.noSender());
InMemoryJournal.clear();
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
- newFollowerConfigParams());
+ follower2Actor = newTestRaftActor(follower2Id,
+ Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
TestRaftActor follower2Underlying = follower2Actor.underlyingActor();
follower2CollectorActor = follower2Underlying.collectorActor();
follower2Context = follower2Underlying.getRaftActorContext();
// Wait for the follower to persist the snapshot.
MessageCollectorActor.expectFirstMatching(follower2CollectorActor, SaveSnapshotSuccess.class);
- final List<MockPayload> expFollowerState = Arrays.asList(payload0, payload1, payload2);
+ final List<MockPayload> expFollowerState = List.of(payload0, payload1, payload2);
assertEquals("Follower commit index", 2, follower2Context.getCommitIndex());
assertEquals("Follower last applied", 2, follower2Context.getLastApplied());
killActor(follower2Actor);
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+ follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId)),
newFollowerConfigParams());
follower2Underlying = follower2Actor.underlyingActor();
});
// Send new payloads
- final MockPayload payload4 = sendPayloadData(leaderActor,"newFour");
- final MockPayload payload5 = sendPayloadData(leaderActor,"newFive");
+ final MockPayload payload4 = sendPayloadData(leaderActor, "newFour");
+ final MockPayload payload5 = sendPayloadData(leaderActor, "newFive");
verifyRaftState(leaderActor, raftState -> {
assertEquals("leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
reinstateLeaderActor();
- assertEquals("Leader last index", 5 , leaderActor.underlyingActor().getReplicatedLog().lastIndex());
- assertEquals(payload4 ,leaderActor.underlyingActor().getReplicatedLog().get(4).getData());
- assertEquals(payload5 ,leaderActor.underlyingActor().getReplicatedLog().get(5).getData());
+ final var log = leaderActor.underlyingActor().getReplicatedLog();
+ assertEquals("Leader last index", 5, log.lastIndex());
+ assertEquals(List.of(payload4, payload5), List.of(log.get(4).getData(), log.get(5).getData()));
}
private void reinstateLeaderActor() {
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.ArgumentMatchers.same;
-import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
import akka.japi.Procedure;
import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
+import java.util.Map;
import java.util.function.Consumer;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
import org.mockito.internal.matchers.Same;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ReplicatedLogImplTest {
private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
context = new RaftActorContextImpl(null, null, "test",
- new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.<String,String>emptyMap(),
+ new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(),
configParams, mockPersistence, applyState -> { }, LOG, MoreExecutors.directExecutor());
}
- private void verifyPersist(Object message) throws Exception {
+ private void verifyPersist(final Object message) throws Exception {
verifyPersist(message, new Same(message), true);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
- private void verifyPersist(Object message, ArgumentMatcher<?> matcher, boolean async) throws Exception {
+ private void verifyPersist(final Object message, final ArgumentMatcher<?> matcher, final boolean async)
+ throws Exception {
ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
if (async) {
verify(mockPersistence).persistAsync(argThat(matcher), procedure.capture());
procedure.getValue().apply(message);
}
- @SuppressWarnings("unchecked")
@Test
+ @SuppressWarnings("unchecked")
public void testAppendAndPersistExpectingNoCapture() throws Exception {
ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
public void testAppendAndPersistExpectingCaptureDueToJournalCount() throws Exception {
configParams.setSnapshotBatchCount(2);
- doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
-
ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
final ReplicatedLogEntry logEntry1 = new SimpleReplicatedLogEntry(2, 1, new MockPayload("2"));
@Test
public void testAppendAndPersistExpectingCaptureDueToDataSize() throws Exception {
- doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
-
context.setTotalMemoryRetriever(() -> 100);
ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
verifyNoMoreInteractions(mockPersistence);
}
- public ArgumentMatcher<DeleteEntries> match(final DeleteEntries actual) {
+ @Test
+ public void testCommitFakeSnapshot() {
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
+
+ log.append(new SimpleReplicatedLogEntry(0, 1, new MockPayload("0")));
+ final int dataSizeAfterFirstPayload = log.dataSize();
+
+ log.snapshotPreCommit(0,1);
+ log.snapshotCommit(false);
+
+ assertEquals(0, log.size());
+ assertEquals(dataSizeAfterFirstPayload, log.dataSize());
+ }
+
+ private static ArgumentMatcher<DeleteEntries> match(final DeleteEntries actual) {
return other -> actual.getFromIndex() == other.getFromIndex();
}
}
import static org.junit.Assert.assertEquals;
import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
import java.util.List;
+import java.util.Map;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
- follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
follower2Id, testActorPath(follower2Id)), followerConfigParams);
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
follower1Id, testActorPath(follower1Id)), followerConfigParams);
- peerAddresses = ImmutableMap.<String, String>builder()
- .put(follower1Id, follower1Actor.path().toString())
- .put(follower2Id, follower2Actor.path().toString()).build();
+ peerAddresses = Map.of(
+ follower1Id, follower1Actor.path().toString(),
+ follower2Id, follower2Actor.path().toString());
leaderConfigParams = newLeaderConfigParams();
leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
package org.opendaylight.controller.cluster.raft;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Arrays;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.SerializationUtils;
import org.eclipse.jdt.annotation.Nullable;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(initialTerm, leaderId));
// Create the leader and 2 follower actors.
- follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
follower2Id, testActorPath(follower2Id)), newFollowerConfigParams());
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
- Map<String, String> leaderPeerAddresses = ImmutableMap.<String, String>builder()
- .put(follower1Id, follower1Actor.path().toString())
- .put(follower2Id, follower2Actor.path().toString()).build();
+ Map<String, String> leaderPeerAddresses = Map.of(
+ follower1Id, follower1Actor.path().toString(),
+ follower2Id, follower2Actor.path().toString());
leaderConfigParams = newLeaderConfigParams();
leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
follower2 = follower2Actor.underlyingActor().getCurrentBehavior();
currentTerm = leaderContext.getTermInformation().getCurrentTerm();
- assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm);
+ assertTrue("Current term > " + initialTerm, currentTerm > initialTerm);
leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
}
private void setupFollower2() {
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
// to catch it up because no snapshotting was done so the follower's next index was present in the log.
InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
InstallSnapshot.class);
- Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+ assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries complete");
}
// Verify the leader did not try to install a snapshot to catch up follower 2.
InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
InstallSnapshot.class);
- Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+ assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
// Ensure there's at least 1 more heartbeat.
MessageCollectorActor.clearMessages(leaderCollectorActor);
// Send a server config change to test that the install snapshot includes the server config.
- ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
new ServerInfo(leaderId, true),
new ServerInfo(follower1Id, false),
new ServerInfo(follower2Id, false)));
setupFollower2();
- MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 5);
+ MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 1);
follower2Actor.stop();
// Verify a snapshot is not triggered.
CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor,
CaptureSnapshot.class);
- Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+ assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
expSnapshotState.add(payload1);
verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
- Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+ assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
// Verify the follower 1 applies the state.
applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
/**
* Resume the lagging follower 2 and verify it receives an install snapshot from the leader.
*/
- private void verifyInstallSnapshotToLaggingFollower(long lastAppliedIndex,
- @Nullable ServerConfigurationPayload expServerConfig) {
+ private void verifyInstallSnapshotToLaggingFollower(final long lastAppliedIndex,
+ final @Nullable ServerConfigurationPayload expServerConfig) {
testLog.info("verifyInstallSnapshotToLaggingFollower starting");
MessageCollectorActor.clearMessages(leaderCollectorActor);
// This is OK - the next snapshot should delete it. In production, even if the system restarted
// before another snapshot, they would both get applied which wouldn't hurt anything.
List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
- Assert.assertTrue("Expected at least 1 persisted snapshots", persistedSnapshots.size() > 0);
+ assertFalse("Expected at least 1 persisted snapshots", persistedSnapshots.isEmpty());
Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);
verifySnapshot("Persisted", persistedSnapshot, currentTerm, lastAppliedIndex, currentTerm, lastAppliedIndex);
List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, unAppliedEntry.size());
int snapshotSize = SerializationUtils.serialize(persistedSnapshot.getState()).length;
- final int expTotalChunks = snapshotSize / SNAPSHOT_CHUNK_SIZE
- + (snapshotSize % SNAPSHOT_CHUNK_SIZE > 0 ? 1 : 0);
+ final int expTotalChunks = snapshotSize / MAXIMUM_MESSAGE_SLICE_SIZE
+ + (snapshotSize % MAXIMUM_MESSAGE_SLICE_SIZE > 0 ? 1 : 0);
InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor,
InstallSnapshot.class);
assertEquals("InstallSnapshotReply getTerm", currentTerm, installSnapshotReply.getTerm());
assertEquals("InstallSnapshotReply getChunkIndex", index++, installSnapshotReply.getChunkIndex());
assertEquals("InstallSnapshotReply getFollowerId", follower2Id, installSnapshotReply.getFollowerId());
- assertEquals("InstallSnapshotReply isSuccess", true, installSnapshotReply.isSuccess());
+ assertTrue("InstallSnapshotReply isSuccess", installSnapshotReply.isSuccess());
}
// Verify follower 2 applies the snapshot.
verifyLeadersTrimmedLog(lastAppliedIndex);
if (expServerConfig != null) {
- Set<ServerInfo> expServerInfo = new HashSet<>(expServerConfig.getServerConfig());
+ Set<ServerInfo> expServerInfo = Set.copyOf(expServerConfig.getServerConfig());
assertEquals("Leader snapshot server config", expServerInfo,
- new HashSet<>(persistedSnapshot.getServerConfiguration().getServerConfig()));
+ Set.copyOf(persistedSnapshot.getServerConfiguration().getServerConfig()));
assertEquals("Follower 2 snapshot server config", expServerInfo,
- new HashSet<>(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
+ Set.copyOf(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
ServerConfigurationPayload follower2ServerConfig = follower2Context.getPeerServerInfo(true);
assertNotNull("Follower 2 server config is null", follower2ServerConfig);
assertEquals("Follower 2 server config", expServerInfo,
- new HashSet<>(follower2ServerConfig.getServerConfig()));
+ Set.copyOf(follower2ServerConfig.getServerConfig()));
}
MessageCollectorActor.clearMessages(leaderCollectorActor);
// Verify the leaders's persisted journal log - it should only contain the last 2 ReplicatedLogEntries
// added after the snapshot as the persisted journal should've been purged to the snapshot
// sequence number.
- verifyPersistedJournal(leaderId, Arrays.asList(new SimpleReplicatedLogEntry(5, currentTerm, payload5),
- new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
+ verifyPersistedJournal(leaderId, List.of(
+ new SimpleReplicatedLogEntry(5, currentTerm, payload5),
+ new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
// Verify the leaders's persisted journal contains an ApplyJournalEntries for at least the last entry index.
List<ApplyJournalEntries> persistedApplyJournalEntries =
}
}
- Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 6),
- found);
+ assertTrue("ApplyJournalEntries with index 6 not found in leader's persisted journal", found);
// Verify follower 1 applies the 3 log entries.
applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
/**
* Kill the leader actor, reinstate it and verify the recovered journal.
*/
- private void verifyLeaderRecoveryAfterReinstatement(long lastIndex, long snapshotIndex,
- long firstJournalEntryIndex) {
+ private void verifyLeaderRecoveryAfterReinstatement(final long lastIndex, final long snapshotIndex,
+ final long firstJournalEntryIndex) {
testLog.info("verifyLeaderRecoveryAfterReinstatement starting: lastIndex: {}, snapshotIndex: {}, "
+ "firstJournalEntryIndex: {}", lastIndex, snapshotIndex, firstJournalEntryIndex);
testLog.info("verifyLeaderRecoveryAfterReinstatement ending");
}
- private void sendInitialPayloadsReplicatedToAllFollowers(String... data) {
-
+ private void sendInitialPayloadsReplicatedToAllFollowers(final String... data) {
// Send the payloads.
for (String d: data) {
expSnapshotState.add(sendPayloadData(leaderActor, d));
int numEntries = data.length;
// Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
- List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
- ApplyState.class, numEntries);
+ final var leaderStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
+ ApplyState.class, numEntries);
for (int i = 0; i < expSnapshotState.size(); i++) {
- MockPayload payload = expSnapshotState.get(i);
- verifyApplyState(applyStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
+ final MockPayload payload = expSnapshotState.get(i);
+ verifyApplyState(leaderStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
}
// Verify follower 1 applies each log entry.
- applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, numEntries);
+ final var follower1States = MessageCollectorActor.expectMatching(follower1CollectorActor,
+ ApplyState.class, numEntries);
for (int i = 0; i < expSnapshotState.size(); i++) {
- MockPayload payload = expSnapshotState.get(i);
- verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+ final MockPayload payload = expSnapshotState.get(i);
+ verifyApplyState(follower1States.get(i), null, null, currentTerm, i, payload);
}
// Verify follower 2 applies each log entry.
- applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, numEntries);
+ final var follower2States = MessageCollectorActor.expectMatching(follower2CollectorActor,
+ ApplyState.class, numEntries);
for (int i = 0; i < expSnapshotState.size(); i++) {
- MockPayload payload = expSnapshotState.get(i);
- verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+ final MockPayload payload = expSnapshotState.get(i);
+ verifyApplyState(follower2States.get(i), null, null, currentTerm, i, payload);
}
// Ensure there's at least 1 more heartbeat.
import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching;
-import com.google.common.collect.ImmutableMap;
import java.util.List;
+import java.util.Map;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
// Create the leader and 2 follower actors.
- snapshotChunkSize = 20;
+ maximumMessageSliceSize = 20;
DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
- follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
follower2Id, testActorPath(follower2Id)), followerConfigParams);
- follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
follower1Id, testActorPath(follower1Id)), followerConfigParams);
- peerAddresses = ImmutableMap.<String, String>builder()
- .put(follower1Id, follower1Actor.path().toString())
- .put(follower2Id, follower2Actor.path().toString()).build();
+ peerAddresses = Map.of(
+ follower1Id, follower1Actor.path().toString(),
+ follower2Id, follower2Actor.path().toString());
leaderConfigParams = newLeaderConfigParams();
leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
// Send a large payload that exceeds the size threshold and needs to be sliced.
- MockPayload largePayload = sendPayloadData(leaderActor, "large", snapshotChunkSize + 1);
+ MockPayload largePayload = sendPayloadData(leaderActor, "large", maximumMessageSliceSize + 1);
// Then send a small payload that does not need to be sliced.
- MockPayload smallPayload = sendPayloadData(leaderActor, "normal", snapshotChunkSize - 1);
+ MockPayload smallPayload = sendPayloadData(leaderActor, "normal", maximumMessageSliceSize - 1);
final List<ApplyState> leaderApplyState = expectMatching(leaderCollectorActor, ApplyState.class, 2);
verifyApplyState(leaderApplyState.get(0), leaderCollectorActor,
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.raft;
import static org.junit.Assert.assertArrayEquals;
import akka.actor.ActorRef;
import akka.persistence.SnapshotSelectionCriteria;
import java.io.OutputStream;
-import java.util.Arrays;
+import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
import org.opendaylight.controller.cluster.raft.SnapshotManager.LastAppliedTermInformationReader;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.slf4j.LoggerFactory;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class SnapshotManagerTest extends AbstractActorTest {
@Mock
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
doReturn(false).when(mockRaftActorContext).hasFollowers();
doReturn(mockConfigParams).when(mockRaftActorContext).getConfigParams();
doReturn(10L).when(mockConfigParams).getSnapshotBatchCount();
@Test
public void testConstruction() {
- assertEquals(false, snapshotManager.isCapturing());
+ assertFalse(snapshotManager.isCapturing());
}
@SuppressWarnings({ "unchecked", "rawtypes" })
snapshotManager.captureToInstall(new SimpleReplicatedLogEntry(0, 1,
new MockRaftActorContext.MockPayload()), 0, "follower-1");
- assertEquals(true, snapshotManager.isCapturing());
+ assertTrue(snapshotManager.isCapturing());
ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
verify(mockProcedure).accept(outputStream.capture());
assertTrue(capture);
- assertEquals(true, snapshotManager.isCapturing());
+ assertTrue(snapshotManager.isCapturing());
ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
verify(mockProcedure).accept(outputStream.capture());
assertTrue(capture);
- assertEquals(true, snapshotManager.isCapturing());
+ assertTrue(snapshotManager.isCapturing());
ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
verify(mockProcedure).accept(outputStream.capture());
assertFalse(capture);
- assertEquals(false, snapshotManager.isCapturing());
+ assertFalse(snapshotManager.isCapturing());
verify(mockProcedure).accept(any());
}
8L, 2L, new MockRaftActorContext.MockPayload());
doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
- doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
+ doReturn(List.of(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
// when replicatedToAllIndex = -1
snapshotManager.capture(lastLogEntry, -1);
assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
assertEquals("getState", snapshotState, snapshot.getState());
- assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
+ assertEquals("getUnAppliedEntries", List.of(lastLogEntry), snapshot.getUnAppliedEntries());
assertEquals("electionTerm", mockElectionTerm.getCurrentTerm(), snapshot.getElectionTerm());
assertEquals("electionVotedFor", mockElectionTerm.getVotedFor(), snapshot.getElectionVotedFor());
doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
doReturn(6L).when(mockReplicatedLog).getSnapshotTerm();
ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(null).when(mockReplicatedLog).get(0);
doReturn(replicatedLogEntry).when(mockReplicatedLog).get(9);
doReturn(6L).when(replicatedLogEntry).getTerm();
doReturn(9L).when(replicatedLogEntry).getIndex();
long replicatedToAllIndex = 1;
ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(null).when(mockReplicatedLog).get(0);
doReturn(replicatedLogEntry).when(mockReplicatedLog).get(replicatedToAllIndex);
doReturn(6L).when(replicatedLogEntry).getTerm();
doReturn(replicatedToAllIndex).when(replicatedLogEntry).getIndex();
Optional<OutputStream> installSnapshotStream = installSnapshotStreamCapture.getValue();
assertEquals("isPresent", true, installSnapshotStream.isPresent());
- installSnapshotStream.get().write(snapshotState.getBytes());
+ installSnapshotStream.orElseThrow().write(snapshotState.getBytes());
snapshotManager.persist(snapshotState, installSnapshotStream, Runtime.getRuntime().totalMemory());
- assertEquals(true, snapshotManager.isCapturing());
+ assertTrue(snapshotManager.isCapturing());
verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
snapshotManager.persist(ByteState.empty(), Optional.empty(), Runtime.getRuntime().totalMemory());
- assertEquals(true, snapshotManager.isCapturing());
+ assertTrue(snapshotManager.isCapturing());
snapshotManager.commit(100L, 1234L);
- assertEquals(false, snapshotManager.isCapturing());
+ assertFalse(snapshotManager.isCapturing());
verify(mockReplicatedLog).snapshotCommit();
assertEquals("return index", 10L, retIndex);
verify(mockReplicatedLog).snapshotPreCommit(10, 5);
- verify(mockReplicatedLog).snapshotCommit();
+ verify(mockReplicatedLog).snapshotCommit(false);
verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
}
public void testTrimLogWhenLastAppliedNotSet() {
doReturn(-1L).when(mockRaftActorContext).getLastApplied();
- ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
- doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
- doReturn(5L).when(replicatedLogEntry).getTerm();
-
long retIndex = snapshotManager.trimLog(10);
assertEquals("return index", -1L, retIndex);
verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
- verify(mockReplicatedLog, never()).snapshotCommit();
+ verify(mockReplicatedLog, never()).snapshotCommit(false);
verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
}
public void testTrimLogWhenLastAppliedZero() {
doReturn(0L).when(mockRaftActorContext).getLastApplied();
- ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
- doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
- doReturn(5L).when(replicatedLogEntry).getTerm();
-
long retIndex = snapshotManager.trimLog(10);
assertEquals("return index", -1L, retIndex);
verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
- verify(mockReplicatedLog, never()).snapshotCommit();
+ verify(mockReplicatedLog, never()).snapshotCommit(false);
verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
}
assertEquals("return index", -1L, retIndex);
verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
- verify(mockReplicatedLog, never()).snapshotCommit();
+ verify(mockReplicatedLog, never()).snapshotCommit(false);
// Trim index is greater than replicatedToAllIndex so should update it.
verify(mockRaftActorBehavior).setReplicatedToAllIndex(10L);
assertTrue(capture);
- assertEquals(true, snapshotManager.isCapturing());
-
- ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
- doReturn(20L).when(mockRaftActorContext).getLastApplied();
- doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
- doReturn(5L).when(replicatedLogEntry).getTerm();
+ assertTrue(snapshotManager.isCapturing());
snapshotManager.trimLog(10);
verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
- verify(mockReplicatedLog, never()).snapshotCommit();
-
+ verify(mockReplicatedLog, never()).snapshotCommit(false);
}
@Test
assertTrue(capture);
- assertEquals(true, snapshotManager.isCapturing());
-
- ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
- doReturn(20L).when(mockRaftActorContext).getLastApplied();
- doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
- doReturn(5L).when(replicatedLogEntry).getTerm();
+ assertTrue(snapshotManager.isCapturing());
snapshotManager.trimLog(10);
verify(mockReplicatedLog, never()).snapshotPreCommit(10, 5);
verify(mockReplicatedLog, never()).snapshotCommit();
-
}
@Test
*/
package org.opendaylight.controller.cluster.raft;
+import static org.junit.Assert.assertTrue;
+
import akka.actor.Actor;
import akka.actor.ActorIdentity;
import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.base.Stopwatch;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.LinkedList;
+import java.time.Duration;
+import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
private static final Logger LOG = LoggerFactory.getLogger(TestActorFactory.class);
private final ActorSystem system;
- List<ActorRef> createdActors = new LinkedList<>();
+ private final List<ActorRef> createdActors = new ArrayList<>();
private static int actorCount = 1;
- public TestActorFactory(ActorSystem system) {
+ public TestActorFactory(final ActorSystem system) {
this.system = system;
}
* @param props the actor Props
* @return the ActorRef
*/
- public ActorRef createActor(Props props) {
+ public ActorRef createActor(final Props props) {
ActorRef actorRef = system.actorOf(props);
return addActor(actorRef, true);
}
* @param actorId name of actor
* @return the ActorRef
*/
- public ActorRef createActor(Props props, String actorId) {
+ public ActorRef createActor(final Props props, final String actorId) {
ActorRef actorRef = system.actorOf(props, actorId);
return addActor(actorRef, true);
}
* @param actorId name of actor
* @return the ActorRef
*/
- public ActorRef createActorNoVerify(Props props, String actorId) {
+ public ActorRef createActorNoVerify(final Props props, final String actorId) {
ActorRef actorRef = system.actorOf(props, actorId);
return addActor(actorRef, false);
}
* @return the ActorRef
*/
@SuppressWarnings("unchecked")
- public <T extends Actor> TestActorRef<T> createTestActor(Props props, String actorId) {
+ public <T extends Actor> TestActorRef<T> createTestActor(final Props props, final String actorId) {
InvalidActorNameException lastError = null;
for (int i = 0; i < 10; i++) {
try {
* @return the TestActorRef
*/
@SuppressWarnings("unchecked")
- public <T extends Actor> TestActorRef<T> createTestActor(Props props) {
+ public <T extends Actor> TestActorRef<T> createTestActor(final Props props) {
TestActorRef<T> actorRef = TestActorRef.create(system, props);
return (TestActorRef<T>) addActor(actorRef, true);
}
- private <T extends ActorRef> ActorRef addActor(T actorRef, boolean verify) {
+ private <T extends ActorRef> ActorRef addActor(final T actorRef, final boolean verify) {
createdActors.add(actorRef);
if (verify) {
verifyActorReady(actorRef);
}
@SuppressWarnings("checkstyle:IllegalCatch")
- private void verifyActorReady(ActorRef actorRef) {
+ private void verifyActorReady(final ActorRef actorRef) {
// Sometimes we see messages go to dead letters soon after creation - it seems the actor isn't quite
// in a state yet to receive messages or isn't actually created yet. This seems to happen with
// actorSelection so, to alleviate it, we use an actorSelection and send an Identify message with
ActorSelection actorSelection = system.actorSelection(actorRef.path().toString());
Future<Object> future = Patterns.ask(actorSelection, new Identify(""), timeout);
ActorIdentity reply = (ActorIdentity)Await.result(future, timeout.duration());
- Assert.assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
+ assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
return;
} catch (Exception | AssertionError e) {
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
* @param prefix the name prefix
* @return the actor name
*/
- public String generateActorId(String prefix) {
+ public String generateActorId(final String prefix) {
return prefix + actorCount++;
}
- public void killActor(ActorRef actor, TestKit kit) {
+ public void killActor(final ActorRef actor, final TestKit kit) {
killActor(actor, kit, true);
}
- private void killActor(ActorRef actor, TestKit kit, boolean remove) {
+ private void killActor(final ActorRef actor, final TestKit kit, final boolean remove) {
LOG.info("Killing actor {}", actor);
kit.watch(actor);
actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
- kit.expectTerminated(kit.duration("5 seconds"), actor);
+ kit.expectTerminated(Duration.ofSeconds(5), actor);
if (remove) {
createdActors.remove(actor);
}
}
- public String createTestActorPath(String actorId) {
+ public String createTestActorPath(final String actorId) {
return "akka://test/user/" + actorId;
}
*/
package org.opendaylight.controller.cluster.raft.base.messages;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class TimeoutNowTest {
-
@Test
public void test() {
- TimeoutNow cloned = (TimeoutNow) SerializationUtils.clone(TimeoutNow.INSTANCE);
+ final var bytes = SerializationUtils.serialize(TimeoutNow.INSTANCE);
+ assertEquals(86, bytes.length);
+ final var cloned = SerializationUtils.deserialize(bytes);
assertSame("Cloned instance", TimeoutNow.INSTANCE, cloned);
}
}
import org.opendaylight.controller.cluster.raft.TestActorFactory;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class FollowerIdentifierTest {
-
@Test
public void testSerialization() {
- FollowerIdentifier expected = new FollowerIdentifier("follower1");
- FollowerIdentifier cloned = (FollowerIdentifier) SerializationUtils.clone(expected);
+ final var expected = new FollowerIdentifier("follower1");
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(87, bytes.length);
+ final var cloned = (FollowerIdentifier) SerializationUtils.deserialize(bytes);
assertEquals("cloned", expected, cloned);
}
}
*/
package org.opendaylight.controller.cluster.raft.behaviors;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteSource;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActor;
Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
.getElectionTimeOutInterval().toMillis() - 100, TimeUnit.MILLISECONDS);
- follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+ follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
-1, -1, (short) 1));
Uninterruptibles.sleepUninterruptibly(130, TimeUnit.MILLISECONDS);
Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
.getElectionTimeOutInterval().toMillis() - 150, TimeUnit.MILLISECONDS);
- follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+ follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
-1, -1, (short) 1));
Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
context.getReplicatedLog().append(newReplicatedLogEntry(1,100, "bar"));
context.getReplicatedLog().setSnapshotIndex(99);
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
- Assert.assertEquals(1, context.getReplicatedLog().size());
+ assertEquals(1, context.getReplicatedLog().size());
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
MockRaftActorContext context = createActorContext();
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
context.getReplicatedLog().append(newReplicatedLogEntry(1, 100, "bar"));
context.getReplicatedLog().setSnapshotIndex(99);
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
context.getReplicatedLog().clear(0,2);
context.getReplicatedLog().setSnapshotIndex(100);
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
context.getReplicatedLog().clear(0,2);
context.getReplicatedLog().setSnapshotIndex(100);
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 105, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 105, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 105, 100, (short) 0);
MockRaftActorContext context = createActorContext();
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
context.setCommitIndex(101);
setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload(""));
- entries = Arrays.asList(newReplicatedLogEntry(2, 101, "foo"));
+ entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
MockRaftActorContext context = createActorContext();
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
setLastLogEntry(context, 1, 100,
new MockRaftActorContext.MockPayload(""));
- entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// leader-2 is becoming the leader now and it says the commitIndex is 45
appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
MockRaftActorContext context = createActorContext();
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
setLastLogEntry(context, 1, 101,
new MockRaftActorContext.MockPayload(""));
- entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
setLastLogEntry(context, 1, 100,
new MockRaftActorContext.MockPayload(""));
- entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// leader-2 is becoming the leader now and it says the commitIndex is 45
appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
new MockRaftActorContext.MockPayload(""));
context.getReplicatedLog().setSnapshotIndex(99);
- List<ReplicatedLogEntry> entries = Arrays.<ReplicatedLogEntry>asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
MockRaftActorContext context = createActorContext();
- AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, Collections.emptyList(), 101, -1, (short)0);
+ AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, List.of(), 101, -1, (short)0);
follower = createBehavior(context);
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
AppendEntriesReply.class);
context.getReplicatedLog().setSnapshotIndex(4);
context.getReplicatedLog().setSnapshotTerm(3);
- AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, Collections.emptyList(), 8, -1, (short)0);
+ AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, List.of(), 8, -1, (short)0);
follower = createBehavior(context);
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
context.setReplicatedLog(log);
// Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(newReplicatedLogEntry(1, 3, "three"));
- entries.add(newReplicatedLogEntry(1, 4, "four"));
+ List<ReplicatedLogEntry> entries = List.of(
+ newReplicatedLogEntry(1, 3, "three"), newReplicatedLogEntry(1, 4, "four"));
// Send appendEntries with the same term as was set on the receiver
// before the new behavior was created (1 in this case)
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
assertEquals("Next index", 5, log.last().getIndex() + 1);
assertEquals("Entry 3", entries.get(0), log.get(3));
context.setReplicatedLog(log);
// Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(newReplicatedLogEntry(2, 2, "two-1"));
- entries.add(newReplicatedLogEntry(2, 3, "three"));
+ List<ReplicatedLogEntry> entries = List.of(
+ newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
// Send appendEntries with the same term as was set on the receiver
// before the new behavior was created (1 in this case)
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
// The entry at index 2 will be found out-of-sync with the leader
// and will be removed
context.setReplicatedLog(log);
// Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(newReplicatedLogEntry(2, 2, "two-1"));
- entries.add(newReplicatedLogEntry(2, 3, "three"));
+ List<ReplicatedLogEntry> entries = List.of(
+ newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
// Send appendEntries with the same term as was set on the receiver
// before the new behavior was created (1 in this case)
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
expectAndVerifyAppendEntriesReply(2, false, context.getId(), 1, 2, true);
}
context.setReplicatedLog(log);
// Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(newReplicatedLogEntry(1, 4, "four"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, -1, (short)0);
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
expectAndVerifyAppendEntriesReply(1, false, context.getId(), 1, 2);
}
context.setReplicatedLog(log);
// Send the last entry again.
- List<ReplicatedLogEntry> entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 1, "one"));
follower = createBehavior(context);
// Send the last entry again and also a new one.
- entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
+ entries = List.of(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
MessageCollectorActor.clearMessages(leaderActor);
follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 2, -1, (short)0));
context.setReplicatedLog(log);
// Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(newReplicatedLogEntry(1, 4, "four"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, 3, (short)0);
RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- Assert.assertSame(follower, newBehavior);
+ assertSame(follower, newBehavior);
expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4);
}
snapshot.getLastAppliedIndex());
assertEquals("getLastTerm", lastInstallSnapshot.getLastIncludedTerm(), snapshot.getLastTerm());
assertEquals("getState type", ByteState.class, snapshot.getState().getClass());
- Assert.assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
+ assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
assertEquals("getElectionTerm", 1, snapshot.getElectionTerm());
assertEquals("getElectionVotedFor", "leader", snapshot.getElectionVotedFor());
applySnapshot.getCallback().onSuccess();
// Send an append entry
AppendEntries appendEntries = new AppendEntries(1, "leader", 1, 1,
- Arrays.asList(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
+ List.of(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
follower.handleMessage(leaderActor, appendEntries);
// Send appendEntries with a new term and leader.
AppendEntries appendEntries = new AppendEntries(2, "new-leader", 1, 1,
- Arrays.asList(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
+ List.of(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
follower.handleMessage(leaderActor, appendEntries);
setLastLogEntry(context, 1, 101,
new MockRaftActorContext.MockPayload(""));
- List<ReplicatedLogEntry> entries = Arrays.asList(
- newReplicatedLogEntry(2, 101, "foo"));
+ List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
// The new commitIndex is 101
AppendEntries appendEntries = new AppendEntries(2, "leader", 101, 1, entries, 102, 101, (short)0);
@Test
public void testFollowerSchedulesElectionIfNonVoting() {
MockRaftActorContext context = createActorContext();
- context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo(context.getId(), false))));
+ context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo(context.getId(), false))));
((DefaultConfigParamsImpl)context.getConfigParams()).setHeartBeatInterval(
FiniteDuration.apply(100, TimeUnit.MILLISECONDS));
((DefaultConfigParamsImpl)context.getConfigParams()).setElectionTimeoutFactor(1);
final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
- .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+ .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
.withDispatcher(Dispatchers.DefaultDispatcherId()), id);
followerRaftActor.set(followerActorRef.underlyingActor());
InMemoryJournal.addDeleteMessagesCompleteLatch(id);
InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
- List<ReplicatedLogEntry> entries = Arrays.asList(
+ List<ReplicatedLogEntry> entries = List.of(
newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"));
AppendEntries appendEntries = new AppendEntries(1, "leader", -1, -1, entries, 1, -1, (short)0);
assertEquals("Snapshot getLastAppliedIndex", 1, snapshot.getLastAppliedIndex());
assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
assertEquals("Snapshot getLastIndex", 1, snapshot.getLastIndex());
- assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData()),
+ assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData()),
MockRaftActor.fromState(snapshot.getState()));
}
final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
- .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+ .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
.withDispatcher(Dispatchers.DefaultDispatcherId()), id);
followerRaftActor.set(followerActorRef.underlyingActor());
InMemoryJournal.addDeleteMessagesCompleteLatch(id);
InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
- List<ReplicatedLogEntry> entries = Arrays.asList(
+ List<ReplicatedLogEntry> entries = List.of(
newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
newReplicatedLogEntry(1, 2, "three"));
assertEquals("Snapshot getLastAppliedIndex", 2, snapshot.getLastAppliedIndex());
assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
- assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+ assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData(),
entries.get(2).getData()), MockRaftActor.fromState(snapshot.getState()));
assertEquals("Journal size", 0, followerRaftActor.get().getReplicatedLog().size());
assertEquals("Last index", 2, followerRaftActor.get().getReplicatedLog().lastIndex());
assertEquals("Last applied index", 2, followerRaftActor.get().getRaftActorContext().getLastApplied());
assertEquals("Commit index", 2, followerRaftActor.get().getRaftActorContext().getCommitIndex());
- assertEquals("State", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+ assertEquals("State", List.of(entries.get(0).getData(), entries.get(1).getData(),
entries.get(2).getData()), followerRaftActor.get().getState());
}
final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
- .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+ .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
.withDispatcher(Dispatchers.DefaultDispatcherId()), id);
followerRaftActor.set(followerActorRef.underlyingActor());
InMemoryJournal.addDeleteMessagesCompleteLatch(id);
InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
- List<ReplicatedLogEntry> entries = Arrays.asList(
+ List<ReplicatedLogEntry> entries = List.of(
newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
newReplicatedLogEntry(1, 2, "three"));
assertEquals("Snapshot getLastAppliedIndex", 0, snapshot.getLastAppliedIndex());
assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
- assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData()),
+ assertEquals("Snapshot state", List.of(entries.get(0).getData()),
MockRaftActor.fromState(snapshot.getState()));
}
follower = createBehavior(context);
follower.handleMessage(leaderActor,
- new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1, (short)0));
+ new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short)0));
AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
assertTrue(reply.isNeedsLeaderAddress());
PeerAddressResolver mockResolver = mock(PeerAddressResolver.class);
((DefaultConfigParamsImpl)context.getConfigParams()).setPeerAddressResolver(mockResolver);
- follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1,
+ follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1,
(short)0, RaftVersions.CURRENT_VERSION, leaderActor.path().toString()));
reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
int size = chunkSize;
if (chunkSize > snapshotLength) {
size = snapshotLength;
- } else {
- if (start + chunkSize > snapshotLength) {
- size = snapshotLength - start;
- }
+ } else if (start + chunkSize > snapshotLength) {
+ size = snapshotLength - start;
}
byte[] nextChunk = new byte[size];
}
private ByteString createSnapshot() {
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
-
- return toByteString(followerSnapshot);
+ return toByteString(Map.of("1", "A", "2", "B", "3", "C"));
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Objects;
+import org.junit.Test;
+
+public class LeaderInstallSnapshotStateTest {
+ // Prime number on purpose
+ private static final int CHUNK_SIZE = 9_999_991;
+ // More than Integer.MAX_VALUE
+ private static final long SIZE = 4_294_967_294L;
+
+ @Test
+ public void testSnapshotLongerThanInteger() throws IOException {
+ try (var fts = new LeaderInstallSnapshotState(CHUNK_SIZE, "test")) {
+ fts.setSnapshotBytes(new MockByteSource(SIZE));
+
+ int chunkIndex = 0;
+ long offset = 0;
+ long expectedChunkSize = CHUNK_SIZE;
+ while (offset < SIZE) {
+ offset = offset + CHUNK_SIZE;
+ if (offset > SIZE) {
+ // We reached last chunk
+ expectedChunkSize = CHUNK_SIZE - (offset - SIZE);
+ offset = SIZE;
+ }
+ chunkIndex ++;
+ final byte[] chunk = fts.getNextChunk();
+ assertEquals("byte size not matching for chunk:", expectedChunkSize, chunk.length);
+ assertEquals("chunk index not matching", chunkIndex, fts.getChunkIndex());
+ fts.markSendStatus(true);
+ if (!fts.isLastChunk(chunkIndex)) {
+ fts.incrementChunkIndex();
+ }
+ }
+
+ assertEquals("totalChunks not matching", chunkIndex, fts.getTotalChunks());
+ }
+ }
+
+ private static final class MockByteSource extends ByteSource {
+ private final long size;
+
+ private MockByteSource(final long size) {
+ this.size = size;
+ }
+
+ @Override
+ public long size() {
+ return size;
+ }
+
+ @Override
+ public InputStream openStream() {
+ return new MockInputStream(size);
+ }
+ }
+
+ private static final class MockInputStream extends InputStream {
+ private long remaining;
+
+ MockInputStream(final long size) {
+ remaining = size;
+ }
+
+ @Override
+ public int read() {
+ if (remaining > 0) {
+ remaining--;
+ return 0;
+ }
+ return -1;
+ }
+
+ @Override
+ public int read(final byte[] bytes, final int off, final int len) {
+ Objects.checkFromIndexSize(off, len, bytes.length);
+ if (remaining <= 0) {
+ return -1;
+ }
+ final int count = len <= remaining ? len : (int) remaining;
+ Arrays.fill(bytes, off, off + count, (byte) 0);
+ remaining -= count;
+ return count;
+ }
+ }
+}
import akka.protobuf.ByteString;
import akka.testkit.TestActorRef;
import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteSource;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.OptionalInt;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.SerializationUtils;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import org.opendaylight.controller.cluster.raft.policy.DefaultRaftPolicy;
import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.yangtools.concepts.Identifier;
private RaftActorBehavior sendReplicate(final MockRaftActorContext actorContext, final long term, final long index,
final Payload payload) {
- SimpleReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(index, term, payload);
- actorContext.getReplicatedLog().append(newEntry);
- return leader.handleMessage(leaderActor, new Replicate(null, null, newEntry, true));
+ actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(index, term, payload));
+ return leader.handleMessage(leaderActor, new Replicate(index, true, null, null));
}
@Test
final int messageNr) {
final AppendEntries commitReq = allMessages.get(2 * messageNr + 1);
assertEquals(lastIndex + messageNr + 1, commitReq.getLeaderCommit());
- assertEquals(ImmutableList.of(), commitReq.getEntries());
+ assertEquals(List.of(), commitReq.getEntries());
}
private static void assertRequestEntry(final long lastIndex, final List<AppendEntries> allMessages,
actorContext.setLastApplied(0);
- long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
- long term = actorContext.getTermInformation().getCurrentTerm();
- ReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(
- newLogIndex, term, new MockRaftActorContext.MockPayload("foo"));
+ final long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
+ final long term = actorContext.getTermInformation().getCurrentTerm();
+ final var data = new MockRaftActorContext.MockPayload("foo");
- actorContext.getReplicatedLog().append(newEntry);
+ actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(newLogIndex, term, data));
final Identifier id = new MockIdentifier("state-id");
- RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
- new Replicate(leaderActor, id, newEntry, true));
+ final var raftBehavior = leader.handleMessage(leaderActor, new Replicate(newLogIndex, true, leaderActor, id));
// State should not change
assertTrue(raftBehavior instanceof Leader);
// We should get 2 ApplyState messages - 1 for new log entry and 1 for the previous
// one since lastApplied state is 0.
- List<ApplyState> applyStateList = MessageCollectorActor.getAllMatching(
- leaderActor, ApplyState.class);
+ final var applyStateList = MessageCollectorActor.getAllMatching(leaderActor, ApplyState.class);
assertEquals("ApplyState count", newLogIndex, applyStateList.size());
for (int i = 0; i <= newLogIndex - 1; i++) {
}
ApplyState last = applyStateList.get((int) newLogIndex - 1);
- assertEquals("getData", newEntry.getData(), last.getReplicatedLogEntry().getData());
+ assertEquals("getData", data, last.getReplicatedLogEntry().getData());
assertEquals("getIdentifier", id, last.getIdentifier());
}
final MockRaftActorContext actorContext = createActorContextWithFollower();
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
-
//clears leaders log
actorContext.getReplicatedLog().removeFrom(0);
//update follower timestamp
leader.markFollowerActive(FOLLOWER_ID);
- ByteString bs = toByteString(leadersSnapshot);
+ ByteString bs = toByteString(Map.of("1", "A", "2", "B", "3", "C"));
leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
- Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+ List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-1, null, null), ByteSource.wrap(bs.toByteArray())));
LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
- actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+ actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
// new entry
- SimpleReplicatedLogEntry entry =
- new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
-
- actorContext.getReplicatedLog().append(entry);
+ actorContext.getReplicatedLog().append(
+ new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
//update follower timestamp
leader.markFollowerActive(FOLLOWER_ID);
// this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
RaftActorBehavior raftBehavior = leader.handleMessage(
- leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+ leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
assertTrue(raftBehavior instanceof Leader);
leader.setSnapshotHolder(null);
// new entry
- SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
-
- actorContext.getReplicatedLog().append(entry);
+ actorContext.getReplicatedLog().append(
+ new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
//update follower timestamp
leader.markFollowerActive(FOLLOWER_ID);
- leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+ leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
assertEquals(2, cs.getLastTerm());
// if an initiate is started again when first is in progress, it shouldnt initiate Capture
- leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+ leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
}
}
// new entry
- SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
-
- actorContext.getReplicatedLog().append(entry);
+ actorContext.getReplicatedLog().append(
+ new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
//update follower timestamp
leader.markFollowerActive(FOLLOWER_ID);
MessageCollectorActor.clearMessages(followerActor);
// Sending Replicate message should not initiate another capture since the first is in progress.
- leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+ leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
// Similarly sending another AppendEntriesReply to force a snapshot should not initiate another capture.
// Now simulate the CaptureSnapshotReply to initiate snapshot install - the first chunk should be sent.
final byte[] bytes = new byte[]{1, 2, 3};
- installSnapshotStream.get().get().write(bytes);
+ installSnapshotStream.get().orElseThrow().write(bytes);
actorContext.getSnapshotManager().persist(ByteState.of(bytes), installSnapshotStream.get(),
Runtime.getRuntime().totalMemory());
MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
leader.getFollower(FOLLOWER_ID).setNextIndex(0);
byte[] bytes = toByteString(leadersSnapshot).toByteArray();
- Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+ Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
leader.getFollower(FOLLOWER_ID).setNextIndex(-1);
byte[] bytes = toByteString(leadersSnapshot).toByteArray();
- Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+ Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
ByteString bs = toByteString(leadersSnapshot);
leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
- Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+ List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-1, null, null), ByteSource.wrap(bs.toByteArray())));
LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
- actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+ actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
while (!fts.isLastChunk(fts.getChunkIndex())) {
DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl() {
@Override
- public int getSnapshotChunkSize() {
+ public int getMaximumMessageSliceSize() {
return 50;
}
};
ByteString bs = toByteString(leadersSnapshot);
Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
- Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
- -1, null, null);
+ List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
actorContext.setConfigParams(new DefaultConfigParamsImpl() {
@Override
- public int getSnapshotChunkSize() {
+ public int getMaximumMessageSliceSize() {
return 50;
}
});
ByteString bs = toByteString(leadersSnapshot);
Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
- Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
- -1, null, null);
+ List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
actorContext.setConfigParams(new DefaultConfigParamsImpl() {
@Override
- public int getSnapshotChunkSize() {
+ public int getMaximumMessageSliceSize() {
return 50;
}
});
ByteString bs = toByteString(leadersSnapshot);
Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
- Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
- -1, null, null);
+ List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE,
- installSnapshot.getLastChunkHashCode().getAsInt());
+ assertEquals(OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE),
+ installSnapshot.getLastChunkHashCode());
final int hashCode = Arrays.hashCode(installSnapshot.getData());
assertEquals(2, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(hashCode, installSnapshot.getLastChunkHashCode().getAsInt());
+ assertEquals(OptionalInt.of(hashCode), installSnapshot.getLastChunkHashCode());
}
@Test
private MockRaftActorContext createActorContextWithFollower() {
MockRaftActorContext actorContext = createActorContext();
- actorContext.setPeerAddresses(ImmutableMap.<String, String>builder().put(FOLLOWER_ID,
- followerActor.path().toString()).build());
+ actorContext.setPeerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString()));
return actorContext;
}
DefaultConfigParamsImpl followerConfig = new DefaultConfigParamsImpl();
followerConfig.setElectionTimeoutFactor(10000);
followerActorContext.setConfigParams(followerConfig);
- followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+ followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
return followerActorContext;
}
final MockRaftActorContext leaderActorContext = createActorContext();
MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
- followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+ followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
Follower follower = new Follower(followerActorContext);
followerActor.underlyingActor().setBehavior(follower);
FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID);
assertEquals(payloadVersion, leader.getLeaderPayloadVersion());
- assertEquals(RaftVersions.HELIUM_VERSION, followerInfo.getRaftVersion());
+ assertEquals(RaftVersions.FLUORINE_VERSION, followerInfo.getRaftVersion());
AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, true, 2, 1, payloadVersion);
MockRaftActorContext leaderActorContext = createActorContextWithFollower();
((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
new FiniteDuration(1000, TimeUnit.SECONDS));
- ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(2);
+ // Note: the size here depends on estimate
+ ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(246);
leaderActorContext.setReplicatedLog(
new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 4, 1).build());
logStart("testReplicationWithPayloadSizeThatExceedsThreshold");
final int serializedSize = SerializationUtils.serialize(new AppendEntries(1, LEADER_ID, -1, -1,
- Arrays.asList(new SimpleReplicatedLogEntry(0, 1,
+ List.of(new SimpleReplicatedLogEntry(0, 1,
new MockRaftActorContext.MockPayload("large"))), 0, -1, (short)0)).length;
final MockRaftActorContext.MockPayload largePayload =
new MockRaftActorContext.MockPayload("large", serializedSize);
MockRaftActorContext leaderActorContext = createActorContextWithFollower();
((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
new FiniteDuration(300, TimeUnit.MILLISECONDS));
- ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(serializedSize - 50);
+ ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(serializedSize - 50);
leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
leaderActorContext.setCommitIndex(-1);
leaderActorContext.setLastApplied(-1);
((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
new FiniteDuration(100, TimeUnit.MILLISECONDS));
((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setElectionTimeoutFactor(1);
- ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(10);
+ ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(10);
leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
leaderActorContext.setCommitIndex(-1);
leaderActorContext.setLastApplied(-1);
MessageCollectorActor.clearMessages(followerActor);
sendReplicate(leaderActorContext, term, 0, new MockRaftActorContext.MockPayload("large",
- leaderActorContext.getConfigParams().getSnapshotChunkSize() + 1));
+ leaderActorContext.getConfigParams().getMaximumMessageSliceSize() + 1));
MessageCollectorActor.expectFirstMatching(followerActor, MessageSlice.class);
// Sleep for at least 3 * election timeout so the slicing state expires.
// Initial heartbeat shouldn't have the leader address
AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertFalse(appendEntries.getLeaderAddress().isPresent());
+ assertNull(appendEntries.leaderAddress());
MessageCollectorActor.clearMessages(followerActor);
// Send AppendEntriesReply indicating the follower needs the leader address
leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertTrue(appendEntries.getLeaderAddress().isPresent());
- assertEquals(leaderActor.path().toString(), appendEntries.getLeaderAddress().get());
+ assertEquals(leaderActor.path().toString(), appendEntries.leaderAddress());
MessageCollectorActor.clearMessages(followerActor);
// Send AppendEntriesReply indicating the follower does not need the leader address
leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertFalse(appendEntries.getLeaderAddress().isPresent());
+ assertNull(appendEntries.leaderAddress());
}
@Override
assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor());
}
- private class MockConfigParamsImpl extends DefaultConfigParamsImpl {
+ private static class MockConfigParamsImpl extends DefaultConfigParamsImpl {
private final long electionTimeOutIntervalMillis;
- private final int snapshotChunkSize;
+ private final int maximumMessageSliceSize;
- MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int snapshotChunkSize) {
+ MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int maximumMessageSliceSize) {
this.electionTimeOutIntervalMillis = electionTimeOutIntervalMillis;
- this.snapshotChunkSize = snapshotChunkSize;
+ this.maximumMessageSliceSize = maximumMessageSliceSize;
}
@Override
}
@Override
- public int getSnapshotChunkSize() {
- return snapshotChunkSize;
+ public int getMaximumMessageSliceSize() {
+ return maximumMessageSliceSize;
}
}
}
import akka.protobuf.ByteString;
import com.google.common.io.ByteSource;
import java.io.IOException;
-import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
-import java.util.Map;
import java.util.OptionalInt;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class SnapshotTrackerTest {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotTrackerTest.class);
+ private final HashMap<String, String> data = new HashMap<>();
+
@Mock
private RaftActorContext mockContext;
private FileBackedOutputStream fbos;
- private Map<String, String> data;
private ByteString byteString;
private byte[] chunk1;
private byte[] chunk2;
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
- data = new HashMap<>();
data.put("key1", "value1");
data.put("key2", "value2");
data.put("key3", "value3");
- byteString = ByteString.copyFrom(SerializationUtils.serialize((Serializable) data));
+ byteString = ByteString.copyFrom(SerializationUtils.serialize(data));
chunk1 = getNextChunk(byteString, 0, 10);
chunk2 = getNextChunk(byteString, 10, 10);
chunk3 = getNextChunk(byteString, 20, byteString.size());
int start = offset;
if (size > snapshotLength) {
size = snapshotLength;
- } else {
- if (start + size > snapshotLength) {
- size = snapshotLength - start;
- }
+ } else if (start + size > snapshotLength) {
+ size = snapshotLength - start;
}
byte[] nextChunk = new byte[size];
*/
package org.opendaylight.controller.cluster.raft.client.messages;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class ShutdownTest {
-
@Test
public void test() {
- Shutdown cloned = (Shutdown) SerializationUtils.clone(Shutdown.INSTANCE);
+ final var bytes = SerializationUtils.serialize(Shutdown.INSTANCE);
+ assertEquals(86, bytes.length);
+ final var cloned = SerializationUtils.deserialize(bytes);
assertSame("Cloned instance", Shutdown.INSTANCE, cloned);
}
}
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.RaftVersions;
* @author Thomas Pantelis
*/
public class AppendEntriesReplyTest {
-
@Test
public void testSerialization() {
- AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
- RaftVersions.CURRENT_VERSION);
- AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+ final var expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
+ RaftVersions.CURRENT_VERSION);
- assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
- assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
- assertEquals("getLogLastTerm", expected.getLogLastTerm(), cloned.getLogLastTerm());
- assertEquals("getLogLastIndex", expected.getLogLastIndex(), cloned.getLogLastIndex());
- assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
- assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
- assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
- assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
- }
-
- @Test
- @Deprecated
- public void testPreFluorineSerialization() {
- AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
- RaftVersions.BORON_VERSION);
- AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(98, bytes.length);
+ final var cloned = (AppendEntriesReply) SerializationUtils.deserialize(bytes);
assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
- assertEquals("isNeedsLeaderAddress", false, cloned.isNeedsLeaderAddress());
+ assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
}
}
package org.opendaylight.controller.cluster.raft.messages;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import java.util.Arrays;
import java.util.Iterator;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.RaftVersions;
* @author Thomas Pantelis
*/
public class AppendEntriesTest {
-
@Test
public void testSerialization() {
ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
// Without leader address
- AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
- -1, payloadVersion, RaftVersions.CURRENT_VERSION, null);
+ var expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+ RaftVersions.CURRENT_VERSION, null);
- AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
+ var bytes = SerializationUtils.serialize(expected);
+ assertEquals(285, bytes.length);
+ var cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
// With leader address
- expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
- -1, payloadVersion, RaftVersions.CURRENT_VERSION, "leader address");
+ expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+ RaftVersions.CURRENT_VERSION, "leader address");
- cloned = (AppendEntries) SerializationUtils.clone(expected);
+ bytes = SerializationUtils.serialize(expected);
+ assertEquals(301, bytes.length);
+ cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
}
- @Test
- @Deprecated
- public void testPreFluorineSerialization() {
- ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
-
- ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(3, 4, new MockPayload("payload2"));
-
- short payloadVersion = 5;
-
- AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
- -1, payloadVersion, RaftVersions.BORON_VERSION, "leader address");
-
- AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
-
- verifyAppendEntries(expected, cloned, RaftVersions.BORON_VERSION);
- }
-
- private static void verifyAppendEntries(AppendEntries expected, AppendEntries actual, short recipientRaftVersion) {
+ private static void verifyAppendEntries(final AppendEntries expected, final AppendEntries actual,
+ final short recipientRaftVersion) {
assertEquals("getLeaderId", expected.getLeaderId(), actual.getLeaderId());
assertEquals("getTerm", expected.getTerm(), actual.getTerm());
assertEquals("getLeaderCommit", expected.getLeaderCommit(), actual.getLeaderCommit());
verifyReplicatedLogEntry(iter.next(), e);
}
- if (recipientRaftVersion >= RaftVersions.FLUORINE_VERSION) {
- assertEquals("getLeaderAddress", expected.getLeaderAddress(), actual.getLeaderAddress());
- assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
- } else {
- assertFalse(actual.getLeaderAddress().isPresent());
- assertEquals("getLeaderRaftVersion", RaftVersions.BORON_VERSION, actual.getLeaderRaftVersion());
- }
+ assertEquals("getLeaderAddress", expected.leaderAddress(), actual.leaderAddress());
+ assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
}
- private static void verifyReplicatedLogEntry(ReplicatedLogEntry expected, ReplicatedLogEntry actual) {
+ private static void verifyReplicatedLogEntry(final ReplicatedLogEntry expected, final ReplicatedLogEntry actual) {
assertEquals("getIndex", expected.getIndex(), actual.getIndex());
assertEquals("getTerm", expected.getTerm(), actual.getTerm());
assertEquals("getData", expected.getData().toString(), actual.getData().toString());
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class InstallSnapshotReplyTest {
-
@Test
public void testSerialization() {
- InstallSnapshotReply expected = new InstallSnapshotReply(5L, "follower", 1, true);
- InstallSnapshotReply cloned = (InstallSnapshotReply) SerializationUtils.clone(expected);
+ final var expected = new InstallSnapshotReply(5L, "follower", 1, true);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(95, bytes.length);
+ final var cloned = (InstallSnapshotReply) SerializationUtils.deserialize(bytes);
assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
-import java.io.Serializable;
-import java.util.Arrays;
+import java.util.List;
import java.util.Optional;
import java.util.OptionalInt;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.RaftVersions;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
* @author Thomas Pantelis
*/
public class InstallSnapshotTest {
+ @Test
+ public void testCurrentSerialization() {
+ testSerialization(RaftVersions.CURRENT_VERSION, 1262, 1125);
+ }
@Test
- public void testSerialization() {
+ public void testFluorineSerialization() {
+ testSerialization(RaftVersions.FLUORINE_VERSION, 1302, 1165);
+ }
+
+ private static void testSerialization(final short raftVersion, final int fullSize, final int emptySize) {
byte[] data = new byte[1000];
for (int i = 0, j = 0; i < data.length; i++) {
data[i] = (byte)j;
}
}
- ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+ var serverConfig = new ServerConfigurationPayload(List.of(
new ServerInfo("leader", true), new ServerInfo("follower", false)));
- InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321),
- Optional.of(serverConfig));
-
- Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION);
- assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass());
+ assertInstallSnapshot(fullSize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321),
+ Optional.of(serverConfig), raftVersion));
- InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized);
- verifyInstallSnapshot(expected, actual);
+ assertInstallSnapshot(emptySize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.empty(),
+ Optional.empty(), raftVersion));
+ }
- expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6);
- actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable(
- RaftVersions.CURRENT_VERSION));
- verifyInstallSnapshot(expected, actual);
+ private static void assertInstallSnapshot(final int expectedSize, final InstallSnapshot expected) {
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(expectedSize, bytes.length);
+ verifyInstallSnapshot(expected, (InstallSnapshot) SerializationUtils.deserialize(bytes));
}
private static void verifyInstallSnapshot(final InstallSnapshot expected, final InstallSnapshot actual) {
assertEquals("getServerConfig present", expected.getServerConfig().isPresent(),
actual.getServerConfig().isPresent());
if (expected.getServerConfig().isPresent()) {
- assertEquals("getServerConfig", expected.getServerConfig().get().getServerConfig(),
- actual.getServerConfig().get().getServerConfig());
+ assertEquals("getServerConfig", expected.getServerConfig().orElseThrow().getServerConfig(),
+ actual.getServerConfig().orElseThrow().getServerConfig());
}
}
}
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class RequestVoteReplyTest {
-
@Test
public void testSerialization() {
- RequestVoteReply expected = new RequestVoteReply(5, true);
- RequestVoteReply cloned = (RequestVoteReply) SerializationUtils.clone(expected);
+ final var expected = new RequestVoteReply(5, true);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(78, bytes.length);
+ final var cloned = (RequestVoteReply) SerializationUtils.deserialize(bytes);
assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
assertEquals("isVoteGranted", expected.isVoteGranted(), cloned.isVoteGranted());
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class RequestVoteTest {
-
@Test
public void testSerialization() {
- RequestVote expected = new RequestVote(4, "candidateId", 3, 2);
- RequestVote cloned = (RequestVote) SerializationUtils.clone(expected);
+ final var expected = new RequestVote(4, "candidateId", 3, 2);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(97, bytes.length);
+ final var cloned = (RequestVote) SerializationUtils.deserialize(bytes);
assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
assertEquals("getCandidateId", expected.getCandidateId(), cloned.getCandidateId());
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class ApplyJournalEntriesTest {
-
@Test
public void testSerialization() {
- ApplyJournalEntries expected = new ApplyJournalEntries(5);
- ApplyJournalEntries cloned = (ApplyJournalEntries) SerializationUtils.clone(expected);
+ final var expected = new ApplyJournalEntries(5);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(80, bytes.length);
+ final var cloned = (ApplyJournalEntries) SerializationUtils.deserialize(bytes);
assertEquals("getFromIndex", expected.getToIndex(), cloned.getToIndex());
}
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class DeleteEntriesTest {
-
@Test
public void testSerialization() {
- DeleteEntries expected = new DeleteEntries(5);
- DeleteEntries cloned = (DeleteEntries) SerializationUtils.clone(expected);
+ final var expected = new DeleteEntries(5);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(79, bytes.length);
+ final var cloned = (DeleteEntries) SerializationUtils.deserialize(bytes);
assertEquals("getFromIndex", expected.getFromIndex(), cloned.getFromIndex());
}
*/
package org.opendaylight.controller.cluster.raft.persisted;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
*
*/
public class EmptyStateTest {
-
@Test
public void testSerialization() {
- EmptyState cloned = (EmptyState) SerializationUtils.clone(EmptyState.INSTANCE);
+ final var bytes = SerializationUtils.serialize(EmptyState.INSTANCE);
+ assertEquals(82, bytes.length);
+ final var cloned = SerializationUtils.deserialize(bytes);
assertSame("cloned", EmptyState.INSTANCE, cloned);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Test;
+
+public class NoopPayloadTest {
+ @Test
+ public void testSerialization() {
+ final var bytes = SerializationUtils.serialize(NoopPayload.INSTANCE);
+ assertEquals(74, bytes.length);
+ assertSame(NoopPayload.INSTANCE, SerializationUtils.deserialize(bytes));
+ }
+}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import java.util.Arrays;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class ServerConfigurationPayloadTest {
-
@Test
public void testSerialization() {
- ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true),
- new ServerInfo("2", false)));
- ServerConfigurationPayload cloned = (ServerConfigurationPayload) SerializationUtils.clone(expected);
+ final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true),
+ new ServerInfo("2", false)));
+
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(125, bytes.length);
+ final var cloned = (ServerConfigurationPayload) SerializationUtils.deserialize(bytes);
assertEquals("getServerConfig", expected.getServerConfig(), cloned.getServerConfig());
}
@Test
public void testSize() {
- ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true)));
+ final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true)));
assertTrue(expected.size() > 0);
}
}
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
* @author Thomas Pantelis
*/
public class SimpleReplicatedLogEntryTest {
-
@Test
public void testSerialization() {
- SimpleReplicatedLogEntry expected = new SimpleReplicatedLogEntry(0, 1,
- new MockRaftActorContext.MockPayload("A"));
- SimpleReplicatedLogEntry cloned = (SimpleReplicatedLogEntry) SerializationUtils.clone(expected);
+ final var expected = new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("A"));
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(218, bytes.length);
+ final var cloned = (SimpleReplicatedLogEntry) SerializationUtils.deserialize(bytes);
assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
assertEquals("getIndex", expected.getIndex(), cloned.getIndex());
import static org.junit.Assert.assertEquals;
-import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
* @author Thomas Pantelis
*/
public class SnapshotTest {
-
@Test
public void testSerialization() {
- testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, Arrays.asList(
- new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))));
- testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, Collections.emptyList());
+ testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, List.of(
+ new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))), 491);
+ testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, List.of(), 345);
}
- private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied) {
+ private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied,
+ final int expectedSize) {
long lastIndex = 6;
long lastTerm = 2;
long lastAppliedIndex = 5;
long lastAppliedTerm = 1;
long electionTerm = 3;
String electionVotedFor = "member-1";
- ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+ ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
new ServerInfo("1", true), new ServerInfo("2", false)));
- Snapshot expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
+ final var expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
- Snapshot cloned = (Snapshot) SerializationUtils.clone(expected);
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(expectedSize, bytes.length);
+ final var cloned = (Snapshot) SerializationUtils.deserialize(bytes);
assertEquals("lastIndex", expected.getLastIndex(), cloned.getLastIndex());
assertEquals("lastTerm", expected.getLastTerm(), cloned.getLastTerm());
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
* @author Thomas Pantelis
*/
public class UpdateElectionTermTest {
-
@Test
public void testSerialization() {
- UpdateElectionTerm expected = new UpdateElectionTerm(5, "leader");
- UpdateElectionTerm cloned = (UpdateElectionTerm) SerializationUtils.clone(expected);
+ final var expected = new UpdateElectionTerm(5, "leader");
+ final var bytes = SerializationUtils.serialize(expected);
+ assertEquals(88, bytes.length);
+ final var cloned = (UpdateElectionTerm) SerializationUtils.deserialize(bytes);
assertEquals("getCurrentTerm", expected.getCurrentTerm(), cloned.getCurrentTerm());
assertEquals("getVotedFor", expected.getVotedFor(), cloned.getVotedFor());
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import scala.Option;
import scala.concurrent.Future;
import scala.jdk.javaapi.CollectionConverters;
final Class<?> ofType;
WriteMessagesComplete(final int count, final Class<?> ofType) {
- this.latch = new CountDownLatch(count);
+ latch = new CountDownLatch(count);
this.ofType = ofType;
}
}
if (++count <= max && entry.getKey() >= fromSequenceNr && entry.getKey() <= toSequenceNr) {
PersistentRepr persistentMessage =
new PersistentImpl(deserialize(entry.getValue()), entry.getKey(), persistenceId,
- null, false, null, null, 0);
+ null, false, null, null, 0, Option.empty());
replayCallback.accept(persistentMessage);
}
}
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+
# enable to test serialization only.
serialize-messages = off
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
- <!-- Akka -->
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-core</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence-tck_2.13</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
-
- <!-- Codahale -->
<dependency>
- <groupId>io.dropwizard.metrics</groupId>
- <artifactId>metrics-core</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>atomix-storage</artifactId>
</dependency>
-
- <!-- Scala -->
<dependency>
- <groupId>org.scala-lang</groupId>
- <artifactId>scala-library</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
</dependency>
-
- <!-- Clustering commons for metrics -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ </dependency>
- <!-- Atomix -->
<dependency>
- <groupId>io.atomix</groupId>
- <artifactId>atomix-storage</artifactId>
- <version>3.1.5</version>
- <scope>provided</scope>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
</dependency>
<dependency>
- <groupId>io.atomix</groupId>
- <artifactId>atomix-utils</artifactId>
- <version>3.1.5</version>
- <scope>provided</scope>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence-tck_2.13</artifactId>
</dependency>
-
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
- <!-- atomix.io is using an older Guava, so let's embed it to prevent duplicates -->
- <Embed-Dependency>*;inline=true;groupId=io.atomix</Embed-Dependency>
- </instructions>
- </configuration>
- </plugin>
- </plugins>
- </build>
-
<scm>
<connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
--- /dev/null
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.codahale.metrics.Histogram;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
+
+/**
+ * Abstraction of a data journal. This provides a unified interface towards {@link SegmentedJournalActor}, allowing
+ * specialization for various formats.
+ */
+abstract class DataJournal {
+ // Mirrors fields from associated actor
+ final @NonNull String persistenceId;
+ private final Histogram messageSize;
+
+ // Tracks largest message size we have observed either during recovery or during write
+ private int largestObservedSize;
+
+ DataJournal(final String persistenceId, final Histogram messageSize) {
+ this.persistenceId = requireNonNull(persistenceId);
+ this.messageSize = requireNonNull(messageSize);
+ }
+
+ final void recordMessageSize(final int size) {
+ messageSize.update(size);
+ updateLargestSize(size);
+ }
+
+ final void updateLargestSize(final int size) {
+ if (size > largestObservedSize) {
+ largestObservedSize = size;
+ }
+ }
+
+ /**
+ * Return the last sequence number completely written to the journal.
+ *
+ * @return Last written sequence number, {@code -1} if there are no in the journal.
+ */
+ abstract long lastWrittenSequenceNr();
+
+ /**
+ * Delete all messages up to specified sequence number.
+ *
+ * @param sequenceNr Sequence number to delete to.
+ */
+ abstract void deleteTo(long sequenceNr);
+
+ /**
+ * Delete all messages up to specified sequence number.
+ *
+ * @param sequenceNr Sequence number to compact to.
+ */
+ abstract void compactTo(long sequenceNr);
+
+ /**
+ * Close this journal, freeing up resources associated with it.
+ */
+ abstract void close();
+
+ /**
+ * Handle a request to replay messages.
+ *
+ * @param message Request message
+ * @param fromSequenceNr Sequence number to replay from, adjusted for deletions
+ */
+ abstract void handleReplayMessages(@NonNull ReplayMessages message, long fromSequenceNr);
+
+ /**
+ * Handle a request to store some messages.
+ *
+ * @param message {@link WriteMessages} message
+ * @return a {@link WrittenMessages} object
+ */
+ abstract @NonNull WrittenMessages handleWriteMessages(@NonNull WriteMessages message);
+
+ /**
+ * Flush all messages to durable storage.
+ */
+ abstract void flush();
+}
import static java.util.Objects.requireNonNull;
import akka.persistence.PersistentRepr;
-import io.atomix.storage.journal.JournalSegment;
/**
* A single entry in the data journal. We do not store {@code persistenceId} for each entry, as that is a
- * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by {@link JournalSegment}'s
- * index.
- *
- * @author Robert Varga
+ * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by a particular journal
+ * segment's index.
*/
-abstract class DataJournalEntry {
+abstract sealed class DataJournalEntry {
+ /**
+ * A single data journal entry on its way to the backing file.
+ */
static final class ToPersistence extends DataJournalEntry {
private final PersistentRepr repr;
}
}
+ /**
+ * A single data journal entry on its way from the backing file.
+ */
static final class FromPersistence extends DataJournalEntry {
private final String manifest;
private final String writerUuid;
--- /dev/null
+/*
+ * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorSystem;
+import akka.actor.ExtendedActorSystem;
+import akka.persistence.PersistentRepr;
+import akka.serialization.JavaSerializer;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+
+/**
+ * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
+ * a nested JavaSerializer to handle the payload.
+ *
+ * <p>
+ * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design:
+ * {@link #write(EntryOutput, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper
+ * around a {@link PersistentRepr}, while {@link #read(EntryInput)} produces an {@link FromPersistence}, which
+ * needs further processing to reconstruct a {@link PersistentRepr}.
+ */
+final class DataJournalEntrySerdes implements EntrySerdes<DataJournalEntry> {
+ private final ExtendedActorSystem actorSystem;
+
+ DataJournalEntrySerdes(final ActorSystem actorSystem) {
+ this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
+ }
+
+ @Override
+ public void write(final EntryOutput output, final DataJournalEntry entry) throws IOException {
+ if (entry instanceof ToPersistence toPersistence) {
+ final var repr = toPersistence.repr();
+ output.writeString(repr.manifest());
+ output.writeString(repr.writerUuid());
+ output.writeObject(repr.payload());
+ } else {
+ throw new VerifyException("Unexpected entry " + entry);
+ }
+ }
+
+ @Override
+ public DataJournalEntry read(final EntryInput input) throws IOException {
+ return new FromPersistence(input.readString(), input.readString(),
+ JavaSerializer.currentSystem().withValue(actorSystem, (Callable<Object>) input::readObject));
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.akka.segjournal;
-
-import static com.google.common.base.Verify.verify;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSystem;
-import akka.actor.ExtendedActorSystem;
-import akka.persistence.PersistentRepr;
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.Serializer;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
-import com.esotericsoftware.kryo.serializers.JavaSerializer;
-import java.util.concurrent.Callable;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
-
-/**
- * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
- * a nested JavaSerializer to handle the payload.
- *
- * <p>
- * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design:
- * {@link #write(Kryo, Output, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper
- * around a {@link PersistentRepr}, while {@link #read(Kryo, Input, Class)} produces an {@link FromPersistence}, which
- * needs further processing to reconstruct a {@link PersistentRepr}.
- *
- * @author Robert Varga
- */
-final class DataJournalEntrySerializer extends Serializer<DataJournalEntry> {
- private final JavaSerializer serializer = new JavaSerializer();
- private final ExtendedActorSystem actorSystem;
-
- DataJournalEntrySerializer(final ActorSystem actorSystem) {
- this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
- }
-
- @Override
- public void write(final Kryo kryo, final Output output, final DataJournalEntry object) {
- verify(object instanceof ToPersistence);
- final PersistentRepr repr = ((ToPersistence) object).repr();
- output.writeString(repr.manifest());
- output.writeString(repr.writerUuid());
- serializer.write(kryo, output, repr.payload());
- }
-
- @Override
- public DataJournalEntry read(final Kryo kryo, final Input input, final Class<DataJournalEntry> type) {
- final String manifest = input.readString();
- final String uuid = input.readString();
- final Object payload = akka.serialization.JavaSerializer.currentSystem().withValue(actorSystem,
- (Callable<Object>)() -> serializer.read(kryo, input, type));
- return new FromPersistence(manifest, uuid, payload);
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import akka.actor.ActorSystem;
+import akka.persistence.PersistentRepr;
+import com.codahale.metrics.Histogram;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalReader;
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalWriter;
+import io.atomix.storage.journal.SegmentedJournal;
+import io.atomix.storage.journal.StorageLevel;
+import java.io.File;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.jdk.javaapi.CollectionConverters;
+
+/**
+ * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
+ */
+final class DataJournalV0 extends DataJournal {
+ private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);
+
+ private final SegmentedJournal<DataJournalEntry> entries;
+
+ DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
+ final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
+ super(persistenceId, messageSize);
+ entries = SegmentedJournal.<DataJournalEntry>builder()
+ .withStorageLevel(storage).withDirectory(directory).withName("data")
+ .withNamespace(JournalSerdes.builder()
+ .register(new DataJournalEntrySerdes(system), FromPersistence.class, ToPersistence.class)
+ .build())
+ .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
+ .build();
+ }
+
+ @Override
+ long lastWrittenSequenceNr() {
+ return entries.writer().getLastIndex();
+ }
+
+ @Override
+ void deleteTo(final long sequenceNr) {
+ entries.writer().commit(sequenceNr);
+ }
+
+ @Override
+ void compactTo(final long sequenceNr) {
+ entries.compact(sequenceNr + 1);
+ }
+
+ @Override
+ void close() {
+ flush();
+ entries.close();
+ }
+
+ @Override
+ void flush() {
+ entries.writer().flush();
+ }
+
+ @Override
+ @SuppressWarnings("checkstyle:illegalCatch")
+ void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
+ try (var reader = entries.openReader(fromSequenceNr)) {
+ handleReplayMessages(reader, message);
+ } catch (Exception e) {
+ LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
+ message.promise.failure(e);
+ } finally {
+ message.promise.success(null);
+ }
+ }
+
+ private void handleReplayMessages(final JournalReader<DataJournalEntry> reader, final ReplayMessages message) {
+ int count = 0;
+ while (count < message.max && reader.getNextIndex() <= message.toSequenceNr) {
+ final var repr = reader.tryNext((index, entry, size) -> {
+ LOG.trace("{}: replay index={} entry={}", persistenceId, index, entry);
+ updateLargestSize(size);
+ if (entry instanceof FromPersistence fromPersistence) {
+ return fromPersistence.toRepr(persistenceId, index);
+ }
+ throw new VerifyException("Unexpected entry " + entry);
+ });
+
+ if (repr == null) {
+ break;
+ }
+
+ LOG.debug("{}: replaying {}", persistenceId, repr);
+ message.replayCallback.accept(repr);
+ count++;
+ }
+ LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
+ }
+
+ @Override
+ @SuppressWarnings("checkstyle:illegalCatch")
+ WrittenMessages handleWriteMessages(final WriteMessages message) {
+ final int count = message.size();
+ final var responses = new ArrayList<>();
+ final var writer = entries.writer();
+ long writtenBytes = 0;
+
+ for (int i = 0; i < count; ++i) {
+ final long mark = writer.getLastIndex();
+ final var request = message.getRequest(i);
+
+ final var reprs = CollectionConverters.asJava(request.payload());
+ LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark);
+ try {
+ writtenBytes += writePayload(writer, reprs);
+ } catch (Exception e) {
+ LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e);
+ responses.add(e);
+ writer.truncate(mark);
+ continue;
+ }
+ responses.add(null);
+ }
+
+ return new WrittenMessages(message, responses, writtenBytes);
+ }
+
+ private long writePayload(final JournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
+ long bytes = 0;
+ for (var repr : reprs) {
+ final Object payload = repr.payload();
+ if (!(payload instanceof Serializable)) {
+ throw new UnsupportedOperationException("Non-serializable payload encountered "
+ + payload.getClass());
+ }
+
+ LOG.trace("{}: starting append of {}", persistenceId, payload);
+ final var entry = writer.append(new ToPersistence(repr));
+ final int size = entry.size();
+ LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
+ recordMessageSize(size);
+ bytes += size;
+ }
+ return bytes;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+enum LongEntrySerdes implements EntrySerdes<Long> {
+ LONG_ENTRY_SERDES {
+ @Override
+ public Long read(final EntryInput input) throws IOException {
+ return input.readLong();
+ }
+
+ @Override
+ public void write(final EntryOutput output, final Long entry) throws IOException {
+ output.writeLong(entry);
+ }
+ }
+}
import akka.persistence.PersistentRepr;
import akka.persistence.journal.japi.AsyncWriteJournal;
import com.typesafe.config.Config;
-import com.typesafe.config.ConfigMemorySize;
-import io.atomix.storage.StorageLevel;
import io.atomix.storage.journal.SegmentedJournal;
+import io.atomix.storage.journal.StorageLevel;
import java.io.File;
-import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
* An Akka persistence journal implementation on top of {@link SegmentedJournal}. This actor represents aggregation
* of multiple journals and performs a receptionist job between Akka and invidual per-persistenceId actors. See
* {@link SegmentedJournalActor} for details on how the persistence works.
- *
- * @author Robert Varga
*/
public class SegmentedFileJournal extends AsyncWriteJournal {
public static final String STORAGE_ROOT_DIRECTORY = "root-directory";
public static final int STORAGE_MAX_ENTRY_SIZE_DEFAULT = 16 * 1024 * 1024;
public static final String STORAGE_MAX_SEGMENT_SIZE = "max-segment-size";
public static final int STORAGE_MAX_SEGMENT_SIZE_DEFAULT = STORAGE_MAX_ENTRY_SIZE_DEFAULT * 8;
+ public static final String STORAGE_MAX_UNFLUSHED_BYTES = "max-unflushed-bytes";
public static final String STORAGE_MEMORY_MAPPED = "memory-mapped";
private static final Logger LOG = LoggerFactory.getLogger(SegmentedFileJournal.class);
private final StorageLevel storage;
private final int maxEntrySize;
private final int maxSegmentSize;
+ private final int maxUnflushedBytes;
public SegmentedFileJournal(final Config config) {
rootDir = new File(config.getString(STORAGE_ROOT_DIRECTORY));
maxEntrySize = getBytes(config, STORAGE_MAX_ENTRY_SIZE, STORAGE_MAX_ENTRY_SIZE_DEFAULT);
maxSegmentSize = getBytes(config, STORAGE_MAX_SEGMENT_SIZE, STORAGE_MAX_SEGMENT_SIZE_DEFAULT);
+ maxUnflushedBytes = getBytes(config, STORAGE_MAX_UNFLUSHED_BYTES, maxEntrySize);
if (config.hasPath(STORAGE_MEMORY_MAPPED)) {
storage = config.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK;
@Override
public Future<Iterable<Optional<Exception>>> doAsyncWriteMessages(final Iterable<AtomicWrite> messages) {
- final Map<ActorRef, WriteMessages> map = new HashMap<>();
- final List<Future<Optional<Exception>>> result = new ArrayList<>();
+ final var map = new HashMap<ActorRef, WriteMessages>();
+ final var result = new ArrayList<Future<Optional<Exception>>>();
- for (AtomicWrite message : messages) {
- final String persistenceId = message.persistenceId();
- final ActorRef handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
+ for (var message : messages) {
+ final var persistenceId = message.persistenceId();
+ final var handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
result.add(map.computeIfAbsent(handler, key -> new WriteMessages()).add(message));
}
}
private ActorRef createHandler(final String persistenceId) {
- final String directoryName = encode(persistenceId);
- final File directory = new File(rootDir, directoryName);
+ final var directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8);
+ final var directory = new File(rootDir, directoryName);
LOG.debug("Creating handler for {} in directory {}", persistenceId, directory);
- final ActorRef handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
- maxEntrySize, maxSegmentSize));
+ final var handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
+ maxEntrySize, maxSegmentSize, maxUnflushedBytes));
LOG.debug("Directory {} handled by {}", directory, handler);
return handler;
}
private <T> Future<T> delegateMessage(final String persistenceId, final AsyncMessage<T> message) {
- final ActorRef handler = handlers.get(persistenceId);
+ final var handler = handlers.get(persistenceId);
if (handler == null) {
return Futures.failed(new IllegalStateException("Cannot find handler for " + persistenceId));
}
return message.promise.future();
}
- private static String encode(final String str) {
- try {
- return URLEncoder.encode(str, StandardCharsets.UTF_8.name());
- } catch (UnsupportedEncodingException e) {
- // Shouldn't happen
- LOG.warn("Error encoding {}", str, e);
- return str;
- }
- }
-
private static int getBytes(final Config config, final String path, final int defaultValue) {
if (!config.hasPath(path)) {
return defaultValue;
}
- final ConfigMemorySize value = config.getMemorySize(path);
- final long result = value.toBytes();
- checkArgument(result <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE);
- return (int) result;
+ final long value = config.getBytes(path);
+ checkArgument(value <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE);
+ return (int) value;
}
}
import static java.util.Objects.requireNonNull;
import akka.actor.AbstractActor;
+import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.japi.pf.ReceiveBuilder;
import akka.persistence.AtomicWrite;
import akka.persistence.PersistentRepr;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.base.MoreObjects;
-import io.atomix.storage.StorageLevel;
+import com.google.common.base.Stopwatch;
import io.atomix.storage.journal.Indexed;
+import io.atomix.storage.journal.JournalSerdes;
import io.atomix.storage.journal.SegmentedJournal;
-import io.atomix.storage.journal.SegmentedJournalReader;
-import io.atomix.storage.journal.SegmentedJournalWriter;
-import io.atomix.utils.serializer.Namespace;
+import io.atomix.storage.journal.StorageLevel;
import java.io.File;
-import java.io.Serializable;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.reporting.MetricsReporter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
import scala.concurrent.Promise;
-import scala.jdk.javaapi.CollectionConverters;
/**
* This actor handles a single PersistentActor's journal. The journal is split into two {@link SegmentedJournal}s:
* <p>
* Split-file approach allows us to treat sequence numbers and indices as equivalent, without maintaining any explicit
* mapping information. The only additional information we need to maintain is the last deleted sequence number.
- *
- * @author Robert Varga
*/
-final class SegmentedJournalActor extends AbstractActor {
- abstract static class AsyncMessage<T> {
+abstract sealed class SegmentedJournalActor extends AbstractActor {
+ abstract static sealed class AsyncMessage<T> {
final Promise<T> promise = Promise.apply();
}
}
}
- private static final class ReplayMessages extends AsyncMessage<Void> {
+ static final class ReplayMessages extends AsyncMessage<Void> {
private final long fromSequenceNr;
- private final long toSequenceNr;
- private final long max;
- private final Consumer<PersistentRepr> replayCallback;
+ final long toSequenceNr;
+ final long max;
+ final Consumer<PersistentRepr> replayCallback;
ReplayMessages(final long fromSequenceNr,
final long toSequenceNr, final long max, final Consumer<PersistentRepr> replayCallback) {
private final List<Promise<Optional<Exception>>> results = new ArrayList<>();
Future<Optional<Exception>> add(final AtomicWrite write) {
- final Promise<Optional<Exception>> promise = Promise.apply();
+ final var promise = Promise.<Optional<Exception>>apply();
requests.add(write);
results.add(promise);
return promise.future();
}
+ int size() {
+ return requests.size();
+ }
+
+ AtomicWrite getRequest(final int index) {
+ return requests.get(index);
+ }
+
+ void setFailure(final int index, final Exception cause) {
+ results.get(index).success(Optional.of(cause));
+ }
+
+ void setSuccess(final int index) {
+ results.get(index).success(Optional.empty());
+ }
+
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("requests", requests).toString();
}
}
+ // responses == null on success, Exception on failure
+ record WrittenMessages(WriteMessages message, List<Object> responses, long writtenBytes) {
+ WrittenMessages {
+ verify(responses.size() == message.size(), "Mismatched %s and %s", message, responses);
+ verify(writtenBytes >= 0, "Unexpected length %s", writtenBytes);
+ }
+
+ private void complete() {
+ for (int i = 0, size = responses.size(); i < size; ++i) {
+ if (responses.get(i) instanceof Exception ex) {
+ message.setFailure(i, ex);
+ } else {
+ message.setSuccess(i);
+ }
+ }
+ }
+ }
+
+ /**
+ * A {@link SegmentedJournalActor} which delays issuing a flush operation until a watermark is reached or when the
+ * queue is empty.
+ *
+ * <p>
+ * The problem we are addressing is that there is a queue sitting in from of the actor, which we have no direct
+ * access to. Since a flush involves committing data to durable storage, that operation can easily end up dominating
+ * workloads.
+ *
+ * <p>
+ * We solve this by having an additional queue in which we track which messages were written and trigger a flush
+ * only when the number of bytes we have written exceeds specified limit. The other part is that each time this
+ * queue becomes non-empty, we send a dedicated message to self. This acts as a actor queue probe -- when we receive
+ * it, we know we have processed all messages that were in the queue when we first delayed the write.
+ *
+ * <p>
+ * The combination of these mechanisms ensure we use a minimal delay while also ensuring we take advantage of
+ * batching opportunities.
+ */
+ private static final class Delayed extends SegmentedJournalActor {
+ private static final class Flush extends AsyncMessage<Void> {
+ final long batch;
+
+ Flush(final long batch) {
+ this.batch = batch;
+ }
+ }
+
+ private record UnflushedWrite(WrittenMessages message, Stopwatch start, long count) {
+ UnflushedWrite {
+ requireNonNull(message);
+ requireNonNull(start);
+ }
+ }
+
+ private final ArrayDeque<UnflushedWrite> unflushedWrites = new ArrayDeque<>();
+ private final Stopwatch unflushedDuration = Stopwatch.createUnstarted();
+ private final long maxUnflushedBytes;
+
+ private long batch = 0;
+ private long unflushedBytes = 0;
+
+ Delayed(final String persistenceId, final File directory, final StorageLevel storage,
+ final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+ super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+ this.maxUnflushedBytes = maxUnflushedBytes;
+ }
+
+ @Override
+ ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+ return super.addMessages(builder).match(Flush.class, this::handleFlush);
+ }
+
+ private void handleFlush(final Flush message) {
+ if (message.batch == batch) {
+ flushWrites();
+ } else {
+ LOG.debug("{}: batch {} not flushed by {}", persistenceId(), batch, message.batch);
+ }
+ }
+
+ @Override
+ void onWrittenMessages(final WrittenMessages message, final Stopwatch started, final long count) {
+ boolean first = unflushedWrites.isEmpty();
+ if (first) {
+ unflushedDuration.start();
+ }
+ unflushedWrites.addLast(new UnflushedWrite(message, started, count));
+ unflushedBytes = unflushedBytes + message.writtenBytes;
+ if (unflushedBytes >= maxUnflushedBytes) {
+ LOG.debug("{}: reached {} unflushed journal bytes", persistenceId(), unflushedBytes);
+ flushWrites();
+ } else if (first) {
+ LOG.debug("{}: deferring journal flush", persistenceId());
+ self().tell(new Flush(++batch), ActorRef.noSender());
+ }
+ }
+
+ @Override
+ void flushWrites() {
+ final var unsyncedSize = unflushedWrites.size();
+ if (unsyncedSize == 0) {
+ // Nothing to flush
+ return;
+ }
+
+ LOG.debug("{}: flushing {} journal writes after {}", persistenceId(), unsyncedSize,
+ unflushedDuration.stop());
+ flushJournal(unflushedBytes, unsyncedSize);
+
+ final var sw = Stopwatch.createStarted();
+ unflushedWrites.forEach(write -> completeWriteMessages(write.message, write.start, write.count));
+ unflushedWrites.clear();
+ unflushedBytes = 0;
+ unflushedDuration.reset();
+ LOG.debug("{}: completed {} flushed journal writes in {}", persistenceId(), unsyncedSize, sw);
+ }
+ }
+
+ private static final class Immediate extends SegmentedJournalActor {
+ Immediate(final String persistenceId, final File directory, final StorageLevel storage,
+ final int maxEntrySize, final int maxSegmentSize) {
+ super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+ }
+
+ @Override
+ void onWrittenMessages(final WrittenMessages message, final Stopwatch started, final long count) {
+ flushJournal(message.writtenBytes, 1);
+ completeWriteMessages(message, started, count);
+ }
+
+ @Override
+ void flushWrites() {
+ // No-op
+ }
+ }
+
private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournalActor.class);
- private static final Namespace DELETE_NAMESPACE = Namespace.builder().register(Long.class).build();
+ private static final JournalSerdes DELETE_NAMESPACE = JournalSerdes.builder()
+ .register(LongEntrySerdes.LONG_ENTRY_SERDES, Long.class)
+ .build();
private static final int DELETE_SEGMENT_SIZE = 64 * 1024;
private final String persistenceId;
private Meter messageWriteCount;
// Tracks the size distribution of messages
private Histogram messageSize;
-
- private SegmentedJournal<DataJournalEntry> dataJournal;
+ // Tracks the number of messages completed for each flush
+ private Histogram flushMessages;
+ // Tracks the number of bytes completed for each flush
+ private Histogram flushBytes;
+ // Tracks the duration of flush operations
+ private Timer flushTime;
+
+ private DataJournal dataJournal;
private SegmentedJournal<Long> deleteJournal;
private long lastDelete;
- // Tracks largest message size we have observed either during recovery or during write
- private int largestObservedSize;
-
- SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
+ private SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
final int maxEntrySize, final int maxSegmentSize) {
this.persistenceId = requireNonNull(persistenceId);
this.directory = requireNonNull(directory);
}
static Props props(final String persistenceId, final File directory, final StorageLevel storage,
- final int maxEntrySize, final int maxSegmentSize) {
- return Props.create(SegmentedJournalActor.class, requireNonNull(persistenceId), directory, storage,
- maxEntrySize, maxSegmentSize);
+ final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+ final var pid = requireNonNull(persistenceId);
+ return maxUnflushedBytes > 0
+ ? Props.create(Delayed.class, pid, directory, storage, maxEntrySize, maxSegmentSize, maxUnflushedBytes)
+ : Props.create(Immediate.class, pid, directory, storage, maxEntrySize, maxSegmentSize);
+ }
+
+ final String persistenceId() {
+ return persistenceId;
+ }
+
+ final void flushJournal(final long bytes, final int messages) {
+ final var sw = Stopwatch.createStarted();
+ dataJournal.flush();
+ LOG.debug("{}: journal flush completed in {}", persistenceId, sw.stop());
+ flushBytes.update(bytes);
+ flushMessages.update(messages);
+ flushTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
}
@Override
public Receive createReceive() {
- return receiveBuilder()
- .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
- .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
- .match(ReplayMessages.class, this::handleReplayMessages)
- .match(WriteMessages.class, this::handleWriteMessages)
- .matchAny(this::handleUnknown)
- .build();
+ return addMessages(receiveBuilder())
+ .matchAny(this::handleUnknown)
+ .build();
+ }
+
+ ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+ return builder
+ .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
+ .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
+ .match(ReplayMessages.class, this::handleReplayMessages)
+ .match(WriteMessages.class, this::handleWriteMessages);
}
@Override
LOG.debug("{}: actor starting", persistenceId);
super.preStart();
- final MetricRegistry registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
- final String actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();
+ final var registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
+ final var actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();
batchWriteTime = registry.timer(MetricRegistry.name(actorName, "batchWriteTime"));
messageWriteCount = registry.meter(MetricRegistry.name(actorName, "messageWriteCount"));
messageSize = registry.histogram(MetricRegistry.name(actorName, "messageSize"));
+ flushBytes = registry.histogram(MetricRegistry.name(actorName, "flushBytes"));
+ flushMessages = registry.histogram(MetricRegistry.name(actorName, "flushMessages"));
+ flushTime = registry.timer(MetricRegistry.name(actorName, "flushTime"));
}
@Override
ensureOpen();
LOG.debug("{}: delete messages {}", persistenceId, message);
- final long to = Long.min(dataJournal.writer().getLastIndex(), message.toSequenceNr);
+ flushWrites();
+
+ final long to = Long.min(dataJournal.lastWrittenSequenceNr(), message.toSequenceNr);
LOG.debug("{}: adjusted delete to {}", persistenceId, to);
if (lastDelete < to) {
LOG.debug("{}: deleting entries up to {}", persistenceId, to);
lastDelete = to;
- final SegmentedJournalWriter<Long> deleteWriter = deleteJournal.writer();
- final Indexed<Long> entry = deleteWriter.append(lastDelete);
+ final var deleteWriter = deleteJournal.writer();
+ final var entry = deleteWriter.append(lastDelete);
deleteWriter.commit(entry.index());
- dataJournal.writer().commit(lastDelete);
+ dataJournal.deleteTo(lastDelete);
LOG.debug("{}: compaction started", persistenceId);
- dataJournal.compact(lastDelete + 1);
+ dataJournal.compactTo(lastDelete);
deleteJournal.compact(entry.index());
LOG.debug("{}: compaction finished", persistenceId);
} else {
final Long sequence;
if (directory.isDirectory()) {
ensureOpen();
- sequence = dataJournal.writer().getLastIndex();
+ flushWrites();
+ sequence = dataJournal.lastWrittenSequenceNr();
} else {
sequence = 0L;
}
message.promise.success(sequence);
}
- @SuppressWarnings("checkstyle:illegalCatch")
private void handleReplayMessages(final ReplayMessages message) {
LOG.debug("{}: replaying messages {}", persistenceId, message);
ensureOpen();
+ flushWrites();
final long from = Long.max(lastDelete + 1, message.fromSequenceNr);
LOG.debug("{}: adjusted fromSequenceNr to {}", persistenceId, from);
- try (SegmentedJournalReader<DataJournalEntry> reader = dataJournal.openReader(from)) {
- int count = 0;
- while (reader.hasNext() && count < message.max) {
- final Indexed<DataJournalEntry> next = reader.next();
- if (next.index() > message.toSequenceNr) {
- break;
- }
-
- LOG.trace("{}: replay {}", persistenceId, next);
- updateLargestSize(next.size());
- final DataJournalEntry entry = next.entry();
- verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);
-
- final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
- LOG.debug("{}: replaying {}", persistenceId, repr);
- message.replayCallback.accept(repr);
- count++;
- }
- LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
- } catch (Exception e) {
- LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
- message.promise.failure(e);
- } finally {
- message.promise.success(null);
- }
+ dataJournal.handleReplayMessages(message, from);
}
- @SuppressWarnings("checkstyle:illegalCatch")
private void handleWriteMessages(final WriteMessages message) {
ensureOpen();
- final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
- final long startTicks = System.nanoTime();
- final int count = message.requests.size();
- final long start = writer.getLastIndex();
-
- for (int i = 0; i < count; ++i) {
- final long mark = writer.getLastIndex();
- try {
- writeRequest(writer, message.requests.get(i));
- } catch (Exception e) {
- LOG.warn("{}: failed to write out request", persistenceId, e);
- message.results.get(i).success(Optional.of(e));
- writer.truncate(mark);
- continue;
- }
+ final var started = Stopwatch.createStarted();
+ final long start = dataJournal.lastWrittenSequenceNr();
+ final var writtenMessages = dataJournal.handleWriteMessages(message);
- message.results.get(i).success(Optional.empty());
- }
- writer.flush();
- batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS);
- messageWriteCount.mark(writer.getLastIndex() - start);
+ onWrittenMessages(writtenMessages, started, dataJournal.lastWrittenSequenceNr() - start);
}
- private void writeRequest(final SegmentedJournalWriter<DataJournalEntry> writer, final AtomicWrite request) {
- for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) {
- final Object payload = repr.payload();
- if (!(payload instanceof Serializable)) {
- throw new UnsupportedOperationException("Non-serializable payload encountered " + payload.getClass());
- }
-
- final int size = writer.append(new ToPersistence(repr)).size();
- messageSize.update(size);
- updateLargestSize(size);
- }
+ final void completeWriteMessages(final WrittenMessages message, final Stopwatch started, final long count) {
+ batchWriteTime.update(started.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
+ messageWriteCount.mark(count);
+ // log message after statistics are updated
+ LOG.debug("{}: write of {} bytes completed in {}", persistenceId, message.writtenBytes, started);
+ message.complete();
}
+ /**
+ * Handle a check of written messages.
+ *
+ * @param message Messages which were written
+ * @param started Stopwatch started when the write started
+ * @param count number of writes
+ */
+ abstract void onWrittenMessages(WrittenMessages message, Stopwatch started, long count);
+
private void handleUnknown(final Object message) {
LOG.error("{}: Received unknown message {}", persistenceId, message);
}
- private void updateLargestSize(final int size) {
- if (size > largestObservedSize) {
- largestObservedSize = size;
- }
- }
-
private void ensureOpen() {
if (dataJournal != null) {
verifyNotNull(deleteJournal);
return;
}
+ final var sw = Stopwatch.createStarted();
deleteJournal = SegmentedJournal.<Long>builder().withDirectory(directory).withName("delete")
.withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build();
- final Indexed<Long> lastEntry = deleteJournal.writer().getLastEntry();
- lastDelete = lastEntry == null ? 0 : lastEntry.entry();
-
- dataJournal = SegmentedJournal.<DataJournalEntry>builder()
- .withStorageLevel(storage).withDirectory(directory).withName("data")
- .withNamespace(Namespace.builder()
- .register(new DataJournalEntrySerializer(context().system()),
- FromPersistence.class, ToPersistence.class)
- .build())
- .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
- .build();
- final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
- writer.commit(lastDelete);
- LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId, writer.getLastIndex(),
- lastDelete);
+ final var lastDeleteRecovered = deleteJournal.openReader(deleteJournal.writer().getLastIndex())
+ .tryNext((index, value, length) -> value);
+ lastDelete = lastDeleteRecovered == null ? 0 : lastDeleteRecovered.longValue();
+
+ dataJournal = new DataJournalV0(persistenceId, messageSize, context().system(), storage, directory,
+ maxEntrySize, maxSegmentSize);
+ dataJournal.deleteTo(lastDelete);
+ LOG.debug("{}: journal open in {} with last index {}, deleted to {}", persistenceId, sw,
+ dataJournal.lastWrittenSequenceNr(), lastDelete);
}
+
+ abstract void flushWrites();
+
}
FileUtils.deleteQuietly(JOURNAL_DIR);
super.beforeAll();
}
+
+ @Override
+ public void afterAll() {
+ super.afterAll();
+ FileUtils.deleteQuietly(JOURNAL_DIR);
+ }
}
*/
package org.opendaylight.controller.akka.segjournal;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.persistence.PersistentRepr;
import akka.testkit.CallingThreadDispatcher;
import akka.testkit.javadsl.TestKit;
-import io.atomix.storage.StorageLevel;
+import io.atomix.storage.journal.StorageLevel;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.AsyncMessage;
import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
import scala.concurrent.Future;
-public class SegmentedFileJournalTest {
+@ExtendWith(MockitoExtension.class)
+class SegmentedFileJournalTest {
private static final File DIRECTORY = new File("target/sfj-test");
private static final int SEGMENT_SIZE = 1024 * 1024;
private static final int MESSAGE_SIZE = 512 * 1024;
+ private static final int FLUSH_SIZE = 16 * 1024;
private static ActorSystem SYSTEM;
+ @Mock
+ private Consumer<PersistentRepr> firstCallback;
+
private TestKit kit;
private ActorRef actor;
- @BeforeClass
- public static void beforeClass() {
+ @BeforeAll
+ static void beforeClass() {
SYSTEM = ActorSystem.create("test");
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterClass() {
TestKit.shutdownActorSystem(SYSTEM);
SYSTEM = null;
}
- @Before
- public void before() {
+ @BeforeEach
+ void before() {
kit = new TestKit(SYSTEM);
FileUtils.deleteQuietly(DIRECTORY);
actor = actor();
}
- @After
- public void after() {
+ @AfterEach
+ void after() {
actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ FileUtils.deleteQuietly(DIRECTORY);
}
@Test
- public void testDeleteAfterStop() {
+ void testDeleteAfterStop() {
// Preliminary setup
final WriteMessages write = new WriteMessages();
final Future<Optional<Exception>> first = write.add(AtomicWrite.apply(PersistentRepr.apply("first", 1, "foo",
}
@Test
- public void testSegmentation() throws IOException {
+ void testSegmentation() throws IOException {
// We want to have roughly three segments
final LargePayload payload = new LargePayload();
}
@Test
- public void testComplexDeletesAndPartialReplays() throws Exception {
+ void testComplexDeletesAndPartialReplays() throws Exception {
for (int i = 0; i <= 4; i++) {
writeBigPaylod();
}
private ActorRef actor() {
return kit.childActorOf(SegmentedJournalActor.props("foo", DIRECTORY, StorageLevel.DISK, MESSAGE_SIZE,
- SEGMENT_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
+ SEGMENT_SIZE, FLUSH_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
}
private void deleteEntries(final long deleteTo) {
}
private void assertReplayCount(final int expected) {
- Consumer<PersistentRepr> firstCallback = mock(Consumer.class);
- doNothing().when(firstCallback).accept(any(PersistentRepr.class));
+ // Cast fixes an Eclipse warning 'generic array created'
+ reset((Object) firstCallback);
AsyncMessage<Void> replay = SegmentedJournalActor.replayMessages(0, Long.MAX_VALUE, Long.MAX_VALUE,
firstCallback);
actor.tell(replay, ActorRef.noSender());
return future.value().get().get();
}
- private static final class LargePayload implements Serializable {
+ static final class LargePayload implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
final byte[] bytes = new byte[MESSAGE_SIZE / 2];
-
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-it-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../mdsal-it-parent</relativePath>
</parent>
<artifactId>sal-binding-it</artifactId>
protected Option[] getAdditionalOptions() {
return new Option[] {
mavenBundle("org.opendaylight.controller", "sal-test-model").versionAsInProject(),
- mavenBundle("net.bytebuddy", "byte-buddy").versionAsInProject(),
};
}
}
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
-import java.util.List;
import javax.inject.Inject;
import org.junit.Test;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OpendaylightTestNotificationListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotificationBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
import org.opendaylight.yangtools.yang.common.Uint16;
import org.ops4j.pax.exam.util.Filter;
import org.slf4j.Logger;
*/
@Test
public void notificationTest() throws Exception {
- NotificationTestListener listener1 = new NotificationTestListener();
- ListenerRegistration<NotificationListener> listener1Reg =
- notificationService.registerNotificationListener(listener1);
-
- LOG.info("The notification of type FlowAdded with cookie ID 0 is created. The "
- + "delay 100ms to make sure that the notification was delivered to "
- + "listener.");
- notificationPublishService.putNotification(noDustNotification("rainy day", 42));
- Thread.sleep(100);
-
- /**
- * Check that one notification was delivered and has correct cookie.
- */
- assertEquals(1, listener1.notificationBag.size());
- assertEquals("rainy day", listener1.notificationBag.get(0).getReason());
- assertEquals(42, listener1.notificationBag.get(0).getDaysTillNewDust().intValue());
-
- LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
+ final var bag1 = new ArrayList<OutOfPixieDustNotification>();
+ try (var reg1 = notificationService.registerListener(OutOfPixieDustNotification.class, bag1::add)) {
+ LOG.info("""
+ The notification of type FlowAdded with cookie ID 0 is created. The\s\
+ delay 100ms to make sure that the notification was delivered to\s\
+ listener.""");
+ notificationPublishService.putNotification(noDustNotification("rainy day", 42));
+ Thread.sleep(100);
+
+ // Check that one notification was delivered and has correct cookie.
+ assertEquals(1, bag1.size());
+ assertEquals("rainy day", bag1.get(0).getReason());
+ assertEquals(42, bag1.get(0).getDaysTillNewDust().intValue());
+
+ LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
+ "registered as notification listener.");
- NotificationTestListener listener2 = new NotificationTestListener();
- final ListenerRegistration<NotificationListener> listener2Reg =
- notificationService.registerNotificationListener(listener2);
-
- LOG.info("3 notifications are published");
- notificationPublishService.putNotification(noDustNotification("rainy day", 5));
- notificationPublishService.putNotification(noDustNotification("rainy day", 10));
- notificationPublishService.putNotification(noDustNotification("tax collector", 2));
-
- /**
- * The delay 100ms to make sure that the notifications were delivered to
- * listeners.
- */
- Thread.sleep(100);
-
- /**
- * Check that 3 notification was delivered to both listeners (first one
- * received 4 in total, second 3 in total).
- */
- assertEquals(4, listener1.notificationBag.size());
- assertEquals(3, listener2.notificationBag.size());
-
- /**
- * The second listener is closed (unregistered)
- *
- */
- listener2Reg.close();
-
- LOG.info("The notification 5 is published");
- notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10));
-
- /**
- * The delay 100ms to make sure that the notification was delivered to
- * listener.
- */
- Thread.sleep(100);
-
- /**
- * Check that first consumer received 5 notifications in total, second
- * consumer received only three. Last notification was never received by
- * second consumer because its listener was unregistered.
- *
- */
- assertEquals(5, listener1.notificationBag.size());
- assertEquals(3, listener2.notificationBag.size());
+ final var bag2 = new ArrayList<OutOfPixieDustNotification>();
+ try (var reg2 = notificationService.registerListener(OutOfPixieDustNotification.class, bag2::add)) {
+ LOG.info("3 notifications are published");
+ notificationPublishService.putNotification(noDustNotification("rainy day", 5));
+ notificationPublishService.putNotification(noDustNotification("rainy day", 10));
+ notificationPublishService.putNotification(noDustNotification("tax collector", 2));
+
+ // The delay 100ms to make sure that the notifications were delivered to listeners.
+ Thread.sleep(100);
+
+ // Check that 3 notification was delivered to both listeners (first one received 4 in total, second 3
+ // in total).
+ assertEquals(4, bag1.size());
+ assertEquals(3, bag2.size());
+
+ // The second listener is closed (unregistered)
+ reg2.close();
+
+ LOG.info("The notification 5 is published");
+ notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10));
+
+ // The delay 100ms to make sure that the notification was delivered to listener.
+ Thread.sleep(100);
+
+ // Check that first consumer received 5 notifications in total, second consumer received only three.
+ // Last notification was never received by second consumer because its listener was unregistered.
+ assertEquals(5, bag1.size());
+ assertEquals(3, bag2.size());
+ }
+ }
}
/**
ret.setReason(reason).setDaysTillNewDust(Uint16.valueOf(days));
return ret.build();
}
-
- /**
- * Implements {@link OpendaylightTestNotificationListener} and contains attributes which keep lists of objects of
- * the type {@link OutOfPixieDustNotification}.
- */
- public static class NotificationTestListener implements OpendaylightTestNotificationListener {
- List<OutOfPixieDustNotification> notificationBag = new ArrayList<>();
-
- @Override
- public void onOutOfPixieDustNotification(final OutOfPixieDustNotification arg0) {
- notificationBag.add(arg0);
- }
- }
}
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import javax.inject.Inject;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.ops4j.pax.exam.util.Filter;
* Covers routed rpc creation, registration, invocation, unregistration.
*/
public class RoutedServiceIT extends AbstractIT {
+ private static final Logger LOG = LoggerFactory.getLogger(RoutedServiceIT.class);
- private static final Logger LOG = LoggerFactory
- .getLogger(RoutedServiceIT.class);
-
- protected OpendaylightTestRoutedRpcService odlRoutedService1;
- protected OpendaylightTestRoutedRpcService odlRoutedService2;
+ protected RoutedSimpleRoute routedSimpleRouteRpc1;
+ protected RoutedSimpleRoute routedSimpleRouteRpc2;
@Inject
@Filter(timeout = 120 * 1000)
@Inject
@Filter(timeout = 120 * 1000)
- RpcConsumerRegistry rpcConsumerRegistry;
+ RpcService rpcService;
/**
* Prepare mocks.
*/
@Before
public void setUp() {
- odlRoutedService1 = mock(OpendaylightTestRoutedRpcService.class, "First Flow Service");
- odlRoutedService2 = mock(OpendaylightTestRoutedRpcService.class, "Second Flow Service");
- Mockito.when(odlRoutedService1.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
- .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
- Mockito.when(odlRoutedService2.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
- .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
+ routedSimpleRouteRpc1 = mock(RoutedSimpleRoute.class, "First Flow Rpc");
+ doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc1).implementedInterface();
+ doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc1)
+ .invoke(any());
+
+ routedSimpleRouteRpc2 = mock(RoutedSimpleRoute.class, "Second Flow Rpc");
+ doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc2).implementedInterface();
+ doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc2)
+ .invoke(any());
}
@Test
public void testServiceRegistration() {
- LOG.info("Register provider 1 with first implementation of routeSimpleService - service1 of node 1");
+ LOG.info("Register provider 1 with first implementation of routeSimpleService - rpc1 of node 1");
final InstanceIdentifier<UnorderedList> nodeOnePath = createNodeRef("foo:node:1");
final InstanceIdentifier<UnorderedList> nodeTwo = createNodeRef("foo:node:2");
- ObjectRegistration<OpendaylightTestRoutedRpcService> firstReg = rpcProviderService.registerRpcImplementation(
- OpendaylightTestRoutedRpcService.class, odlRoutedService1, Set.of(nodeOnePath));
+ Registration firstReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc1,
+ Set.of(nodeOnePath));
assertNotNull("Registration should not be null", firstReg);
- assertSame(odlRoutedService1, firstReg.getInstance());
- LOG.info("Register provider 2 with second implementation of routeSimpleService - service2 of node 2");
+ LOG.info("Register provider 2 with second implementation of routeSimpleService - rpc2 of node 2");
- ObjectRegistration<OpendaylightTestRoutedRpcService> secondReg = rpcProviderService.registerRpcImplementation(
- OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeTwo));
+ Registration secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeTwo));
assertNotNull("Registration should not be null", firstReg);
- assertSame(odlRoutedService2, secondReg.getInstance());
assertNotSame(secondReg, firstReg);
- OpendaylightTestRoutedRpcService consumerService =
- rpcConsumerRegistry.getRpcService(OpendaylightTestRoutedRpcService.class);
+ RoutedSimpleRoute consumerService = rpcService.getRpc(RoutedSimpleRoute.class);
assertNotNull("MD-SAL instance of test Service should be returned", consumerService);
- assertNotSame("Provider instance and consumer instance should not be same.", odlRoutedService1,
+ assertNotSame("Provider instance and consumer instance should not be same.", routedSimpleRouteRpc1,
consumerService);
/**
* Consumer creates addFlow message for node one and sends it to the MD-SAL.
*/
final RoutedSimpleRouteInput simpleRouteFirstFoo = createSimpleRouteInput(nodeOnePath);
- consumerService.routedSimpleRoute(simpleRouteFirstFoo);
+ consumerService.invoke(simpleRouteFirstFoo);
/**
- * Verifies that implementation of the first provider received the same message from MD-SAL.
+ * Verifies that implementation of the first instance received the same message from MD-SAL.
*/
- verify(odlRoutedService1).routedSimpleRoute(simpleRouteFirstFoo);
+ verify(routedSimpleRouteRpc1).invoke(simpleRouteFirstFoo);
/**
* Verifies that second instance was not invoked with first message
*/
- verify(odlRoutedService2, times(0)).routedSimpleRoute(simpleRouteFirstFoo);
+ verify(routedSimpleRouteRpc2, times(0)).invoke(simpleRouteFirstFoo);
/**
* Consumer sends message to nodeTwo for three times. Should be processed by second instance.
*/
final RoutedSimpleRouteInput simpleRouteSecondFoo = createSimpleRouteInput(nodeTwo);
- consumerService.routedSimpleRoute(simpleRouteSecondFoo);
- consumerService.routedSimpleRoute(simpleRouteSecondFoo);
- consumerService.routedSimpleRoute(simpleRouteSecondFoo);
+ consumerService.invoke(simpleRouteSecondFoo);
+ consumerService.invoke(simpleRouteSecondFoo);
+ consumerService.invoke(simpleRouteSecondFoo);
/**
* Verifies that second instance was invoked 3 times with second message and first instance wasn't invoked.
*/
- verify(odlRoutedService2, times(3)).routedSimpleRoute(simpleRouteSecondFoo);
- verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteSecondFoo);
+ verify(routedSimpleRouteRpc2, times(3)).invoke(simpleRouteSecondFoo);
+ verify(routedSimpleRouteRpc1, times(0)).invoke(simpleRouteSecondFoo);
LOG.info("Unregistration of the path for the node one in the first provider");
firstReg.close();
LOG.info("Provider 2 registers path of node 1");
secondReg.close();
- secondReg = rpcProviderService.registerRpcImplementation(
- OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeOnePath));
+ secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeOnePath));
/**
* A consumer sends third message to node 1.
*/
final RoutedSimpleRouteInput simpleRouteThirdFoo = createSimpleRouteInput(nodeOnePath);
- consumerService.routedSimpleRoute(simpleRouteThirdFoo);
+ consumerService.invoke(simpleRouteThirdFoo);
/**
* Verifies that provider 1 wasn't invoked and provider 2 was invoked 1 time.
* TODO: fix unregister path
*/
- //verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteThirdFoo);
- verify(odlRoutedService2).routedSimpleRoute(simpleRouteThirdFoo);
+ verify(routedSimpleRouteRpc2).invoke(simpleRouteThirdFoo);
}
/**
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>cds-access-api</artifactId>
within two election timeout periods the operation fails.";
}
- rpc add-prefix-shard-replica {
- input {
- leaf shard-prefix {
- mandatory true;
- type instance-identifier;
- }
-
- leaf data-store-type {
- mandatory true;
- type data-store-type;
- description "The type of the data store to which the replica belongs";
- }
- }
-
- description "Adds a replica of a shard to this node and joins it to an existing cluster. There must already be
- a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
- searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
- described in the Raft paper.";
- }
-
- rpc remove-prefix-shard-replica {
- input {
- leaf shard-prefix {
- mandatory true;
- type instance-identifier;
- }
- leaf member-name {
- mandatory true;
- type string;
- description "The cluster member from which the shard replica should be removed";
- }
-
- leaf data-store-type {
- mandatory true;
- type data-store-type;
- description "The type of the data store to which the replica belongs";
- }
- }
-
- description "Removes an existing replica of a prefix shard from this node via the RemoveServer mechanism as
- described in the Raft paper.";
- }
-
rpc add-replicas-for-all-shards {
output {
uses shard-result-output;
}
}
- rpc get-prefix-shard-role {
- input {
- leaf shard-prefix {
- mandatory true;
- type instance-identifier;
- }
-
- leaf data-store-type {
- mandatory true;
- type data-store-type;
- description "The type of the data store to which the replica belongs";
- }
- }
-
- output {
- leaf role {
- type string;
- description "Current role for the given shard, if not present the shard currently does not have a role";
- }
- }
-
- description "Returns the current role for the requested module shard.";
- }
-
rpc get-known-clients-for-all-shards {
description "Request all shards to report their known frontend clients. This is useful for determining what
generation should a resurrected member node should use.";
}
}
}
+
+ rpc activate-eos-datacenter {
+ description "Activates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+ only a single active datacenter at a time as the singleton components will interfere with each
+ other otherwise. This only needs to be used if configuring multiple datacenters or if not using
+ default datacenter.";
+ }
+
+ rpc deactivate-eos-datacenter {
+ description "Deactivates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+ only a single active datacenter at a time as the singleton components will interfere with each
+ other otherwise. This only needs to be used if configuring multiple datacenters or if not using
+ default datacenter.";
+ }
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-cluster-admin-api</artifactId>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
</dependency>
-
- <!-- Tests -->
<dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-dom-spi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>yang-binding</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-akka-raft</artifactId>
- <type>test-jar</type>
- <scope>test</scope>
+ <artifactId>eos-dom-akka</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-distributed-datastore</artifactId>
- <type>test-jar</type>
- <version>${project.version}</version>
- <scope>test</scope>
+ <artifactId>cds-access-api</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-test-util</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>clustering-it-model</artifactId>
- <scope>test</scope>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft</artifactId>
</dependency>
<dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-cluster-admin-api</artifactId>
</dependency>
-
- <!-- Akka -->
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-distributed-datastore</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
</dependency>
-
- <!-- Scala -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</dependency>
- <!-- OpenDaylight -->
+ <!-- Tests -->
<dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-binding-api</artifactId>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-distributed-datastore</artifactId>
+ <type>test-jar</type>
+ <version>${project.version}</version>
+ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>yang-binding</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
+ <artifactId>yang-test-util</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-lang3</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <scope>test</scope>
</dependency>
-
</dependencies>
<build>
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import akka.util.Timeout;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
import org.opendaylight.controller.cluster.datastore.messages.FlipShardMembersVotingStatus;
import org.opendaylight.controller.cluster.datastore.messages.GetShardRoleReply;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshotList;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutputBuilder;
+import org.opendaylight.controller.eos.akka.DataCenterControl;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.Empty;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
*
* @author Thomas Pantelis
*/
-public class ClusterAdminRpcService implements ClusterAdminService {
+public final class ClusterAdminRpcService {
private static final Timeout SHARD_MGR_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
private static final Logger LOG = LoggerFactory.getLogger(ClusterAdminRpcService.class);
private static final @NonNull RpcResult<LocateShardOutput> LOCAL_SHARD_RESULT =
RpcResultBuilder.success(new LocateShardOutputBuilder()
- .setMemberNode(new LocalBuilder().setLocal(Empty.getInstance()).build())
+ .setMemberNode(new LocalBuilder().setLocal(Empty.value()).build())
.build())
.build();
private final DistributedDataStoreInterface configDataStore;
private final DistributedDataStoreInterface operDataStore;
- private final BindingNormalizedNodeSerializer serializer;
private final Timeout makeLeaderLocalTimeout;
+ private final DataCenterControl dataCenterControl;
public ClusterAdminRpcService(final DistributedDataStoreInterface configDataStore,
- final DistributedDataStoreInterface operDataStore,
- final BindingNormalizedNodeSerializer serializer) {
+ final DistributedDataStoreInterface operDataStore,
+ final DataCenterControl dataCenterControl) {
this.configDataStore = configDataStore;
this.operDataStore = operDataStore;
- this.serializer = serializer;
- this.makeLeaderLocalTimeout =
+ makeLeaderLocalTimeout =
new Timeout(configDataStore.getActorUtils().getDatastoreContext()
.getShardLeaderElectionTimeout().duration().$times(2));
- }
- @Override
- public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+ this.dataCenterControl = dataCenterControl;
+ }
+
+ Registration registerWith(final RpcProviderService rpcProviderService) {
+ return rpcProviderService.registerRpcImplementations(
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+ .AddShardReplica) this::addShardReplica,
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+ .RemoveShardReplica) this::removeShardReplica,
+ (LocateShard) this::locateShard,
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+ .MakeLeaderLocal) this::makeLeaderLocal,
+ (AddReplicasForAllShards) this::addReplicasForAllShards,
+ (RemoveAllShardReplicas) this::removeAllShardReplicas,
+ (ChangeMemberVotingStatesForShard) this::changeMemberVotingStatesForShard,
+ (ChangeMemberVotingStatesForAllShards) this::changeMemberVotingStatesForAllShards,
+ (FlipMemberVotingStatesForAllShards) this::flipMemberVotingStatesForAllShards,
+ (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+ .GetShardRole) this::getShardRole,
+ (BackupDatastore) this::backupDatastore,
+ (GetKnownClientsForAllShards) this::getKnownClientsForAllShards,
+ (ActivateEosDatacenter) this::activateEosDatacenter,
+ (DeactivateEosDatacenter) this::deactivateEosDatacenter);
+ }
+
+ @VisibleForTesting
+ ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
return newFailedRpcResultFuture("A valid shard name must be specified");
LOG.info("Adding replica for shard {}", shardName);
- final SettableFuture<RpcResult<AddShardReplicaOutput>> returnFuture = SettableFuture.create();
- ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName));
- Futures.addCallback(future, new FutureCallback<Success>() {
- @Override
- public void onSuccess(final Success success) {
- LOG.info("Successfully added replica for shard {}", shardName);
- returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
- }
+ final var returnFuture = SettableFuture.<RpcResult<AddShardReplicaOutput>>create();
+ Futures.addCallback(sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName)),
+ new FutureCallback<Success>() {
+ @Override
+ public void onSuccess(final Success success) {
+ LOG.info("Successfully added replica for shard {}", shardName);
+ returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
+ }
- @Override
- public void onFailure(final Throwable failure) {
- onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
+ @Override
+ public void onFailure(final Throwable failure) {
+ onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
returnFuture, failure);
- }
- }, MoreExecutors.directExecutor());
+ }
+ }, MoreExecutors.directExecutor());
return returnFuture;
}
- @Override
- public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
- final RemoveShardReplicaInput input) {
+ @VisibleForTesting
+ ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(final RemoveShardReplicaInput input) {
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
return newFailedRpcResultFuture("A valid shard name must be specified");
return returnFuture;
}
- @Override
- public ListenableFuture<RpcResult<LocateShardOutput>> locateShard(final LocateShardInput input) {
+ private ListenableFuture<RpcResult<LocateShardOutput>> locateShard(final LocateShardInput input) {
final ActorUtils utils;
switch (input.getDataStoreType()) {
case Config:
return ret;
}
- @Override
- public ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
+ @VisibleForTesting
+ ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
return newFailedRpcResultFuture("A valid shard name must be specified");
return future;
}
- @Override
- public ListenableFuture<RpcResult<AddPrefixShardReplicaOutput>> addPrefixShardReplica(
- final AddPrefixShardReplicaInput input) {
-
- final InstanceIdentifier<?> identifier = input.getShardPrefix();
- if (identifier == null) {
- return newFailedRpcResultFuture("A valid shard identifier must be specified");
- }
-
- final DataStoreType dataStoreType = input.getDataStoreType();
- if (dataStoreType == null) {
- return newFailedRpcResultFuture("A valid DataStoreType must be specified");
- }
-
- LOG.info("Adding replica for shard {}, datastore type {}", identifier, dataStoreType);
-
- final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
- final SettableFuture<RpcResult<AddPrefixShardReplicaOutput>> returnFuture = SettableFuture.create();
- ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddPrefixShardReplica(prefix));
- Futures.addCallback(future, new FutureCallback<Success>() {
- @Override
- public void onSuccess(final Success success) {
- LOG.info("Successfully added replica for shard {}", prefix);
- returnFuture.set(newSuccessfulResult(new AddPrefixShardReplicaOutputBuilder().build()));
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- onMessageFailure(String.format("Failed to add replica for shard %s", prefix),
- returnFuture, failure);
- }
- }, MoreExecutors.directExecutor());
-
- return returnFuture;
- }
-
- @Override
- public ListenableFuture<RpcResult<RemovePrefixShardReplicaOutput>> removePrefixShardReplica(
- final RemovePrefixShardReplicaInput input) {
-
- final InstanceIdentifier<?> identifier = input.getShardPrefix();
- if (identifier == null) {
- return newFailedRpcResultFuture("A valid shard identifier must be specified");
- }
-
- final DataStoreType dataStoreType = input.getDataStoreType();
- if (dataStoreType == null) {
- return newFailedRpcResultFuture("A valid DataStoreType must be specified");
- }
-
- final String memberName = input.getMemberName();
- if (Strings.isNullOrEmpty(memberName)) {
- return newFailedRpcResultFuture("A valid member name must be specified");
- }
-
- LOG.info("Removing replica for shard {} memberName {}, datastoreType {}",
- identifier, memberName, dataStoreType);
- final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
-
- final SettableFuture<RpcResult<RemovePrefixShardReplicaOutput>> returnFuture = SettableFuture.create();
- final ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType,
- new RemovePrefixShardReplica(prefix, MemberName.forName(memberName)));
- Futures.addCallback(future, new FutureCallback<Success>() {
- @Override
- public void onSuccess(final Success success) {
- LOG.info("Successfully removed replica for shard {}", prefix);
- returnFuture.set(newSuccessfulResult(new RemovePrefixShardReplicaOutputBuilder().build()));
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- onMessageFailure(String.format("Failed to remove replica for shard %s", prefix),
- returnFuture, failure);
- }
- }, MoreExecutors.directExecutor());
-
- return returnFuture;
- }
-
- @Override
- public ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
+ @VisibleForTesting ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
final AddReplicasForAllShardsInput input) {
LOG.info("Adding replicas for all shards");
"Failed to add replica");
}
-
- @Override
- public ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
+ @VisibleForTesting ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
final RemoveAllShardReplicasInput input) {
LOG.info("Removing replicas for all shards");
sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, messageSupplier);
sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, messageSupplier);
- return waitForShardResults(shardResultData, shardResults ->
- new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
- " Failed to remove replica");
+ return waitForShardResults(shardResultData,
+ shardResults -> new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
+ " Failed to remove replica");
}
- @Override
- public ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
+ @VisibleForTesting
+ ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
final ChangeMemberVotingStatesForShardInput input) {
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
return newFailedRpcResultFuture("A valid shard name must be specified");
}
- DataStoreType dataStoreType = input.getDataStoreType();
+ final var dataStoreType = input.getDataStoreType();
if (dataStoreType == null) {
return newFailedRpcResultFuture("A valid DataStoreType must be specified");
}
- List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
+ final var memberVotingStates = input.getMemberVotingState();
if (memberVotingStates == null || memberVotingStates.isEmpty()) {
return newFailedRpcResultFuture("No member voting state input was specified");
}
- ChangeShardMembersVotingStatus changeVotingStatus = toChangeShardMembersVotingStatus(shardName,
- memberVotingStates);
-
+ final var changeVotingStatus = toChangeShardMembersVotingStatus(shardName, memberVotingStates);
LOG.info("Change member voting states for shard {}: {}", shardName,
changeVotingStatus.getMeberVotingStatusMap());
- final SettableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> returnFuture = SettableFuture.create();
- ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, changeVotingStatus);
- Futures.addCallback(future, new FutureCallback<Success>() {
- @Override
- public void onSuccess(final Success success) {
- LOG.info("Successfully changed member voting states for shard {}", shardName);
- returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
- }
+ final var returnFuture = SettableFuture.<RpcResult<ChangeMemberVotingStatesForShardOutput>>create();
+ Futures.addCallback(sendMessageToShardManager(dataStoreType, changeVotingStatus),
+ new FutureCallback<Success>() {
+ @Override
+ public void onSuccess(final Success success) {
+ LOG.info("Successfully changed member voting states for shard {}", shardName);
+ returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
+ }
- @Override
- public void onFailure(final Throwable failure) {
- onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
+ @Override
+ public void onFailure(final Throwable failure) {
+ onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
returnFuture, failure);
- }
- }, MoreExecutors.directExecutor());
+ }
+ }, MoreExecutors.directExecutor());
return returnFuture;
}
- @Override
- public ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
+ @VisibleForTesting
+ ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
final ChangeMemberVotingStatesForAllShardsInput input) {
List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
if (memberVotingStates == null || memberVotingStates.isEmpty()) {
"Failed to change member voting states");
}
- @Override
- public ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
+ @VisibleForTesting
+ ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
final FlipMemberVotingStatesForAllShardsInput input) {
- final List<Entry<ListenableFuture<Success>, ShardResultBuilder>> shardResultData = new ArrayList<>();
- Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
+ final var shardResultData = new ArrayList<Entry<ListenableFuture<Success>, ShardResultBuilder>>();
+ final Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
LOG.info("Flip member voting states for all shards");
"Failed to change member voting states");
}
- @Override
- public ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
+ private ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
return newFailedRpcResultFuture("A valid shard name must be specified");
return returnFuture;
}
- @Override
- public ListenableFuture<RpcResult<GetPrefixShardRoleOutput>> getPrefixShardRole(
- final GetPrefixShardRoleInput input) {
- final InstanceIdentifier<?> identifier = input.getShardPrefix();
- if (identifier == null) {
- return newFailedRpcResultFuture("A valid shard identifier must be specified");
- }
-
- final DataStoreType dataStoreType = input.getDataStoreType();
- if (dataStoreType == null) {
- return newFailedRpcResultFuture("A valid DataStoreType must be specified");
- }
-
- LOG.info("Getting prefix shard role for shard: {}, datastore type {}", identifier, dataStoreType);
-
- final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
- final String shardName = ClusterUtils.getCleanShardName(prefix);
- final SettableFuture<RpcResult<GetPrefixShardRoleOutput>> returnFuture = SettableFuture.create();
- ListenableFuture<GetShardRoleReply> future = sendMessageToShardManager(dataStoreType,
- new GetShardRole(shardName));
- Futures.addCallback(future, new FutureCallback<GetShardRoleReply>() {
- @Override
- public void onSuccess(final GetShardRoleReply reply) {
- if (reply == null) {
- returnFuture.set(ClusterAdminRpcService.<GetPrefixShardRoleOutput>newFailedRpcResultBuilder(
- "No Shard role present. Please retry..").build());
- return;
- }
-
- LOG.info("Successfully received role:{} for shard {}", reply.getRole(), shardName);
- final GetPrefixShardRoleOutputBuilder builder = new GetPrefixShardRoleOutputBuilder();
- if (reply.getRole() != null) {
- builder.setRole(reply.getRole());
- }
- returnFuture.set(newSuccessfulResult(builder.build()));
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- returnFuture.set(ClusterAdminRpcService.<GetPrefixShardRoleOutput>newFailedRpcResultBuilder(
- "Failed to get shard role.", failure).build());
- }
- }, MoreExecutors.directExecutor());
-
- return returnFuture;
- }
-
- @Override
- public ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
+ @VisibleForTesting
+ ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
LOG.debug("backupDatastore: {}", input);
if (Strings.isNullOrEmpty(input.getFilePath())) {
return returnFuture;
}
-
- @Override
- public ListenableFuture<RpcResult<GetKnownClientsForAllShardsOutput>> getKnownClientsForAllShards(
+ private ListenableFuture<RpcResult<GetKnownClientsForAllShardsOutput>> getKnownClientsForAllShards(
final GetKnownClientsForAllShardsInput input) {
final ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> allShardReplies =
getAllShardLeadersClients();
MoreExecutors.directExecutor());
}
+ private ListenableFuture<RpcResult<ActivateEosDatacenterOutput>> activateEosDatacenter(
+ final ActivateEosDatacenterInput input) {
+ LOG.debug("Activating EOS Datacenter");
+ final SettableFuture<RpcResult<ActivateEosDatacenterOutput>> future = SettableFuture.create();
+ Futures.addCallback(dataCenterControl.activateDataCenter(), new FutureCallback<>() {
+ @Override
+ public void onSuccess(final Empty result) {
+ LOG.debug("Successfully activated datacenter.");
+ future.set(RpcResultBuilder.<ActivateEosDatacenterOutput>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable failure) {
+ future.set(ClusterAdminRpcService.<ActivateEosDatacenterOutput>newFailedRpcResultBuilder(
+ "Failed to activate datacenter.", failure).build());
+ }
+ }, MoreExecutors.directExecutor());
+
+ return future;
+ }
+
+ private ListenableFuture<RpcResult<DeactivateEosDatacenterOutput>> deactivateEosDatacenter(
+ final DeactivateEosDatacenterInput input) {
+ LOG.debug("Deactivating EOS Datacenter");
+ final SettableFuture<RpcResult<DeactivateEosDatacenterOutput>> future = SettableFuture.create();
+ Futures.addCallback(dataCenterControl.deactivateDataCenter(), new FutureCallback<>() {
+ @Override
+ public void onSuccess(final Empty result) {
+ LOG.debug("Successfully deactivated datacenter.");
+ future.set(RpcResultBuilder.<DeactivateEosDatacenterOutput>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable failure) {
+ future.set(ClusterAdminRpcService.<DeactivateEosDatacenterOutput>newFailedRpcResultBuilder(
+ "Failed to deactivate datacenter.", failure).build());
+ }
+ }, MoreExecutors.directExecutor());
+
+ return future;
+ }
+
private static RpcResult<GetKnownClientsForAllShardsOutput> processReplies(
final ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> allShardReplies) {
final Map<ShardResultKey, ShardResult> result = Maps.newHashMapWithExpectedSize(allShardReplies.size());
final List<MemberVotingState> memberVotingStatus) {
Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
for (MemberVotingState memberStatus: memberVotingStatus) {
- serverVotingStatusMap.put(memberStatus.getMemberName(), memberStatus.isVoting());
+ serverVotingStatusMap.put(memberStatus.getMemberName(), memberStatus.getVoting());
}
return new ChangeShardMembersVotingStatus(shardName, serverVotingStatusMap);
}
return ask(shardManager, message, SHARD_MGR_TIMEOUT);
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
@SuppressWarnings("checkstyle:IllegalCatch")
private static void saveSnapshotsToFile(final DatastoreSnapshotList snapshots, final String fileName,
final SettableFuture<RpcResult<BackupDatastoreOutput>> returnFuture) {
--- /dev/null
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin;
+
+import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
+import org.opendaylight.controller.eos.akka.DataCenterControl;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(service = { })
+public final class OSGiClusterAdmin {
+ private static final Logger LOG = LoggerFactory.getLogger(OSGiClusterAdmin.class);
+
+ private final Registration reg;
+
+ @Activate
+ public OSGiClusterAdmin(
+ @Reference(target = "(type=distributed-config)") final DistributedDataStoreInterface configDatastore,
+ @Reference(target = "(type=distributed-operational)") final DistributedDataStoreInterface operDatastore,
+ @Reference final RpcProviderService rpcProviderService,
+ @Reference final DataCenterControl dataCenterControls,
+ @Reference final DataCenterControl dataCenterControl) {
+ reg = new ClusterAdminRpcService(configDatastore, operDatastore, dataCenterControl)
+ .registerWith(rpcProviderService);
+ LOG.info("Cluster Admin services started");
+ }
+
+ @Deactivate
+ void deactivate() {
+ reg.close();
+ LOG.info("Cluster Admin services stopped");
+ }
+}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <!-- ClusterAdminRpcService -->
-
- <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
- odl:type="distributed-config"/>
-
- <reference id="operationalDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
- odl:type="distributed-operational"/>
-
- <reference id="normalizedNodeSerializer" interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"/>
-
- <bean id="clusterAdminService" class="org.opendaylight.controller.cluster.datastore.admin.ClusterAdminRpcService">
- <argument ref="configDatastore"/>
- <argument ref="operationalDatastore"/>
- <argument ref="normalizedNodeSerializer"/>
- </bean>
-
- <odl:rpc-implementation ref="clusterAdminService"/>
-
-</blueprint>
\ No newline at end of file
import static java.lang.Boolean.FALSE;
import static java.lang.Boolean.TRUE;
+import static java.util.Objects.requireNonNull;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import akka.actor.PoisonPill;
import akka.actor.Status.Success;
import akka.cluster.Cluster;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
import java.io.File;
-import java.io.FileInputStream;
-import java.net.URI;
-import java.util.AbstractMap.SimpleEntry;
+import java.nio.file.Files;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.MemberNode;
import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
/**
* Unit tests for ClusterAdminRpcService.
* @author Thomas Pantelis
*/
public class ClusterAdminRpcServiceTest {
+ record ExpState(String name, boolean voting) {
+ ExpState {
+ requireNonNull(name);
+ }
+ }
+
private static final MemberName MEMBER_1 = MemberName.forName("member-1");
private static final MemberName MEMBER_2 = MemberName.forName("member-2");
private static final MemberName MEMBER_3 = MemberName.forName("member-3");
@After
public void tearDown() {
- for (MemberNode m : Lists.reverse(memberNodes)) {
- m.cleanup();
+ for (var member : Lists.reverse(memberNodes)) {
+ member.cleanup();
}
memberNodes.clear();
}
@Test
public void testBackupDatastore() throws Exception {
- MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1")
- .moduleShardsConfig("module-shards-member1.conf").waitForShardLeader("cars", "people")
- .testName("testBackupDatastore").build();
+ final var node = MemberNode.builder(memberNodes)
+ .akkaConfig("Member1")
+ .moduleShardsConfig("module-shards-member1.conf")
+ .waitForShardLeader("cars", "people")
+ .testName("testBackupDatastore")
+ .build();
- String fileName = "target/testBackupDatastore";
- new File(fileName).delete();
+ final var fileName = "target/testBackupDatastore";
+ final var file = new File(fileName);
+ file.delete();
- ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
+ final var service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
- RpcResult<BackupDatastoreOutput> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
- .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
+ var rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
+ .get(5, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
- try (FileInputStream fis = new FileInputStream(fileName)) {
- List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
+ try (var fis = Files.newInputStream(file.toPath())) {
+ final List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
assertEquals("DatastoreSnapshot size", 2, snapshots.size());
- ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
- snapshots.get(1).getType(), snapshots.get(1));
+ final var map = Map.of(
+ snapshots.get(0).getType(), snapshots.get(0),
+ snapshots.get(1).getType(), snapshots.get(1));
verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(),
map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people");
} finally {
node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder()
.shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
- ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get();
+ final var carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").orElseThrow();
node.kit().watch(carsShardActor);
carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
node.kit().expectTerminated(carsShardActor);
private static void verifyDatastoreSnapshot(final String type, final DatastoreSnapshot datastoreSnapshot,
final String... expShardNames) {
assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
- Set<String> shardNames = new HashSet<>();
- for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
- shardNames.add(s.getName());
+ var shardNames = new HashSet<String>();
+ for (var snapshot : datastoreSnapshot.getShardSnapshots()) {
+ shardNames.add(snapshot.getName());
}
- assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
- }
-
- @Test
- public void testAddRemovePrefixShardReplica() throws Exception {
- String name = "testAddPrefixShardReplica";
- String moduleShardsConfig = "module-shards-default.conf";
-
- final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(moduleShardsConfig).build();
-
- member1.waitForMembersUp("member-2", "member-3");
- replicaNode2.kit().waitForMembersUp("member-1", "member-3");
- replicaNode3.kit().waitForMembersUp("member-1", "member-2");
-
- final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
-
- shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
- "prefix", Collections.singleton(MEMBER_1))),
- ActorRef.noSender());
-
- member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
- final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
- final BindingNormalizedNodeSerializer serializer = Mockito.mock(BindingNormalizedNodeSerializer.class);
- Mockito.doReturn(CarsModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(identifier);
-
- addPrefixShardReplica(replicaNode2, identifier, serializer,
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1");
-
- addPrefixShardReplica(replicaNode3, identifier, serializer,
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1", "member-2");
-
- verifyRaftPeersPresent(member1.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
- "member-2", "member-3");
-
- removePrefixShardReplica(member1, identifier, "member-3", serializer,
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-2");
-
- verifyNoShardPresent(replicaNode3.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
- verifyRaftPeersPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
- "member-1");
-
- removePrefixShardReplica(member1, identifier, "member-2", serializer,
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
- verifyNoShardPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
- }
-
- @Test
- public void testGetShardRole() throws Exception {
- String name = "testGetShardRole";
- String moduleShardsConfig = "module-shards-default-member-1.conf";
-
- final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(moduleShardsConfig).build();
-
- member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
-
- final RpcResult<GetShardRoleOutput> successResult =
- getShardRole(member1, Mockito.mock(BindingNormalizedNodeSerializer.class), "default");
- verifySuccessfulRpcResult(successResult);
- assertEquals("Leader", successResult.getResult().getRole());
-
- final RpcResult<GetShardRoleOutput> failedResult =
- getShardRole(member1, Mockito.mock(BindingNormalizedNodeSerializer.class), "cars");
-
- verifyFailedRpcResult(failedResult);
-
- final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
-
- shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
- "prefix", Collections.singleton(MEMBER_1))),
- ActorRef.noSender());
-
- member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
- ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
- final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
- final BindingNormalizedNodeSerializer serializer = Mockito.mock(BindingNormalizedNodeSerializer.class);
- Mockito.doReturn(CarsModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(identifier);
-
- final RpcResult<GetPrefixShardRoleOutput> prefixSuccessResult =
- getPrefixShardRole(member1, identifier, serializer);
-
- verifySuccessfulRpcResult(prefixSuccessResult);
- assertEquals("Leader", prefixSuccessResult.getResult().getRole());
-
- final InstanceIdentifier<People> peopleId = InstanceIdentifier.create(People.class);
- Mockito.doReturn(PeopleModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(peopleId);
-
- final RpcResult<GetPrefixShardRoleOutput> prefixFail =
- getPrefixShardRole(member1, peopleId, serializer);
-
- verifyFailedRpcResult(prefixFail);
+ assertEquals("DatastoreSnapshot shard names", Set.of(expShardNames), shardNames);
}
@Test
String name = "testGetPrefixShardRole";
String moduleShardsConfig = "module-shards-default-member-1.conf";
- final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
-
-
}
@Test
String name = "testModuleShardLeaderMovement";
String moduleShardsConfig = "module-shards-member1.conf";
- final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
member1.waitForMembersUp("member-2", "member-3");
public void testAddShardReplica() throws Exception {
String name = "testAddShardReplica";
String moduleShardsConfig = "module-shards-cars-member-1.conf";
- MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
- MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.waitForMembersUp("member-2");
doAddShardReplica(newReplicaNode2, "cars", "member-1");
- MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ var newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.waitForMembersUp("member-3");
verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
// Write data to member-2's config datastore and read/verify via member-3
- final NormalizedNode<?, ?> configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
+ final var configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
newReplicaNode3.configDataStore());
// Write data to member-3's oper datastore and read/verify via member-2
writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
// Verify all data has been replicated. We expect 4 log entries and thus last applied index of 3 -
- // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload.
+ // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload.
RaftStateVerifier verifier = raftState -> {
- assertEquals("Commit index", 4, raftState.getCommitIndex());
- assertEquals("Last applied index", 4, raftState.getLastApplied());
+ assertEquals("Commit index", 3, raftState.getCommitIndex());
+ assertEquals("Last applied index", 3, raftState.getLastApplied());
};
verifyRaftState(leaderNode1.configDataStore(), "cars", verifier);
@Test
public void testAddShardReplicaFailures() throws Exception {
String name = "testAddShardReplicaFailures";
- MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig("module-shards-cars-member-1.conf").build();
- ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
- memberNode.operDataStore(), null);
+ final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
- RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
- .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+ .setDataStoreType(DataStoreType.Config)
+ .build())
+ .get(10, TimeUnit.SECONDS);
verifyFailedRpcResult(rpcResult);
- rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars")
- .build()).get(10, TimeUnit.SECONDS);
+ rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").build())
+ .get(10, TimeUnit.SECONDS);
verifyFailedRpcResult(rpcResult);
rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people")
- .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ .setDataStoreType(DataStoreType.Config)
+ .build())
+ .get(10, TimeUnit.SECONDS);
verifyFailedRpcResult(rpcResult);
}
- private static NormalizedNode<?, ?> writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
+ private static ContainerNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
final AbstractDataStore readFromStore) throws Exception {
- DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
- NormalizedNode<?, ?> carsNode = CarsModel.create();
+ final var writeTx = writeToStore.newWriteOnlyTransaction();
+ final var carsNode = CarsModel.create();
writeTx.write(CarsModel.BASE_PATH, carsNode);
- DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
- Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
- assertEquals("canCommit", TRUE, canCommit);
+ final var cohort = writeTx.ready();
+ assertEquals("canCommit", TRUE, cohort.canCommit().get(7, TimeUnit.SECONDS));
cohort.preCommit().get(5, TimeUnit.SECONDS);
cohort.commit().get(5, TimeUnit.SECONDS);
}
private static void readCarsNodeAndVerify(final AbstractDataStore readFromStore,
- final NormalizedNode<?, ?> expCarsNode) throws Exception {
- Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH)
- .get(15, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", expCarsNode, optional.get());
- }
-
- private static RpcResult<GetShardRoleOutput> getShardRole(final MemberNode memberNode,
- final BindingNormalizedNodeSerializer serializer, final String shardName) throws Exception {
-
- final GetShardRoleInput input = new GetShardRoleInputBuilder()
- .setDataStoreType(DataStoreType.Config)
- .setShardName(shardName)
- .build();
-
- final ClusterAdminRpcService service =
- new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
- return service.getShardRole(input).get(10, TimeUnit.SECONDS);
- }
-
- private static RpcResult<GetPrefixShardRoleOutput> getPrefixShardRole(
- final MemberNode memberNode,
- final InstanceIdentifier<?> identifier,
- final BindingNormalizedNodeSerializer serializer) throws Exception {
-
- final GetPrefixShardRoleInput input = new GetPrefixShardRoleInputBuilder()
- .setDataStoreType(DataStoreType.Config)
- .setShardPrefix(identifier)
- .build();
-
- final ClusterAdminRpcService service =
- new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
- return service.getPrefixShardRole(input).get(10, TimeUnit.SECONDS);
- }
-
- private static void addPrefixShardReplica(final MemberNode memberNode, final InstanceIdentifier<?> identifier,
- final BindingNormalizedNodeSerializer serializer, final String shardName,
- final String... peerMemberNames) throws Exception {
-
- final AddPrefixShardReplicaInput input = new AddPrefixShardReplicaInputBuilder()
- .setShardPrefix(identifier)
- .setDataStoreType(DataStoreType.Config).build();
-
- final ClusterAdminRpcService service =
- new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
- final RpcResult<AddPrefixShardReplicaOutput> rpcResult = service.addPrefixShardReplica(input)
- .get(10, TimeUnit.SECONDS);
- verifySuccessfulRpcResult(rpcResult);
-
- verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
- Optional<ActorRef> optional = memberNode.configDataStore().getActorUtils().findLocalShard(shardName);
- assertTrue("Replica shard not present", optional.isPresent());
- }
-
- private static void removePrefixShardReplica(final MemberNode memberNode, final InstanceIdentifier<?> identifier,
- final String removeFromMember, final BindingNormalizedNodeSerializer serializer, final String shardName,
- final String... peerMemberNames) throws Exception {
- final RemovePrefixShardReplicaInput input = new RemovePrefixShardReplicaInputBuilder()
- .setDataStoreType(DataStoreType.Config)
- .setShardPrefix(identifier)
- .setMemberName(removeFromMember).build();
-
- final ClusterAdminRpcService service =
- new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
- final RpcResult<RemovePrefixShardReplicaOutput> rpcResult = service.removePrefixShardReplica(input)
- .get(10, TimeUnit.SECONDS);
- verifySuccessfulRpcResult(rpcResult);
-
- verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
+ final ContainerNode expCarsNode) throws Exception {
+ assertEquals(Optional.of(expCarsNode),
+ readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS));
}
private static void doAddShardReplica(final MemberNode memberNode, final String shardName,
final String... peerMemberNames) throws Exception {
memberNode.waitForMembersUp(peerMemberNames);
- ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
- memberNode.operDataStore(), null);
+ final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
- RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
- .setShardName(shardName).setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.Config)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
- Optional<ActorRef> optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName);
- assertFalse("Oper shard present", optional.isPresent());
+ assertEquals(Optional.empty(), memberNode.operDataStore().getActorUtils().findLocalShard(shardName));
- rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
- .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
+ rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.Operational)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName,
final String newLeader) throws Exception {
- ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
- memberNode.operDataStore(), null);
+ final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
- final RpcResult<MakeLeaderLocalOutput> rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
- .setDataStoreType(DataStoreType.Config).setShardName(shardName).build())
- .get(10, TimeUnit.SECONDS);
+ final var rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+ .setDataStoreType(DataStoreType.Config)
+ .setShardName(shardName)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
private static <T> T verifySuccessfulRpcResult(final RpcResult<T> rpcResult) {
if (!rpcResult.isSuccessful()) {
- if (rpcResult.getErrors().size() > 0) {
- RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+ final var errors = rpcResult.getErrors();
+ if (errors.size() > 0) {
+ final var error = errors.get(0);
throw new AssertionError("Rpc failed with error: " + error, error.getCause());
}
private static void verifyFailedRpcResult(final RpcResult<?> rpcResult) {
assertFalse("RpcResult", rpcResult.isSuccessful());
- assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
- RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+ final var errors = rpcResult.getErrors();
+ assertEquals("RpcResult errors size", 1, errors.size());
+ final var error = errors.get(0);
assertNotNull("RpcResult error message null", error.getMessage());
}
public void testRemoveShardReplica() throws Exception {
String name = "testRemoveShardReplica";
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
// Invoke RPC service on member-3 to remove it's local shard
- ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
- replicaNode3.operDataStore(), null);
+ final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+ null);
- RpcResult<RemoveShardReplicaOutput> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
- .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
- .get(10, TimeUnit.SECONDS);
+ var rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
+ .setShardName("cars").setMemberName("member-3")
+ .setDataStoreType(DataStoreType.Config)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress());
replicaNode2.cleanup();
- MemberNode newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
newPeplicaNode2.configDataStore().waitTillReady();
// Invoke RPC service on member-1 to remove member-2
- ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
- leaderNode1.operDataStore(), null);
+ final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ null);
- rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
- .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+ .setShardName("cars")
+ .setMemberName("member-2")
+ .setDataStoreType(DataStoreType.Config)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars");
public void testRemoveShardLeaderReplica() throws Exception {
String name = "testRemoveShardLeaderReplica";
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
// Invoke RPC service on leader member-1 to remove it's local shard
- ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
- leaderNode1.operDataStore(), null);
+ final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ null);
- RpcResult<RemoveShardReplicaOutput> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
- .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
- .get(10, TimeUnit.SECONDS);
+ final var rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+ .setShardName("cars")
+ .setMemberName("member-1")
+ .setDataStoreType(DataStoreType.Config)
+ .build()).get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
verifyRaftState(replicaNode2.configDataStore(), "cars", raftState ->
public void testAddReplicasForAllShards() throws Exception {
String name = "testAddReplicasForAllShards";
String moduleShardsConfig = "module-shards-member1.conf";
- MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
- ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
- "pets", null,
- Collections.singletonList(MEMBER_1));
+ final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+ null, List.of(MEMBER_1));
leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
leaderNode1.kit().expectMsgClass(Success.class);
leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets");
- MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.waitForMembersUp("member-2");
new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
newReplicaNode2.kit().expectMsgClass(Success.class);
- newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell(
- new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
- "no-leader", null,
- Collections.singletonList(MEMBER_1)),
- Shard.builder(), null),
- newReplicaNode2.kit().getRef());
+ newReplicaNode2.operDataStore().getActorUtils().getShardManager()
+ .tell(new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module",
+ "no-leader", null, List.of(MEMBER_1)),
+ Shard.builder(), null), newReplicaNode2.kit().getRef());
newReplicaNode2.kit().expectMsgClass(Success.class);
- ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
- newReplicaNode2.operDataStore(), null);
+ final var service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
+ newReplicaNode2.operDataStore(), null);
- RpcResult<AddReplicasForAllShardsOutput> rpcResult = service.addReplicasForAllShards(
- new AddReplicasForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
- AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+ var rpcResult = service.addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build())
+ .get(10, TimeUnit.SECONDS);
+ final var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("pets", DataStoreType.Config),
public void testRemoveAllShardReplicas() throws Exception {
String name = "testRemoveAllShardReplicas";
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
- ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
- "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
+ final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+ null, List.of(MEMBER_1, MEMBER_2, MEMBER_3));
leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
leaderNode1.kit().expectMsgClass(Success.class);
verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3");
verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
- ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
- replicaNode3.operDataStore(), null);
+ final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+ null);
- RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
- new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
- RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult);
+ var rpcResult = service3.removeAllShardReplicas(
+ new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build())
+ .get(10, TimeUnit.SECONDS);
+ final var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("pets", DataStoreType.Config),
public void testChangeMemberVotingStatesForShard() throws Exception {
String name = "testChangeMemberVotingStatusForShard";
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
// Invoke RPC service on member-3 to change voting status
- ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
- replicaNode3.operDataStore(), null);
+ final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+ null);
- RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service3
- .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
- .setShardName("cars").setDataStoreType(DataStoreType.Config)
- .setMemberVotingState(ImmutableList.of(
- new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
- new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
- .build())
- .get(10, TimeUnit.SECONDS);
+ var rpcResult = service3.changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
+ .setShardName("cars").setDataStoreType(DataStoreType.Config)
+ .setMemberVotingState(List.of(
+ new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+ new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+ .build())
+ .get(10, TimeUnit.SECONDS);
verifySuccessfulRpcResult(rpcResult);
- verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
- verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
- verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+ verifyVotingStates(leaderNode1.configDataStore(), "cars",
+ new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+ verifyVotingStates(replicaNode2.configDataStore(), "cars",
+ new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+ verifyVotingStates(replicaNode3.configDataStore(), "cars",
+ new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
}
@Test
public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception {
String name = "testChangeMemberVotingStatesForSingleNodeShard";
String moduleShardsConfig = "module-shards-member1.conf";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
// Invoke RPC service on member-3 to change voting status
- ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
- leaderNode.operDataStore(), null);
-
- RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service
- .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
- .setShardName("cars").setDataStoreType(DataStoreType.Config)
- .setMemberVotingState(ImmutableList
- .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(FALSE).build()))
- .build())
- .get(10, TimeUnit.SECONDS);
+ final var service = new ClusterAdminRpcService(leaderNode.configDataStore(), leaderNode.operDataStore(), null);
+
+ final var rpcResult = service.changeMemberVotingStatesForShard(
+ new ChangeMemberVotingStatesForShardInputBuilder()
+ .setShardName("cars").setDataStoreType(DataStoreType.Config)
+ .setMemberVotingState(List.of(new MemberVotingStateBuilder()
+ .setMemberName("member-1")
+ .setVoting(FALSE)
+ .build()))
+ .build())
+ .get(10, TimeUnit.SECONDS);
verifyFailedRpcResult(rpcResult);
- verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE));
+ verifyVotingStates(leaderNode.configDataStore(), "cars", new ExpState("member-1", true));
}
@Test
public void testChangeMemberVotingStatesForAllShards() throws Exception {
String name = "testChangeMemberVotingStatesForAllShards";
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
- .build();
-
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes)
+ .akkaConfig("Member1")
+ .testName(name)
+ .moduleShardsConfig(moduleShardsConfig)
+ .datastoreContextBuilder(DatastoreContext.newBuilder()
+ .shardHeartbeatIntervalInMillis(300)
+ .shardElectionTimeoutFactor(1))
+ .build();
+
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
// Invoke RPC service on member-3 to change voting status
- ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
+ final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
replicaNode3.operDataStore(), null);
- RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
- new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
+ final var rpcResult = service3.changeMemberVotingStatesForAllShards(
+ new ChangeMemberVotingStatesForAllShardsInputBuilder()
+ .setMemberVotingState(List.of(
new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
- new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build())
+ new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+ .build())
.get(10, TimeUnit.SECONDS);
- ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+ final var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("cars", DataStoreType.Operational),
successShardResult("people", DataStoreType.Operational));
- verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
- replicaNode2.configDataStore(), replicaNode2.operDataStore(),
- replicaNode3.configDataStore(), replicaNode3.operDataStore()},
- new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+ verifyVotingStates(new ClientBackedDataStore[] {
+ leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+ replicaNode3.configDataStore(), replicaNode3.operDataStore()
+ }, new String[] { "cars", "people" },
+ new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
}
@Test
public void testFlipMemberVotingStates() throws Exception {
String name = "testFlipMemberVotingStates";
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
- new ServerInfo("member-1", true), new ServerInfo("member-2", true),
- new ServerInfo("member-3", false)));
+ final var persistedServerConfig = new ServerConfigurationPayload(List.of(
+ new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false)));
setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder()
.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
leaderNode1.operDataStore().waitTillReady();
replicaNode3.configDataStore().waitTillReady();
replicaNode3.operDataStore().waitTillReady();
- verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
+ verifyVotingStates(leaderNode1.configDataStore(), "cars",
+ new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
- ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
- replicaNode3.operDataStore(), null);
+ final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+ null);
- RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards(
- new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
- FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+ var rpcResult = service3.flipMemberVotingStatesForAllShards(
+ new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+ .get(10, TimeUnit.SECONDS);
+ var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("cars", DataStoreType.Operational),
successShardResult("people", DataStoreType.Operational));
- verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
- replicaNode2.configDataStore(), replicaNode2.operDataStore(),
- replicaNode3.configDataStore(), replicaNode3.operDataStore()},
- new String[]{"cars", "people"},
- new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
- new SimpleEntry<>("member-3", TRUE));
+ verifyVotingStates(new ClientBackedDataStore[] {
+ leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+ replicaNode3.configDataStore(), replicaNode3.operDataStore()
+ }, new String[] { "cars", "people" },
+ new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", true));
// Leadership should have transferred to member 3 since it is the only remaining voting member.
verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
// Flip the voting states back to the original states.
rpcResult = service3.flipMemberVotingStatesForAllShards(
- new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
+ new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+ .get(10, TimeUnit.SECONDS);
result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("cars", DataStoreType.Operational),
successShardResult("people", DataStoreType.Operational));
- verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
- replicaNode2.configDataStore(), replicaNode2.operDataStore(),
- replicaNode3.configDataStore(), replicaNode3.operDataStore()},
- new String[]{"cars", "people"},
- new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
- new SimpleEntry<>("member-3", FALSE));
+ verifyVotingStates(new ClientBackedDataStore[] {
+ leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+ replicaNode3.configDataStore(), replicaNode3.operDataStore()
+ }, new String[] { "cars", "people" },
+ new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
// Leadership should have transferred to member 1 or 2.
verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
// Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are initially
// non-voting and simulated as down by not starting them up.
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ final var persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo("member-1", false), new ServerInfo("member-2", false),
new ServerInfo("member-3", false), new ServerInfo("member-4", true),
new ServerInfo("member-5", true), new ServerInfo("member-6", true)));
setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
// Initially there won't be a leader b/c all the up nodes are non-voting.
replicaNode1.waitForMembersUp("member-2", "member-3");
- verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE),
- new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE),
- new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE),
- new SimpleEntry<>("member-6", TRUE));
+ verifyVotingStates(replicaNode1.configDataStore(), "cars",
+ new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+ new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
- ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
- replicaNode1.operDataStore(), null);
+ final var service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+ null);
- RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
- new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
- FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+ final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+ new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+ .get(10, TimeUnit.SECONDS);
+ final var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("cars", DataStoreType.Operational),
successShardResult("people", DataStoreType.Operational));
- verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
- replicaNode2.configDataStore(), replicaNode2.operDataStore(),
- replicaNode3.configDataStore(), replicaNode3.operDataStore()},
- new String[]{"cars", "people"},
- new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
- new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE),
- new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE));
+ verifyVotingStates(new ClientBackedDataStore[] {
+ replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+ replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+ replicaNode3.configDataStore(), replicaNode3.operDataStore()
+ }, new String[] { "cars", "people" },
+ new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+ new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
// Since member 1 was changed to voting and there was no leader, it should've started and election
// and become leader
String name = "testFlipMemberVotingStatesWithVotingMembersDown";
// Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up.
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+ final var persistedServerConfig = new ServerConfigurationPayload(List.of(
new ServerInfo("member-1", true), new ServerInfo("member-2", true),
new ServerInfo("member-3", true), new ServerInfo("member-4", false),
new ServerInfo("member-5", false), new ServerInfo("member-6", false)));
setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
- final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+ final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
.moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
.build();
- final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+ final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
- final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+ final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
.moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
leaderNode1.operDataStore().waitTillReady();
- verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
- new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE),
- new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE),
- new SimpleEntry<>("member-6", FALSE));
+ verifyVotingStates(leaderNode1.configDataStore(), "cars",
+ new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+ new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
- ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
- leaderNode1.operDataStore(), null);
+ final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ null);
- RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
- new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
- FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+ final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+ new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+ .get(10, TimeUnit.SECONDS);
+ final var result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
successShardResult("people", DataStoreType.Config),
successShardResult("cars", DataStoreType.Operational),
successShardResult("people", DataStoreType.Operational));
// Members 2 and 3 are now non-voting but should get replicated with the new new server config.
- verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
- replicaNode2.configDataStore(), replicaNode2.operDataStore(),
- replicaNode3.configDataStore(), replicaNode3.operDataStore()},
- new String[]{"cars", "people"},
- new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
- new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE),
- new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE));
+ verifyVotingStates(new ClientBackedDataStore[] {
+ leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+ replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+ replicaNode3.configDataStore(), replicaNode3.operDataStore()
+ }, new String[] { "cars", "people" },
+ new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+ new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
// The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
// b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
private static void setupPersistedServerConfigPayload(final ServerConfigurationPayload serverConfig,
final String member, final String datastoreTypeSuffix, final String... shards) {
- String[] datastoreTypes = {"config_", "oper_"};
+ String[] datastoreTypes = { "config_", "oper_" };
for (String type : datastoreTypes) {
for (String shard : shards) {
- List<ServerInfo> newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size());
- for (ServerInfo info : serverConfig.getServerConfig()) {
- newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()),
+ final var newServerInfo = new ArrayList<ServerInfo>(serverConfig.getServerConfig().size());
+ for (var info : serverConfig.getServerConfig()) {
+ newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.peerId()),
type + datastoreTypeSuffix).toString(), info.isVoting()));
}
- String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
+ final String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
type + datastoreTypeSuffix).toString();
InMemoryJournal.addEntry(shardID, 1, new UpdateElectionTerm(1, null));
InMemoryJournal.addEntry(shardID, 2, new SimpleReplicatedLogEntry(0, 1,
}
}
- @SafeVarargs
- private static void verifyVotingStates(final AbstractDataStore[] datastores, final String[] shards,
- final SimpleEntry<String, Boolean>... expStates) throws Exception {
- for (AbstractDataStore datastore: datastores) {
- for (String shard: shards) {
+ private static void verifyVotingStates(final ClientBackedDataStore[] datastores, final String[] shards,
+ final ExpState... expStates) throws Exception {
+ for (var datastore : datastores) {
+ for (String shard : shards) {
verifyVotingStates(datastore, shard, expStates);
}
}
}
- @SafeVarargs
- private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName,
- final SimpleEntry<String, Boolean>... expStates) throws Exception {
+ private static void verifyVotingStates(final ClientBackedDataStore datastore, final String shardName,
+ final ExpState... expStates) throws Exception {
String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName();
- Map<String, Boolean> expStateMap = new HashMap<>();
- for (Entry<String, Boolean> e: expStates) {
- expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
- datastore.getActorUtils().getDataStoreName()).toString(), e.getValue());
+ var expStateMap = new HashMap<String, Boolean>();
+ for (var expState : expStates) {
+ expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(expState.name),
+ datastore.getActorUtils().getDataStoreName()).toString(), expState.voting);
}
verifyRaftState(datastore, shardName, raftState -> {
String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
datastore.getActorUtils().getDataStoreName()).toString();
assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
- for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
- assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
+ for (var entry : raftState.getPeerVotingStates().entrySet()) {
+ assertEquals("Voting state for " + entry.getKey(), expStateMap.get(entry.getKey()), entry.getValue());
}
});
}
private static void verifyShardResults(final Map<ShardResultKey, ShardResult> shardResults,
final ShardResult... expShardResults) {
- Map<String, ShardResult> expResultsMap = new HashMap<>();
- for (ShardResult r: expShardResults) {
+ var expResultsMap = new HashMap<String, ShardResult>();
+ for (var r : expShardResults) {
expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
}
- for (ShardResult result: shardResults.values()) {
- ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
+ for (var result : shardResults.values()) {
+ var exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
result.getDataStoreType()), exp);
- assertEquals("isSucceeded", exp.isSucceeded(), result.isSucceeded());
- if (exp.isSucceeded()) {
+ assertEquals("isSucceeded", exp.getSucceeded(), result.getSucceeded());
+ if (exp.getSucceeded()) {
assertNull("Expected null error message", result.getErrorMessage());
} else {
assertNotNull("Expected error message", result.getErrorMessage());
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>mdsal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>sal-cluster-admin-karaf-cli</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.karaf.shell</groupId>
+ <artifactId>org.apache.karaf.shell.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-cluster-admin-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.karaf.tooling</groupId>
+ <artifactId>karaf-services-maven-plugin</artifactId>
+ <version>${karaf.version}</version>
+ <executions>
+ <execution>
+ <id>service-metadata-generate</id>
+ <phase>process-classes</phase>
+ <goals>
+ <goal>service-metadata-generate</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+
+/**
+ * Common base class for all commands which end up invoking an RPC.
+ */
+public abstract class AbstractRpcAction implements Action {
+ @Override
+ @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+ public final Object execute() throws InterruptedException, ExecutionException {
+ final RpcResult<?> result = invokeRpc().get();
+ if (!result.isSuccessful()) {
+ // FIXME: is there a better way to report errors?
+ System.out.println("Invocation failed: " + result.getErrors());
+ return null;
+ } else {
+ return result.getResult();
+ }
+ }
+
+ protected abstract ListenableFuture<? extends RpcResult<?>> invokeRpc();
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "activate-eos-datacenter", description = "Run an activate-eos-datacenter test")
+public class ActivateEosDatacenterCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(ActivateEosDatacenter.class)
+ .invoke(new ActivateEosDatacenterInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "add-replicas-for-all-shards",
+ description = "Run an add-replicas-for-all-shards test")
+public class AddReplicasForAllShardsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(AddReplicasForAllShards.class)
+ .invoke(new AddReplicasForAllShardsInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplica;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "add-shard-replica", description = "Run an add-shard-replica test")
+public class AddShardReplicaCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(AddShardReplica.class)
+ .invoke(new AddShardReplicaInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "cluster-admin", name = "backup-datastore", description = "Run a backup-datastore test")
+public class BackupDatastoreCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "file-path", required = true)
+ private String filePath;
+ @Argument(index = 1, name = "timeout", required = true)
+ private long timeout;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(BackupDatastore.class)
+ .invoke(new BackupDatastoreInputBuilder()
+ .setFilePath(filePath)
+ .setTimeout(Uint32.valueOf(timeout))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+
+@Service
+@Command(scope = "cluster-admin", name = "change-member-voting-states-for-all-shards",
+ description = "Run a change-member-voting-states-for-all-shards test")
+public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "member-name", required = true)
+ private String memberName;
+ @Argument(index = 1, name = "voting", required = true)
+ private boolean voting;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ final MemberVotingState memberVotingState = new MemberVotingStateBuilder()
+ .setMemberName(memberName)
+ .setVoting(voting)
+ .build();
+
+ return rpcService.getRpc(ChangeMemberVotingStatesForAllShards.class)
+ .invoke(new ChangeMemberVotingStatesForAllShardsInputBuilder()
+ .setMemberVotingState(List.of(memberVotingState))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "change-member-voting-states-for-shard",
+ description = "Run a change-member-voting-states-for-shard test")
+public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+ @Argument(index = 2, name = "member-name", required = true)
+ private String memberName;
+ @Argument(index = 3, name = "voting", required = true)
+ private boolean voting;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ final MemberVotingState memberVotingState = new MemberVotingStateBuilder()
+ .setMemberName(memberName)
+ .setVoting(voting)
+ .build();
+
+ return rpcService.getRpc(ChangeMemberVotingStatesForShard.class)
+ .invoke(new ChangeMemberVotingStatesForShardInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .setMemberVotingState(List.of(memberVotingState))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "deactivate-eos-datacenter",
+ description = "Run a deactivate-eos-datacenter test")
+public class DeactivateEosDatacenterCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(DeactivateEosDatacenter.class)
+ .invoke(new DeactivateEosDatacenterInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin",name = "flip-member-voting-states-for-all-shards",
+ description = "Run a flip-member-voting-states-for-all-shards test")
+public class FlipMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(FlipMemberVotingStatesForAllShards.class)
+ .invoke(new FlipMemberVotingStatesForAllShardsInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "get-known-clients-for-all-shards",
+ description = "Run a get-known-clients-for-all-shards test")
+public class GetKnownClientsForAllShardsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(GetKnownClientsForAllShards.class)
+ .invoke(new GetKnownClientsForAllShardsInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRole;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "get-shard-role", description = "Run a get-shard-role test")
+public class GetShardRoleCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(GetShardRole.class)
+ .invoke(new GetShardRoleInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "locate-shard", description = "Run a locate-shard test")
+public class LocateShardCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(LocateShard.class)
+ .invoke(new LocateShardInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocal;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "make-leader-local", description = "Run a make-leader-local test")
+public class MakeLeaderLocalCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(MakeLeaderLocal.class)
+ .invoke(new MakeLeaderLocalInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "remove-all-shard-replicas",
+ description = "Run a remove-all-shard-replicas test")
+public class RemoveAllShardReplicasCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "member-name",required = true)
+ private String memberName;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RemoveAllShardReplicas.class)
+ .invoke(new RemoveAllShardReplicasInputBuilder()
+ .setMemberName(memberName)
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplica;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "remove-shard-replica", description = "Run a remove-shard-replica")
+public class RemoveShardReplicaCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+ @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+ private String dataStoreType;
+ @Argument(index = 2, name = "member-name", required = true)
+ private String memberName;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RemoveShardReplica.class)
+ .invoke(new RemoveShardReplicaInputBuilder()
+ .setShardName(shardName)
+ .setDataStoreType(DataStoreType.forName(dataStoreType))
+ .setMemberName(memberName)
+ .build());
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
- <!-- Java -->
<dependency>
- <groupId>org.xmlunit</groupId>
- <artifactId>xmlunit-legacy</artifactId>
+ <!-- Enforce Netty’s optional dependency on servlet API -->
+ <!-- FIXME: is this really needed ? -->
+ <groupId>javax.servlet</groupId>
+ <artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-test-util</artifactId>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
</dependency>
-
- <!-- Apache -->
<dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <scope>provided</scope>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>commons-io</groupId>
- <artifactId>commons-io</artifactId>
- <scope>test</scope>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-lang3</artifactId>
- <scope>test</scope>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-core</artifactId>
</dependency>
-
- <!-- Akka -->
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-graphite</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-cluster_2.13</artifactId>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-jmx</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-osgi_2.13</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.compendium</artifactId>
- </exclusion>
- </exclusions>
+ <groupId>org.checkerframework</groupId>
+ <artifactId>checker-qual</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-remote_2.13</artifactId>
+ <groupId>org.kohsuke.metainf-services</groupId>
+ <artifactId>metainf-services</artifactId>
</dependency>
<dependency>
- <!-- Enforce Netty’s optional dependency on servlet API -->
- <!-- FIXME: is this really needed ? -->
- <groupId>javax.servlet</groupId>
- <artifactId>javax.servlet-api</artifactId>
+ <groupId>org.lz4</groupId>
+ <artifactId>lz4-java</artifactId>
+ <version>1.8.0</version>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>util</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence-tck_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
</dependency>
-
- <!-- Codahale -->
<dependency>
- <groupId>io.dropwizard.metrics</groupId>
- <artifactId>metrics-core</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
</dependency>
<dependency>
- <groupId>io.dropwizard.metrics</groupId>
- <artifactId>metrics-graphite</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-binfmt</artifactId>
</dependency>
<dependency>
- <groupId>io.dropwizard.metrics</groupId>
- <artifactId>metrics-jmx</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
</dependency>
<dependency>
- <groupId>javax.inject</groupId>
- <artifactId>javax.inject</artifactId>
- <scope>provided</scope>
- <optional>true</optional>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-api</artifactId>
</dependency>
<dependency>
- <groupId>org.kohsuke.metainf-services</groupId>
- <artifactId>metainf-services</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-repo-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-repo-spi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
</dependency>
- <!-- Google -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava-testlib</artifactId>
</dependency>
-
- <!-- Scala -->
<dependency>
- <groupId>org.scala-lang</groupId>
- <artifactId>scala-library</artifactId>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence-tck_2.13</artifactId>
</dependency>
-
- <!-- OpenDaylight -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>util</artifactId>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
+ <artifactId>yang-test-util</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-util</artifactId>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-codec-binfmt</artifactId>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <scope>test</scope>
</dependency>
-
- <!-- Compression -->
<dependency>
- <groupId>org.lz4</groupId>
- <artifactId>lz4-java</artifactId>
- <version>1.7.1</version>
+ <groupId>org.xmlunit</groupId>
+ <artifactId>xmlunit-core</artifactId>
</dependency>
</dependencies>
package org.opendaylight.controller.cluster;
import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
import akka.persistence.SnapshotSelectionCriteria;
+import org.eclipse.jdt.annotation.NonNull;
/**
* DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
* @return the last sequence number
*/
long getLastSequenceNumber();
+
+ /**
+ * Receive and potentially handle a {@link JournalProtocol} response.
+ *
+ * @param response A {@link JournalProtocol} response
+ * @return {@code true} if the response was handled
+ */
+ boolean handleJournalResponse(JournalProtocol.@NonNull Response response);
+
+ /**
+ * Receive and potentially handle a {@link SnapshotProtocol} response.
+ *
+ * @param response A {@link SnapshotProtocol} response
+ * @return {@code true} if the response was handled
+ */
+ boolean handleSnapshotResponse(SnapshotProtocol.@NonNull Response response);
}
package org.opendaylight.controller.cluster;
import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
import akka.persistence.SnapshotSelectionCriteria;
/**
public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
private DataPersistenceProvider delegate;
- public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+ public DelegatingPersistentDataProvider(final DataPersistenceProvider delegate) {
this.delegate = delegate;
}
- public void setDelegate(DataPersistenceProvider delegate) {
+ public void setDelegate(final DataPersistenceProvider delegate) {
this.delegate = delegate;
}
}
@Override
- public <T> void persist(T entry, Procedure<T> procedure) {
+ public <T> void persist(final T entry, final Procedure<T> procedure) {
delegate.persist(entry, procedure);
}
@Override
- public <T> void persistAsync(T entry, Procedure<T> procedure) {
+ public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
delegate.persistAsync(entry, procedure);
}
@Override
- public void saveSnapshot(Object entry) {
+ public void saveSnapshot(final Object entry) {
delegate.saveSnapshot(entry);
}
@Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
delegate.deleteSnapshots(criteria);
}
@Override
- public void deleteMessages(long sequenceNumber) {
+ public void deleteMessages(final long sequenceNumber) {
delegate.deleteMessages(sequenceNumber);
}
public long getLastSequenceNumber() {
return delegate.getLastSequenceNumber();
}
+
+ @Override
+ public boolean handleJournalResponse(final JournalProtocol.Response response) {
+ return delegate.handleJournalResponse(response);
+ }
+
+ @Override
+ public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+ return delegate.handleSnapshotResponse(response);
+ }
}
import static java.util.Objects.requireNonNull;
import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
import akka.persistence.SnapshotSelectionCriteria;
import org.opendaylight.controller.cluster.common.actor.ExecuteInSelfActor;
import org.slf4j.Logger;
LOG.error("An unexpected error occurred", e);
}
}
+
+ @Override
+ public boolean handleJournalResponse(final JournalProtocol.Response response) {
+ return false;
+ }
+
+ @Override
+ public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+ return false;
+ }
}
import akka.japi.Procedure;
import akka.persistence.AbstractPersistentActor;
+import akka.persistence.DeleteMessagesSuccess;
+import akka.persistence.DeleteSnapshotsSuccess;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
import akka.persistence.SnapshotSelectionCriteria;
/**
* A DataPersistenceProvider implementation with persistence enabled.
*/
public class PersistentDataProvider implements DataPersistenceProvider {
-
private final AbstractPersistentActor persistentActor;
- public PersistentDataProvider(AbstractPersistentActor persistentActor) {
+ public PersistentDataProvider(final AbstractPersistentActor persistentActor) {
this.persistentActor = requireNonNull(persistentActor, "persistentActor can't be null");
}
}
@Override
- public <T> void persist(T entry, Procedure<T> procedure) {
+ public <T> void persist(final T entry, final Procedure<T> procedure) {
persistentActor.persist(entry, procedure);
}
@Override
- public <T> void persistAsync(T entry, Procedure<T> procedure) {
+ public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
persistentActor.persistAsync(entry, procedure);
}
@Override
- public void saveSnapshot(Object snapshot) {
+ public void saveSnapshot(final Object snapshot) {
persistentActor.saveSnapshot(snapshot);
}
@Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
persistentActor.deleteSnapshots(criteria);
}
@Override
- public void deleteMessages(long sequenceNumber) {
+ public void deleteMessages(final long sequenceNumber) {
persistentActor.deleteMessages(sequenceNumber);
}
public long getLastSequenceNumber() {
return persistentActor.lastSequenceNr();
}
+
+ @Override
+ public boolean handleJournalResponse(final JournalProtocol.Response response) {
+ return response instanceof DeleteMessagesSuccess;
+ }
+
+ @Override
+ public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+ return response instanceof DeleteSnapshotsSuccess;
+ }
}
public abstract class AbstractUntypedActor extends AbstractActor implements ExecuteInSelfActor {
// The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
- @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
@SuppressWarnings("checkstyle:MemberName")
+ @SuppressFBWarnings(value = "SLF4J_LOGGER_SHOULD_BE_PRIVATE", justification = "Class identity is required")
protected final Logger LOG = LoggerFactory.getLogger(getClass());
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
protected AbstractUntypedActor() {
LOG.debug("Actor created {}", getSelf());
getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
*/
package org.opendaylight.controller.cluster.common.actor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
/**
* Actor with its behaviour metered. Metering is enabled by configuration.
*/
public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
-
- //this is used in the metric name. Some transient actors do not have defined names
+ // this is used in the metric name. Some transient actors do not have defined names
private String actorNameOverride;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
public AbstractUntypedActorWithMetering() {
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
}
- public AbstractUntypedActorWithMetering(String actorNameOverride) {
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
+ public AbstractUntypedActorWithMetering(final String actorNameOverride) {
this.actorNameOverride = actorNameOverride;
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
private boolean isMetricsCaptureEnabled() {
- CommonConfig config = new CommonConfig(getContext().system().settings().config());
- return config.isMetricCaptureEnabled();
+ return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
}
public String getActorNameOverride() {
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+// FIXME: override getContext(), getSelf() and others to be final to get rid of
+// SpotBugs MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR violation
public abstract class AbstractUntypedPersistentActor extends AbstractPersistentActor implements ExecuteInSelfActor {
// The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
- @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
@SuppressWarnings("checkstyle:MemberName")
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
protected final Logger LOG = LoggerFactory.getLogger(getClass());
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
protected AbstractUntypedPersistentActor() {
LOG.trace("Actor created {}", getSelf());
getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
*/
package org.opendaylight.controller.cluster.common.actor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
/**
* Actor with its behaviour metered. Metering is enabled by configuration.
*/
public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor {
-
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
public AbstractUntypedPersistentActorWithMetering() {
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
private boolean isMetricsCaptureEnabled() {
- CommonConfig config = new CommonConfig(getContext().system().settings().config());
- return config.isMetricCaptureEnabled();
+ return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
}
}
import scala.runtime.BoxedUnit;
/**
- * Represents behaviour that can be exhibited by actors of type {@link akka.actor.UntypedActor}
+ * Represents behaviour that can be exhibited by actors of type {@link AbstractActor}
*
* <p>
* This behaviour meters actor's default behaviour. It captures 2 metrics:
* @param message the message to process
*/
@Override
- public BoxedUnit apply(Object message) {
+ public BoxedUnit apply(final Object message) {
final String messageType = message.getClass().getSimpleName();
final String msgProcessingTimeByMsgType =
MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE, messageType);
import akka.actor.Address;
import akka.actor.Props;
import akka.actor.UntypedAbstractActor;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent;
import akka.japi.Effect;
import akka.remote.AssociationErrorEvent;
import akka.remote.RemotingLifecycleEvent;
-import akka.remote.ThisActorSystemQuarantinedEvent;
+import akka.remote.artery.ThisActorSystemQuarantinedEvent;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.HashSet;
import java.util.Set;
import org.slf4j.Logger;
private final Effect callback;
private boolean quarantined;
- private Set<Address> addressSet = new HashSet<>();
+ private final Set<Address> addressSet = new HashSet<>();
private int count = 0;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
protected QuarantinedMonitorActor(final Effect callback) {
this.callback = callback;
LOG.debug("Created QuarantinedMonitorActor");
getContext().system().eventStream().subscribe(getSelf(), RemotingLifecycleEvent.class);
+ getContext().system().eventStream().subscribe(getSelf(), ClusterEvent.MemberDowned.class);
}
@Override
return;
}
- if (message instanceof ThisActorSystemQuarantinedEvent) {
- final ThisActorSystemQuarantinedEvent event = (ThisActorSystemQuarantinedEvent) message;
+ if (message instanceof ThisActorSystemQuarantinedEvent event) {
LOG.warn("Got quarantined by {}", event.remoteAddress());
quarantined = true;
// execute the callback
callback.apply();
- } else if (message instanceof AssociationErrorEvent) {
- String errorMessage = message.toString();
+ } else if (message instanceof AssociationErrorEvent event) {
+ final String errorMessage = message.toString();
LOG.trace("errorMessage:{}", errorMessage);
if (errorMessage.contains("The remote system has a UID that has been quarantined")) {
- Address address = ((AssociationErrorEvent) message).getRemoteAddress();
+ final Address address = event.getRemoteAddress();
addressSet.add(address);
count++;
LOG.trace("address:{} addressSet: {} count:{}", address, addressSet, count);
if (count >= MESSAGE_THRESHOLD && addressSet.size() > 1) {
count = 0;
addressSet.clear();
- final AssociationErrorEvent event = (AssociationErrorEvent) message;
LOG.warn("Got quarantined via AssociationEvent by {}", event.remoteAddress());
quarantined = true;
count = 0;
addressSet.clear();
}
+ } else if (message instanceof ClusterEvent.MemberDowned event) {
+ if (Cluster.get(getContext().system()).selfMember().equals(event.member())) {
+ LOG.warn("This member has been downed, restarting");
+
+ callback.apply();
+ }
}
}
* NormalizedNodeNavigator walks a {@link NormalizedNodeVisitor} through the NormalizedNode.
*/
public class NormalizedNodeNavigator {
-
private final NormalizedNodeVisitor visitor;
public NormalizedNodeNavigator(final NormalizedNodeVisitor visitor) {
this.visitor = requireNonNull(visitor, "visitor should not be null");
}
- public void navigate(String parentPath, final NormalizedNode<?, ?> normalizedNode) {
+ public void navigate(String parentPath, final NormalizedNode normalizedNode) {
if (parentPath == null) {
parentPath = "";
}
}
private void navigateDataContainerNode(final int level, final String parentPath,
- final DataContainerNode<?> dataContainerNode) {
+ final DataContainerNode dataContainerNode) {
visitor.visitNode(level, parentPath, dataContainerNode);
- String newParentPath = parentPath + "/" + dataContainerNode.getIdentifier().toString();
+ String newParentPath = parentPath + "/" + dataContainerNode.name().toString();
- for (NormalizedNode<?, ?> node : dataContainerNode.getValue()) {
- if (node instanceof MixinNode && node instanceof NormalizedNodeContainer) {
- navigateNormalizedNodeContainerMixin(level, newParentPath, (NormalizedNodeContainer<?, ?, ?>) node);
+ for (var node : dataContainerNode.body()) {
+ if (node instanceof MixinNode && node instanceof NormalizedNodeContainer<?> container) {
+ navigateNormalizedNodeContainerMixin(level, newParentPath, container);
} else {
navigateNormalizedNode(level, newParentPath, node);
}
}
-
}
private void navigateNormalizedNodeContainerMixin(final int level, final String parentPath,
- final NormalizedNodeContainer<?, ?, ?> node) {
+ final NormalizedNodeContainer<?> node) {
visitor.visitNode(level, parentPath, node);
- String newParentPath = parentPath + "/" + node.getIdentifier().toString();
+ String newParentPath = parentPath + "/" + node.name().toString();
- for (NormalizedNode<?, ?> normalizedNode : node.getValue()) {
- if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer) {
- navigateNormalizedNodeContainerMixin(level + 1, newParentPath,
- (NormalizedNodeContainer<?, ?, ?>) normalizedNode);
+ for (var normalizedNode : node.body()) {
+ if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer<?> container) {
+ navigateNormalizedNodeContainerMixin(level + 1, newParentPath, container);
} else {
navigateNormalizedNode(level, newParentPath, normalizedNode);
}
}
-
}
- private void navigateNormalizedNode(final int level, final String parentPath,
- final NormalizedNode<?, ?> normalizedNode) {
- if (normalizedNode instanceof DataContainerNode) {
-
- final DataContainerNode<?> dataContainerNode = (DataContainerNode<?>) normalizedNode;
-
- navigateDataContainerNode(level + 1, parentPath, dataContainerNode);
+ private void navigateNormalizedNode(final int level, final String parentPath, final NormalizedNode normalizedNode) {
+ if (normalizedNode instanceof DataContainerNode dataContainer) {
+ navigateDataContainerNode(level + 1, parentPath, dataContainer);
} else {
visitor.visitNode(level + 1, parentPath, normalizedNode);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.node.utils;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public interface NormalizedNodeVisitor {
- void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode);
+ void visitNode(int level, String parentPath, NormalizedNode normalizedNode);
}
*/
package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-import static org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion.MAGNESIUM;
-
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@FunctionalInterface
public interface Applier<T> {
- void apply(T instance, YangInstanceIdentifier path, NormalizedNode<?, ?> node);
+ void apply(T instance, YangInstanceIdentifier path, NormalizedNode node);
}
- public static Optional<NormalizedNode<?, ?>> readNormalizedNode(final DataInput in) throws IOException {
+ public static Optional<NormalizedNode> readNormalizedNode(final DataInput in) throws IOException {
if (!in.readBoolean()) {
return Optional.empty();
}
return Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
}
- public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode<?, ?> node)
+ public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode node)
throws IOException {
- writeNormalizedNode(out, MAGNESIUM, node);
+ writeNormalizedNode(out, NormalizedNodeStreamVersion.POTASSIUM, node);
}
- public static void writeNormalizedNode(final DataOutput out,
- final org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion version,
- final @Nullable NormalizedNode<?, ?> node) throws IOException {
+ public static void writeNormalizedNode(final DataOutput out, final NormalizedNodeStreamVersion version,
+ final @Nullable NormalizedNode node) throws IOException {
if (node != null) {
out.writeBoolean(true);
public static void writePath(final DataOutput out, final @NonNull YangInstanceIdentifier path)
throws IOException {
- writePath(out, MAGNESIUM, path);
+ writePath(out, NormalizedNodeStreamVersion.POTASSIUM, path);
}
public static void writePath(final DataOutput out, final NormalizedNodeStreamVersion version,
public static <T> void readNodeAndPath(final DataInput in, final T instance, final Applier<T> applier)
throws IOException {
final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
- NormalizedNode<?, ?> node = stream.readNormalizedNode();
+ NormalizedNode node = stream.readNormalizedNode();
YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
applier.apply(instance, path, node);
}
public static void writeNodeAndPath(final DataOutput out, final NormalizedNodeStreamVersion version,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> node) throws IOException {
+ final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
stream.writeNormalizedNode(node);
stream.writeYangInstanceIdentifier(path);
}
public static void writeNodeAndPath(final DataOutput out, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> node) throws IOException {
- writeNodeAndPath(out, MAGNESIUM, path, node);
+ final NormalizedNode node) throws IOException {
+ writeNodeAndPath(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
}
public static <T> void readPathAndNode(final DataInput in, final T instance, final Applier<T> applier)
throws IOException {
final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
- NormalizedNode<?, ?> node = stream.readNormalizedNode();
+ NormalizedNode node = stream.readNormalizedNode();
applier.apply(instance, path, node);
}
public static void writePathAndNode(final DataOutput out,
final org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion version,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> node) throws IOException {
+ final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
stream.writeYangInstanceIdentifier(path);
stream.writeNormalizedNode(node);
}
public static void writePathAndNode(final DataOutput out, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> node) throws IOException {
- writePathAndNode(out, MAGNESIUM, path, node);
+ final NormalizedNode node) throws IOException {
+ writePathAndNode(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
}
}
import java.util.Optional;
import javax.xml.transform.dom.DOMSource;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodePruner.class);
- private final Deque<DataSchemaContextNode<?>> stack = new ArrayDeque<>();
+ private final Deque<DataSchemaContext> stack = new ArrayDeque<>();
private final ReusableImmutableNormalizedNodeStreamWriter delegate =
ReusableImmutableNormalizedNodeStreamWriter.create();
private final DataSchemaContextTree tree;
- private DataSchemaContextNode<?> nodePathSchemaNode;
- private NormalizedNode<?, ?> normalizedNode;
+ private DataSchemaContext nodePathSchemaNode;
+ private NormalizedNode normalizedNode;
private State state = State.UNITIALIZED;
private int unknown;
this.tree = requireNonNull(tree);
}
- AbstractNormalizedNodePruner(final SchemaContext schemaContext) {
+ AbstractNormalizedNodePruner(final EffectiveModelContext schemaContext) {
this(DataSchemaContextTree.from(schemaContext));
}
enter(ReusableImmutableNormalizedNodeStreamWriter::startContainerNode, name, childSizeHint);
}
- @Override
- public final void startYangModeledAnyXmlNode(final NodeIdentifier nodeIdentifier, final int count) {
- // FIXME: implement this
- throw new UnsupportedOperationException("Not implemented yet");
- }
-
@Override
public final void startUnkeyedList(final NodeIdentifier name, final int childSizeHint) throws IOException {
enter(ReusableImmutableNormalizedNodeStreamWriter::startUnkeyedList, name, childSizeHint);
enter(ReusableImmutableNormalizedNodeStreamWriter::startChoiceNode, name, childSizeHint);
}
- @Override
- public final void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException {
- enter(ReusableImmutableNormalizedNodeStreamWriter::startAugmentationNode, identifier);
- }
-
@Override
public final boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
if (enter(name)) {
}
}
- Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
+ Object translateScalar(final DataSchemaContext context, final Object value) {
// Default is pass-through
return value;
}
}
if (stack.isEmpty()) {
- normalizedNode = delegate.getResult();
+ final var result = delegate.result();
+ normalizedNode = result != null ? result.data() : null;
state = State.CLOSED;
}
}
* @return Resulting node for the path, if it was not pruned
* @throws IllegalStateException if this pruner has not been closed
*/
- public final Optional<NormalizedNode<?, ?>> getResult() {
+ public final Optional<NormalizedNode> getResult() {
checkState(state == State.CLOSED, "Cannot get result in state %s", state);
return Optional.ofNullable(normalizedNode);
}
return false;
}
- final DataSchemaContextNode<?> schema;
- final DataSchemaContextNode<?> parent = currentSchema();
+ final DataSchemaContext schema;
+ final DataSchemaContext parent = currentSchema();
if (parent != null) {
- schema = parent.getChild(name);
+ schema = parent instanceof DataSchemaContext.Composite compositeParent ? compositeParent.childByArg(name)
+ : null;
} else {
schema = nodePathSchemaNode;
}
}
stack.push(schema);
- final DataSchemaNode dataSchema = schema.getDataSchemaNode();
+ final DataSchemaNode dataSchema = schema.dataSchemaNode();
if (dataSchema != null) {
delegate.nextDataSchemaNode(dataSchema);
}
}
}
- final DataSchemaContextNode<?> currentSchema() {
+ final DataSchemaContext currentSchema() {
return stack.peek();
}
}
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
/**
* The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
@Beta
public abstract class ReusableNormalizedNodePruner extends AbstractNormalizedNodePruner {
private static final class SimplePruner extends ReusableNormalizedNodePruner {
- SimplePruner(final SchemaContext schemaContext) {
+ SimplePruner(final EffectiveModelContext schemaContext) {
super(schemaContext);
}
}
}
- ReusableNormalizedNodePruner(final SchemaContext schemaContext) {
+ ReusableNormalizedNodePruner(final EffectiveModelContext schemaContext) {
super(schemaContext);
}
* @return A new uninitialized pruner
* @throws NullPointerException if {@code schemaContext} is null
*/
- public static @NonNull ReusableNormalizedNodePruner forSchemaContext(final SchemaContext schemaContext) {
+ public static @NonNull ReusableNormalizedNodePruner forSchemaContext(final EffectiveModelContext schemaContext) {
return new SimplePruner(schemaContext);
}
/**
* Create a new pruner bound to a DataSchemaContextTree. This is a more efficient alternative of
- * {@link #forSchemaContext(SchemaContext)}.
+ * {@link #forSchemaContext(EffectiveModelContext)}.
*
* @param tree DataSchemaContextTree to use
* @return A new uninitialized pruner
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
UINT8 {
@Override
public Object apply(final Object obj) {
- if (obj instanceof Short) {
+ if (obj instanceof Short shortObj) {
LOG.trace("Translating legacy uint8 {}", obj);
- return Uint8.valueOf((Short) obj);
+ return Uint8.valueOf(shortObj);
}
return obj;
}
UINT16 {
@Override
public Object apply(final Object obj) {
- if (obj instanceof Integer) {
+ if (obj instanceof Integer intObj) {
LOG.trace("Translating legacy uint16 {}", obj);
- return Uint16.valueOf((Integer) obj);
+ return Uint16.valueOf(intObj);
}
return obj;
}
UINT32 {
@Override
public Object apply(final Object obj) {
- if (obj instanceof Long) {
+ if (obj instanceof Long longObj) {
LOG.trace("Translating legacy uint32 {}", obj);
- return Uint32.valueOf((Long) obj);
+ return Uint32.valueOf(longObj);
}
return obj;
}
UINT64 {
@Override
public Object apply(final Object obj) {
- if (obj instanceof BigInteger) {
+ if (obj instanceof BigInteger bigInt) {
LOG.trace("Translating legacy uint64 {}", obj);
- return Uint64.valueOf((BigInteger) obj);
+ return Uint64.valueOf(bigInt);
}
return obj;
}
}
@Override
- Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
- final DataSchemaNode schema = context.getDataSchemaNode();
- return schema instanceof TypedDataSchemaNode ? adaptValue(((TypedDataSchemaNode) schema).getType(), value)
- : value;
+ Object translateScalar(final DataSchemaContext context, final Object value) {
+ final DataSchemaNode schema = context.dataSchemaNode();
+ return schema instanceof TypedDataSchemaNode typed ? adaptValue(typed.getType(), value) : value;
}
private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer, final NodeWithValue<?> name) {
final NodeWithValue<?> adapted;
- final DataSchemaNode schema = currentSchema().getDataSchemaNode();
- if (schema instanceof TypedDataSchemaNode) {
+ final DataSchemaNode schema = currentSchema().dataSchemaNode();
+ if (schema instanceof TypedDataSchemaNode typed) {
final Object oldValue = name.getValue();
- final Object newValue = adaptValue(((TypedDataSchemaNode) schema).getType(), oldValue);
+ final Object newValue = adaptValue(typed.getType(), oldValue);
adapted = newValue == oldValue ? name : new NodeWithValue<>(name.getNodeType(), newValue);
} else {
adapted = name;
private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer,
final NodeIdentifierWithPredicates name, final int size) {
final NodeIdentifierWithPredicates adapted;
- final DataSchemaNode schema = currentSchema().getDataSchemaNode();
- if (schema instanceof ListSchemaNode) {
- adapted = NIP_ADAPTERS.getUnchecked((ListSchemaNode) schema).apply(name);
+ final DataSchemaNode schema = currentSchema().dataSchemaNode();
+ if (schema instanceof ListSchemaNode list) {
+ adapted = NIP_ADAPTERS.getUnchecked(list).apply(name);
} else {
adapted = name;
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
/**
* Abstract {@link DataTreeModificationCursor} which tracks the current path. Subclasses can get the current path
*/
@Beta
public abstract class AbstractDataTreeModificationCursor implements DataTreeModificationCursor {
- private YangInstanceIdentifier current = YangInstanceIdentifier.empty();
+ private YangInstanceIdentifier current = YangInstanceIdentifier.of();
protected final YangInstanceIdentifier current() {
return current;
}
@Override
- public final Optional<NormalizedNode<?, ?>> readNode(final PathArgument child) {
+ public final Optional<NormalizedNode> readNode(final PathArgument child) {
throw new UnsupportedOperationException("Not implemented");
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.persisted;
+package org.opendaylight.controller.cluster.io;
import static java.util.Objects.requireNonNull;
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
+import com.google.common.io.ByteSink;
import java.io.DataOutput;
import java.io.IOException;
+import java.io.InputStream;
import java.io.ObjectInput;
import java.util.ArrayList;
import java.util.List;
import org.eclipse.jdt.annotation.NonNullByDefault;
import org.opendaylight.yangtools.concepts.Immutable;
+@Beta
@NonNullByDefault
-final class ChunkedByteArray implements Immutable {
+public final class ChunkedByteArray implements Immutable {
private final ImmutableList<byte[]> chunks;
private final int size;
this.chunks = requireNonNull(chunks);
}
- static ChunkedByteArray readFrom(final ObjectInput in, final int size, final int chunkSize)
+ public static ChunkedByteArray readFrom(final ObjectInput in, final int size, final int chunkSize)
throws IOException {
final List<byte[]> chunks = new ArrayList<>(requiredChunks(size, chunkSize));
int remaining = size;
return new ChunkedByteArray(size, ImmutableList.copyOf(chunks));
}
- int size() {
+ public int size() {
return size;
}
- ChunkedInputStream openStream() {
+ public InputStream openStream() {
return new ChunkedInputStream(size, chunks.iterator());
}
- void copyTo(final DataOutput output) throws IOException {
+ public void copyTo(final DataOutput output) throws IOException {
for (byte[] chunk : chunks) {
output.write(chunk, 0, chunk.length);
}
}
+ public void copyTo(final ByteSink output) throws IOException {
+ for (byte[] chunk : chunks) {
+ output.write(chunk);
+ }
+ }
+
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("size", size).add("chunkCount", chunks.size()).toString();
}
+ @VisibleForTesting
ImmutableList<byte[]> getChunks() {
return chunks;
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.persisted;
+package org.opendaylight.controller.cluster.io;
import static java.util.Objects.requireNonNull;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.persisted;
+package org.opendaylight.controller.cluster.io;
+import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.math.IntMath.ceilingPowerOfTwo;
+import static com.google.common.math.IntMath.isPowerOfTwo;
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import java.io.ByteArrayOutputStream;
import java.util.Arrays;
import java.util.Deque;
import java.util.Iterator;
-import org.opendaylight.yangtools.concepts.Variant;
+import org.opendaylight.yangtools.concepts.Either;
/**
* An {@link OutputStream} implementation which collects data is a series of {@code byte[]} chunks, each of which has
* <ul>
* <li>Data acquisition, during which we start with an initial (power-of-two) size and proceed to fill it up. Once the
* buffer is full, we stash it, allocate a new buffer twice its size and repeat the process. Once we hit
- * {@link #MAX_ARRAY_SIZE}, we do not grow subsequent buffer. We also can skip some intermediate sizes if data
+ * {@code maxChunkSize}, we do not grow subsequent buffer. We also can skip some intermediate sizes if data
* is introduced in large chunks via {@link #write(byte[], int, int)}.</li>
* <li>Buffer consolidation, which occurs when the stream is {@link #close() closed}. At this point we construct the
* final collection of buffers.</li>
* <p>
* The data acquisition strategy results in predictably-sized buffers, which are growing exponentially in size until
* they hit maximum size. Intrinsic property here is that the total capacity of chunks created during the ramp up is
- * guaranteed to fit into {@code MAX_ARRAY_SIZE}, hence they can readily be compacted into a single buffer, which
- * replaces them. Combined with the requirement to trim the last buffer to have accurate length, this algorithm
- * guarantees total number of internal copy operations is capped at {@code 2 * MAX_ARRAY_SIZE}. The number of produced
- * chunks is also well-controlled:
+ * guaranteed to fit into {@code maxChunkSize}, hence they can readily be compacted into a single buffer, which replaces
+ * them. Combined with the requirement to trim the last buffer to have accurate length, this algorithm guarantees total
+ * number of internal copy operations is capped at {@code 2 * maxChunkSize}. The number of produced chunks is also
+ * well-controlled:
* <ul>
* <li>for slowly-built data, we will maintain perfect packing</li>
* <li>for fast-startup data, we will be at most one one chunk away from packing perfectly</li>
* @author Robert Varga
* @author Tomas Olvecky
*/
-final class ChunkedOutputStream extends OutputStream {
- static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
- "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024));
+@Beta
+public final class ChunkedOutputStream extends OutputStream {
private static final int MIN_ARRAY_SIZE = 32;
+ private final int maxChunkSize;
+
// byte[] or a List
private Object result;
// Lazily-allocated to reduce pressure for single-chunk streams
private int currentOffset;
private int size;
- ChunkedOutputStream(final int requestedInitialCapacity) {
- currentChunk = new byte[initialCapacity(requestedInitialCapacity)];
+ public ChunkedOutputStream(final int requestedInitialCapacity, final int maxChunkSize) {
+ checkArgument(isPowerOfTwo(maxChunkSize), "Maximum chunk size %s is not a power of two", maxChunkSize);
+ checkArgument(maxChunkSize > 0, "Maximum chunk size %s is not positive", maxChunkSize);
+ this.maxChunkSize = maxChunkSize;
+ currentChunk = new byte[initialCapacity(requestedInitialCapacity, maxChunkSize)];
}
@Override
}
}
- int size() {
+ public int size() {
return size;
}
- ChunkedByteArray toChunkedByteArray() {
+ public Either<byte[], ChunkedByteArray> toVariant() {
checkClosed();
- return new ChunkedByteArray(size, result instanceof byte[] ? ImmutableList.of((byte[]) result)
- : (ImmutableList<byte[]>) result);
+ return result instanceof byte[] bytes ? Either.ofFirst(bytes)
+ : Either.ofSecond(new ChunkedByteArray(size, (ImmutableList<byte[]>) result));
}
- Variant<byte[], ChunkedByteArray> toVariant() {
+ @VisibleForTesting
+ ChunkedByteArray toChunkedByteArray() {
checkClosed();
- return result instanceof byte[] ? Variant.ofFirst((byte[]) result)
- : Variant.ofSecond(new ChunkedByteArray(size, (ImmutableList<byte[]>) result));
+ return new ChunkedByteArray(size, result instanceof byte[] bytes ? ImmutableList.of(bytes)
+ : (ImmutableList<byte[]>) result);
}
private Object computeResult() {
// Simple case: it's only the current buffer, return that
return trimChunk(currentChunk, currentOffset);
}
- if (size <= MAX_ARRAY_SIZE) {
+ if (size <= maxChunkSize) {
// We have collected less than full chunk of data, let's have just one chunk ...
final byte[] singleChunk;
if (currentOffset == 0 && prevChunks.size() == 1) {
final Iterator<byte[]> it = prevChunks.iterator();
do {
final byte[] chunk = it.next();
- if (chunk.length == MAX_ARRAY_SIZE) {
+ if (chunk.length == maxChunkSize) {
break;
}
}
}
- private static int nextChunkSize(final int currentSize, final int requested) {
- return currentSize == MAX_ARRAY_SIZE || requested >= MAX_ARRAY_SIZE
- ? MAX_ARRAY_SIZE : Math.max(currentSize * 2, ceilingPowerOfTwo(requested));
+ private int nextChunkSize(final int currentSize, final int requested) {
+ return currentSize == maxChunkSize || requested >= maxChunkSize
+ ? maxChunkSize : Math.max(currentSize * 2, ceilingPowerOfTwo(requested));
}
- private static int nextChunkSize(final int currentSize) {
- return currentSize < MAX_ARRAY_SIZE ? currentSize * 2 : MAX_ARRAY_SIZE;
+ private int nextChunkSize(final int currentSize) {
+ return currentSize < maxChunkSize ? currentSize * 2 : maxChunkSize;
}
- private static int initialCapacity(final int requestedSize) {
+ private static int initialCapacity(final int requestedSize, final int maxChunkSize) {
if (requestedSize < MIN_ARRAY_SIZE) {
return MIN_ARRAY_SIZE;
}
- if (requestedSize > MAX_ARRAY_SIZE) {
- return MAX_ARRAY_SIZE;
+ if (requestedSize > maxChunkSize) {
+ return maxChunkSize;
}
return ceilingPowerOfTwo(requestedSize);
}
*/
package org.opendaylight.controller.cluster.io;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import com.google.common.collect.Sets;
import com.google.common.io.ByteSource;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.lang.ref.Cleaner;
+import java.lang.ref.Cleaner.Cleanable;
import java.nio.file.Files;
-import java.util.Iterator;
-import java.util.Set;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.checkerframework.checker.lock.qual.Holding;
import org.eclipse.jdt.annotation.NonNull;
private static final Logger LOG = LoggerFactory.getLogger(FileBackedOutputStream.class);
/**
- * This stores the Cleanup PhantomReference instances statically. This is necessary because PhantomReferences
- * need a hard reference so they're not garbage collected. Once finalized, the Cleanup PhantomReference removes
- * itself from this map and thus becomes eligible for garbage collection.
+ * A Cleaner instance responsible for deleting any files which may be lost due to us not being cleaning up
+ * temporary files.
*/
- @VisibleForTesting
- static final Set<Cleanup> REFERENCE_CACHE = Sets.newConcurrentHashSet();
-
- /**
- * Used as the ReferenceQueue for the Cleanup PhantomReferences.
- */
- private static final FinalizableReferenceQueue REFERENCE_QUEUE = new FinalizableReferenceQueue();
+ private static final Cleaner FILE_CLEANER = Cleaner.create();
private final int fileThreshold;
private final String fileDirectory;
@GuardedBy("this")
private File file;
+ @GuardedBy("this")
+ private Cleanable fileCleanup;
+
@GuardedBy("this")
private ByteSource source;
if (file != null) {
return Files.newInputStream(file.toPath());
} else {
- return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount());
+ return new ByteArrayInputStream(memory.buf(), 0, memory.count());
}
}
}
*/
public synchronized void cleanup() {
LOG.debug("In cleanup");
-
closeQuietly();
-
- if (file != null) {
- Iterator<Cleanup> iter = REFERENCE_CACHE.iterator();
- while (iter.hasNext()) {
- if (file.equals(iter.next().file)) {
- iter.remove();
- break;
- }
- }
-
- LOG.debug("cleanup - deleting temp file {}", file);
-
- deleteFile(file);
- file = null;
+ if (fileCleanup != null) {
+ fileCleanup.clean();
}
+ // Already deleted above
+ file = null;
}
@Holding("this")
throw new IOException("Stream already closed");
}
- if (file == null && memory.getCount() + len > fileThreshold) {
- File temp = File.createTempFile("FileBackedOutputStream", null,
+ if (file == null && memory.count() + len > fileThreshold) {
+ final File temp = File.createTempFile("FileBackedOutputStream", null,
fileDirectory == null ? null : new File(fileDirectory));
temp.deleteOnExit();
+ final Cleaner.Cleanable cleanup = FILE_CLEANER.register(this, () -> deleteFile(temp));
- LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.getCount() + len,
+ LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.count() + len,
fileThreshold, temp);
- OutputStream transfer = null;
+ final OutputStream transfer;
try {
transfer = Files.newOutputStream(temp.toPath());
- transfer.write(memory.getBuffer(), 0, memory.getCount());
- transfer.flush();
-
- // We've successfully transferred the data; switch to writing to file
- out = transfer;
- file = temp;
- memory = null;
-
- new Cleanup(this, file);
- } catch (IOException e) {
- if (transfer != null) {
+ try {
+ transfer.write(memory.buf(), 0, memory.count());
+ transfer.flush();
+ } catch (IOException e) {
try {
transfer.close();
} catch (IOException ex) {
LOG.debug("Error closing temp file {}", temp, ex);
}
+ throw e;
}
-
- deleteFile(temp);
+ } catch (IOException e) {
+ cleanup.clean();
throw e;
}
+
+ // We've successfully transferred the data; switch to writing to file
+ out = transfer;
+ file = temp;
+ fileCleanup = cleanup;
+ memory = null;
}
}
private static void deleteFile(final File file) {
+ LOG.debug("Deleting temp file {}", file);
if (!file.delete()) {
LOG.warn("Could not delete temp file {}", file);
}
/**
* ByteArrayOutputStream that exposes its internals for efficiency.
*/
- private static class MemoryOutputStream extends ByteArrayOutputStream {
- byte[] getBuffer() {
+ private static final class MemoryOutputStream extends ByteArrayOutputStream {
+ byte[] buf() {
return buf;
}
- int getCount() {
+ int count() {
return count;
}
}
-
- /**
- * PhantomReference that deletes the temp file when the FileBackedOutputStream is garbage collected.
- */
- private static class Cleanup extends FinalizablePhantomReference<FileBackedOutputStream> {
- private final File file;
-
- Cleanup(final FileBackedOutputStream referent, final File file) {
- super(referent, REFERENCE_QUEUE);
- this.file = file;
-
- REFERENCE_CACHE.add(this);
-
- LOG.debug("Added Cleanup for temp file {}", file);
- }
-
- @Override
- public void finalizeReferent() {
- LOG.debug("In finalizeReferent");
-
- if (REFERENCE_CACHE.remove(this)) {
- LOG.debug("finalizeReferent - deleting temp file {}", file);
- deleteFile(file);
- }
- }
- }
}
private final String logContext;
MessageAssembler(final Builder builder) {
- this.fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
+ fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
"FiledBackedStreamFactory cannot be null");
- this.assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
+ assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
"assembledMessageCallback cannot be null");
- this.logContext = builder.logContext;
+ logContext = builder.logContext;
stateCache = CacheBuilder.newBuilder()
.expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit)
* @return true if the message was handled, false otherwise
*/
public boolean handleMessage(final Object message, final @NonNull ActorRef sendTo) {
- if (message instanceof MessageSlice) {
- LOG.debug("{}: handleMessage: {}", logContext, message);
- onMessageSlice((MessageSlice) message, sendTo);
+ if (message instanceof MessageSlice messageSlice) {
+ LOG.debug("{}: handleMessage: {}", logContext, messageSlice);
+ onMessageSlice(messageSlice, sendTo);
return true;
- } else if (message instanceof AbortSlicing) {
- LOG.debug("{}: handleMessage: {}", logContext, message);
- onAbortSlicing((AbortSlicing) message);
+ } else if (message instanceof AbortSlicing abortSlicing) {
+ LOG.debug("{}: handleMessage: {}", logContext, abortSlicing);
+ onAbortSlicing(abortSlicing);
return true;
}
final AssembledMessageState state = stateCache.get(identifier, () -> createState(messageSlice));
processMessageSliceForState(messageSlice, state, sendTo);
} catch (ExecutionException e) {
- final MessageSliceException messageSliceEx;
final Throwable cause = e.getCause();
- if (cause instanceof MessageSliceException) {
- messageSliceEx = (MessageSliceException) cause;
- } else {
- messageSliceEx = new MessageSliceException(String.format(
- "Error creating state for identifier %s", identifier), cause);
- }
+ final MessageSliceException messageSliceEx = cause instanceof MessageSliceException sliceEx ? sliceEx
+ : new MessageSliceException(String.format("Error creating state for identifier %s", identifier), cause);
messageSlice.getReplyTo().tell(MessageSliceReply.failed(identifier, messageSliceEx, sendTo),
ActorRef.noSender());
* @return this Builder
*/
public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
- this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+ fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
return this;
}
* @return this Builder
*/
public Builder assembledMessageCallback(final BiConsumer<Object, ActorRef> newAssembledMessageCallback) {
- this.assembledMessageCallback = newAssembledMessageCallback;
+ assembledMessageCallback = newAssembledMessageCallback;
return this;
}
*/
public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
checkArgument(duration > 0, "duration must be > 0");
- this.expireStateAfterInactivityDuration = duration;
- this.expireStateAfterInactivityUnit = unit;
+ expireStateAfterInactivityDuration = duration;
+ expireStateAfterInactivityUnit = unit;
return this;
}
* @return this Builder
*/
public Builder logContext(final String newLogContext) {
- this.logContext = newLogContext;
+ logContext = newLogContext;
return this;
}
@Override
public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
-
- if (!(obj instanceof MessageSliceIdentifier)) {
- return false;
- }
-
- MessageSliceIdentifier other = (MessageSliceIdentifier) obj;
- return other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
- && other.messageId == messageId;
+ return this == obj || obj instanceof MessageSliceIdentifier other
+ && other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
+ && other.messageId == messageId;
}
@Override
private final long id;
MessageSlicer(final Builder builder) {
- this.fileBackedStreamFactory = builder.fileBackedStreamFactory;
- this.messageSliceSize = builder.messageSliceSize;
- this.maxSlicingTries = builder.maxSlicingTries;
+ fileBackedStreamFactory = builder.fileBackedStreamFactory;
+ messageSliceSize = builder.messageSliceSize;
+ maxSlicingTries = builder.maxSlicingTries;
id = SLICER_ID_COUNTER.getAndIncrement();
- this.logContext = builder.logContext + "_slicer-id-" + id;
+ logContext = builder.logContext + "_slicer-id-" + id;
CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder =
CacheBuilder.newBuilder().removalListener(this::stateRemoved);
* @return true if the message was handled, false otherwise
*/
public boolean handleMessage(final Object message) {
- if (message instanceof MessageSliceReply) {
- LOG.debug("{}: handleMessage: {}", logContext, message);
- return onMessageSliceReply((MessageSliceReply) message);
+ if (message instanceof MessageSliceReply sliceReply) {
+ LOG.debug("{}: handleMessage: {}", logContext, sliceReply);
+ return onMessageSliceReply(sliceReply);
}
return false;
private boolean onMessageSliceReply(final MessageSliceReply reply) {
final Identifier identifier = reply.getIdentifier();
- if (!(identifier instanceof MessageSliceIdentifier)
- || ((MessageSliceIdentifier)identifier).getSlicerId() != id) {
+ if (!(identifier instanceof MessageSliceIdentifier sliceIdentifier) || sliceIdentifier.getSlicerId() != id) {
return false;
}
final Optional<MessageSliceException> failure = reply.getFailure();
if (failure.isPresent()) {
LOG.warn("{}: Received failed {}", logContext, reply);
- processMessageSliceException(failure.get(), state, reply.getSendTo());
+ processMessageSliceException(failure.orElseThrow(), state, reply.getSendTo());
return true;
}
* @return this Builder
*/
public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
- this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+ fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
return this;
}
*/
public Builder messageSliceSize(final int newMessageSliceSize) {
checkArgument(newMessageSliceSize > 0, "messageSliceSize must be > 0");
- this.messageSliceSize = newMessageSliceSize;
+ messageSliceSize = newMessageSliceSize;
return this;
}
*/
public Builder maxSlicingTries(final int newMaxSlicingTries) {
checkArgument(newMaxSlicingTries > 0, "newMaxSlicingTries must be > 0");
- this.maxSlicingTries = newMaxSlicingTries;
+ maxSlicingTries = newMaxSlicingTries;
return this;
}
*/
public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
checkArgument(duration > 0, "duration must be > 0");
- this.expireStateAfterInactivityDuration = duration;
- this.expireStateAfterInactivityUnit = unit;
+ expireStateAfterInactivityDuration = duration;
+ expireStateAfterInactivityUnit = unit;
return this;
}
* @return this Builder
*/
public Builder logContext(final String newLogContext) {
- this.logContext = requireNonNull(newLogContext);
+ logContext = requireNonNull(newLogContext);
return this;
}
* @author Thomas Pantelis
*/
public class LeaderStateChanged {
- private final String memberId;
- private final String leaderId;
+ private final @NonNull String memberId;
+ private final @Nullable String leaderId;
private final short leaderPayloadVersion;
- public LeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, short leaderPayloadVersion) {
+ public LeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+ final short leaderPayloadVersion) {
this.memberId = requireNonNull(memberId);
this.leaderId = leaderId;
this.leaderPayloadVersion = leaderPayloadVersion;
}
- } else if (message instanceof RoleChanged) {
+ } else if (message instanceof RoleChanged roleChanged) {
// this message is sent by RaftActor. Notify registered listeners when this message is received.
- RoleChanged roleChanged = (RoleChanged) message;
LOG.info("RoleChangeNotifier for {} , received role change from {} to {}", memberId,
roleChanged.getOldRole(), roleChanged.getNewRole());
new RoleChangeNotification(roleChanged.getMemberId(),
roleChanged.getOldRole(), roleChanged.getNewRole());
- for (ActorRef listener: registeredListeners.values()) {
+ for (ActorRef listener : registeredListeners.values()) {
listener.tell(latestRoleChangeNotification, getSelf());
}
- } else if (message instanceof LeaderStateChanged) {
- latestLeaderStateChanged = (LeaderStateChanged)message;
+ } else if (message instanceof LeaderStateChanged leaderStateChanged) {
+ latestLeaderStateChanged = leaderStateChanged;
- for (ActorRef listener: registeredListeners.values()) {
+ for (ActorRef listener : registeredListeners.values()) {
listener.tell(latestLeaderStateChanged, getSelf());
}
} else {
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
-import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
*
* @author Thomas Pantelis
*/
-public class LocalSnapshotStore extends SnapshotStore {
+public final class LocalSnapshotStore extends SnapshotStore {
private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class);
private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length();
private final File snapshotDir;
public LocalSnapshotStore(final Config config) {
- this.executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
+ executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
snapshotDir = new File(config.getString("dir"));
final int localMaxLoadAttempts = config.getInt("max-load-attempts");
}
private static String encode(final String str) {
- try {
- return URLEncoder.encode(str, StandardCharsets.UTF_8.name());
- } catch (UnsupportedEncodingException e) {
- // Shouldn't happen
- LOG.warn("Error encoding {}", str, e);
- return str;
- }
+ return URLEncoder.encode(str, StandardCharsets.UTF_8);
}
private static String decode(final String str) {
- try {
- return URLDecoder.decode(str, StandardCharsets.UTF_8.name());
- } catch (final UnsupportedEncodingException e) {
- // Shouldn't happen
- LOG.warn("Error decoding {}", str, e);
- return str;
- }
+ return URLDecoder.decode(str, StandardCharsets.UTF_8);
}
@VisibleForTesting
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
-
-/**
- * An instance of a Payload class is meant to be used as the Payload for
- * AppendEntries.
- *
- * <p>
- * When an actor which is derived from RaftActor attempts to persistData it
- * must pass an instance of the Payload class. Similarly when state needs to
- * be applied to the derived RaftActor it will be passed an instance of the
- * Payload class.
- */
-public abstract class Payload {
- public abstract int size();
-}
import java.util.Set;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.schema.provider.impl.YangTextSchemaSourceSerializationProxy;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
import scala.concurrent.Future;
/**
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.schema.provider.impl;
import akka.dispatch.OnComplete;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.ExecutionContext;
-import scala.concurrent.Future;
/**
* Provides schema sources from {@link RemoteYangTextSourceProvider}.
*/
@Beta
-public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSchemaSource> {
+public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSource> {
private static final Logger LOG = LoggerFactory.getLogger(RemoteSchemaProvider.class);
private final RemoteYangTextSourceProvider remoteRepo;
}
@Override
- public ListenableFuture<YangTextSchemaSource> getSource(final SourceIdentifier sourceIdentifier) {
- LOG.trace("Getting yang schema source for {}", sourceIdentifier.getName());
-
- Future<YangTextSchemaSourceSerializationProxy> result = remoteRepo.getYangTextSchemaSource(sourceIdentifier);
+ public ListenableFuture<YangTextSource> getSource(final SourceIdentifier sourceIdentifier) {
+ LOG.trace("Getting yang schema source for {}", sourceIdentifier.name().getLocalName());
- final SettableFuture<YangTextSchemaSource> res = SettableFuture.create();
- result.onComplete(new OnComplete<YangTextSchemaSourceSerializationProxy>() {
+ final var res = SettableFuture.<YangTextSource>create();
+ remoteRepo.getYangTextSchemaSource(sourceIdentifier).onComplete(new OnComplete<>() {
@Override
- public void onComplete(final Throwable throwable,
- final YangTextSchemaSourceSerializationProxy yangTextSchemaSourceSerializationProxy) {
- if (yangTextSchemaSourceSerializationProxy != null) {
- res.set(yangTextSchemaSourceSerializationProxy.getRepresentation());
+ public void onComplete(final Throwable failure, final YangTextSchemaSourceSerializationProxy success) {
+ if (success != null) {
+ res.set(success.getRepresentation());
}
- if (throwable != null) {
- res.setException(throwable);
+ if (failure != null) {
+ res.setException(failure);
}
}
}, executionContext);
import java.io.IOException;
import java.util.Set;
import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
LOG.trace("Sending yang schema source for {}", identifier);
final Promise<YangTextSchemaSourceSerializationProxy> promise = akka.dispatch.Futures.promise();
- ListenableFuture<YangTextSchemaSource> future =
- repository.getSchemaSource(identifier, YangTextSchemaSource.class);
+ ListenableFuture<YangTextSource> future =
+ repository.getSchemaSource(identifier, YangTextSource.class);
- Futures.addCallback(future, new FutureCallback<YangTextSchemaSource>() {
+ Futures.addCallback(future, new FutureCallback<YangTextSource>() {
@Override
- public void onSuccess(final YangTextSchemaSource result) {
+ public void onSuccess(final YangTextSource result) {
try {
promise.success(new YangTextSchemaSourceSerializationProxy(result));
} catch (IOException e) {
- LOG.warn("Unable to read schema source for {}", result.getIdentifier(), e);
+ LOG.warn("Unable to read schema source for {}", result.sourceId(), e);
promise.failure(e);
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.schema.provider.impl;
import com.google.common.annotations.Beta;
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
import java.io.IOException;
import java.io.Serializable;
import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
/**
- * {@link org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource} serialization proxy.
+ * {@link YangTextSource} serialization proxy.
*/
@Beta
public class YangTextSchemaSourceSerializationProxy implements Serializable {
private static final long serialVersionUID = -6361268518176019477L;
- private final byte[] schemaSource;
+ private final String schemaSource;
private final Revision revision;
private final String name;
- public YangTextSchemaSourceSerializationProxy(final YangTextSchemaSource source) throws IOException {
- this.revision = source.getIdentifier().getRevision().orElse(null);
- this.name = source.getIdentifier().getName();
- this.schemaSource = source.read();
+ public YangTextSchemaSourceSerializationProxy(final YangTextSource source) throws IOException {
+ final var sourceId = source.sourceId();
+ revision = sourceId.revision();
+ name = sourceId.name().getLocalName();
+ schemaSource = source.read();
}
- public YangTextSchemaSource getRepresentation() {
- return YangTextSchemaSource.delegateForByteSource(
- RevisionSourceIdentifier.create(name, revision), ByteSource.wrap(schemaSource));
+ public YangTextSource getRepresentation() {
+ return new DelegatedYangTextSource(new SourceIdentifier(Unqualified.of(name), revision),
+ CharSource.wrap(schemaSource));
}
}
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import com.google.common.testing.FakeTicker;
import java.util.List;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
ticker.advance(20, MILLISECONDS);
MessageTracker.Context context2 = messageTracker.received(new Foo());
- Assert.assertEquals(true, context2.error().isPresent());
- Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size());
-
+ assertEquals(true, context2.error().isPresent());
+ assertEquals(0, context2.error().orElseThrow().getMessageProcessingTimesSinceLastExpectedMessage().size());
}
@Test
MessageTracker.Context context2 = messageTracker.received(new Foo());
- Assert.assertEquals(true, context2.error().isPresent());
+ assertEquals(true, context2.error().isPresent());
- MessageTracker.Error error = context2.error().get();
+ MessageTracker.Error error = context2.error().orElseThrow();
List<MessageTracker.MessageProcessingTime> messageProcessingTimes =
error.getMessageProcessingTimesSinceLastExpectedMessage();
- Assert.assertEquals(3, messageProcessingTimes.size());
+ assertEquals(3, messageProcessingTimes.size());
- Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
- Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
- Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
- Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
- Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
- Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+ assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
+ assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
+ assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
+ assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
+ assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
+ assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
LOG.error("An error occurred : {}" , error);
}
ticker.advance(1, MILLISECONDS);
MessageTracker.Context context2 = messageTracker.received(new Foo());
- Assert.assertEquals(false, context2.error().isPresent());
-
+ assertEquals(false, context2.error().isPresent());
}
@Test
messageTracker.received(new Foo());
- try {
- messageTracker.received(new Foo());
- fail("Expected an IllegalStateException");
- } catch (IllegalStateException e) {
- // expected
- }
+ assertThrows(IllegalStateException.class, () -> messageTracker.received(new Foo()));
}
@Test
MessageTracker.Context context = messageTracker.received(new Foo());
- Assert.assertEquals(true, context.error().isPresent());
+ assertEquals(true, context.error().isPresent());
- MessageTracker.Error error = context.error().get();
+ MessageTracker.Error error = context.error().orElseThrow();
- Assert.assertEquals(null, error.getLastExpectedMessage());
- Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+ assertEquals(null, error.getLastExpectedMessage());
+ assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
String errorString = error.toString();
- Assert.assertTrue(errorString.contains("Last Expected Message = null"));
+ assertTrue(errorString.contains("Last Expected Message = null"));
LOG.error("An error occurred : {}", error);
}
MessageTracker.Context context = messageTracker.received(new Foo());
- Assert.assertEquals(true, context.error().isPresent());
-
+ assertEquals(true, context.error().isPresent());
}
@Test
messageTracker.begin();
try (MessageTracker.Context ctx = messageTracker.received(45)) {
- Assert.assertEquals(false, ctx.error().isPresent());
+ assertEquals(false, ctx.error().isPresent());
}
try (MessageTracker.Context ctx = messageTracker.received(45L)) {
- Assert.assertEquals(false, ctx.error().isPresent());
+ assertEquals(false, ctx.error().isPresent());
}
List<MessageTracker.MessageProcessingTime> processingTimeList =
messageTracker.getMessagesSinceLastExpectedMessage();
- Assert.assertEquals(2, processingTimeList.size());
+ assertEquals(2, processingTimeList.size());
assertEquals(Integer.class, processingTimeList.get(0).getMessageClass());
assertEquals(Long.class, processingTimeList.get(1).getMessageClass());
-
}
-
}
import akka.japi.Effect;
import akka.remote.AssociationErrorEvent;
import akka.remote.InvalidAssociation;
-import akka.remote.ThisActorSystemQuarantinedEvent;
+import akka.remote.UniqueAddress;
+import akka.remote.artery.ThisActorSystemQuarantinedEvent;
import akka.testkit.javadsl.TestKit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import scala.Option;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class QuarantinedMonitorActorTest {
private static final Address LOCAL = Address.apply("http", "local");
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
actor = system.actorOf(QuarantinedMonitorActor.props(callback));
}
public void testOnReceiveQuarantined() throws Exception {
final Throwable t = new RuntimeException("Remote has quarantined this system");
final InvalidAssociation cause = InvalidAssociation.apply(LOCAL, REMOTE, t, Option.apply(null));
- final ThisActorSystemQuarantinedEvent event = new ThisActorSystemQuarantinedEvent(LOCAL, REMOTE);
+ final UniqueAddress localAddress = new UniqueAddress(LOCAL, 1);
+ final UniqueAddress remoteAddress = new UniqueAddress(REMOTE, 2);
+ final ThisActorSystemQuarantinedEvent event = new ThisActorSystemQuarantinedEvent(localAddress, remoteAddress);
actor.tell(event, ActorRef.noSender());
verify(callback, timeout(1000)).apply();
}
actor.tell(event, ActorRef.noSender());
verify(callback, never()).apply();
}
-
}
\ No newline at end of file
package org.opendaylight.controller.cluster.datastore.node.utils.stream;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import com.google.common.collect.ImmutableSet;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
-import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.Set;
+import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
import javax.xml.transform.dom.DOMSource;
-import org.custommonkey.xmlunit.Diff;
-import org.custommonkey.xmlunit.XMLUnit;
import org.junit.Test;
import org.opendaylight.yangtools.util.xml.UntrustedXML;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.w3c.dom.Document;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.xmlunit.builder.DiffBuilder;
public class SerializationUtilsTest {
-
- private static final QName CONTAINER_Q_NAME = QName.create("ns-1", "2017-03-17", "container1");
+ private static final QName CONTAINER1 = QName.create("ns-1", "2017-03-17", "container1");
@Test
- public void testSerializeDeserializeNodes() throws IOException {
- final NormalizedNode<?, ?> normalizedNode = createNormalizedNode();
- final byte[] bytes = serializeNormalizedNode(normalizedNode);
- assertEquals(10564, bytes.length);
- assertEquals(normalizedNode, deserializeNormalizedNode(bytes));
+ public void testSerializeDeserializeNodes() throws Exception {
+ final var normalizedNode = createNormalizedNode();
+ final var bytes = serialize(normalizedNode);
+ assertEquals(10567, bytes.length);
+ assertEquals(normalizedNode, deserialize(bytes));
}
@Test
public void testSerializeDeserializeAnyXmlNode() throws Exception {
- final ByteArrayInputStream is =
- new ByteArrayInputStream("<xml><data/></xml>".getBytes(Charset.defaultCharset()));
- final Document parse = UntrustedXML.newDocumentBuilder().parse(is);
- final DOMSourceAnyxmlNode anyXmlNode = Builders.anyXmlBuilder()
- .withNodeIdentifier(id("anyXmlNode"))
- .withValue(new DOMSource(parse))
- .build();
- final byte[] bytes = serializeNormalizedNode(anyXmlNode);
+ final var parse = UntrustedXML.newDocumentBuilder().parse(
+ new ByteArrayInputStream("<xml><data/></xml>".getBytes(StandardCharsets.UTF_8)));
+ final var anyXmlNode = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+ .withNodeIdentifier(id("anyXmlNode"))
+ .withValue(new DOMSource(parse))
+ .build();
+ final byte[] bytes = serialize(anyXmlNode);
assertEquals(113, bytes.length);
- final NormalizedNode<?, ?> deserialized = deserializeNormalizedNode(bytes);
- final DOMSource value = (DOMSource) deserialized.getValue();
- final Diff diff = XMLUnit.compareXML((Document) anyXmlNode.getValue().getNode(),
- value.getNode().getOwnerDocument());
- assertTrue(diff.toString(), diff.similar());
+
+ final var diff = DiffBuilder.compare(anyXmlNode.body().getNode())
+ // FIXME: why all this magic?
+ .withTest(((DOMSource) deserialize(bytes).body()).getNode().getOwnerDocument())
+ .checkForSimilar()
+ .build();
+ assertFalse(diff.toString(), diff.hasDifferences());
}
@Test
public void testSerializeDeserializePath() throws IOException {
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- final DataOutput out = new DataOutputStream(bos);
- final YangInstanceIdentifier path = YangInstanceIdentifier.builder()
- .node(id("container1"))
- .node(autmentationId("list1", "list2"))
- .node(listId("list1", "keyName1", "keyValue1"))
- .node(leafSetId("leafSer1", "leafSetValue1"))
- .build();
- SerializationUtils.writePath(out, path);
+ final var path = YangInstanceIdentifier.builder()
+ .node(id("container1"))
+ .node(listId("list1", "keyName1", "keyValue1"))
+ .node(leafSetId("leafSer1", "leafSetValue1"))
+ .build();
+
+ final var bos = new ByteArrayOutputStream();
+ try (var out = new DataOutputStream(bos)) {
+ SerializationUtils.writePath(out, path);
+ }
- final byte[] bytes = bos.toByteArray();
- assertEquals(119, bytes.length);
+ final var bytes = bos.toByteArray();
+ assertEquals(105, bytes.length);
- final YangInstanceIdentifier deserialized =
- SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes)));
- assertEquals(path, deserialized);
+ assertEquals(path, SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes))));
}
@Test
public void testSerializeDeserializePathAndNode() throws IOException {
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- final DataOutput out = new DataOutputStream(bos);
- final NormalizedNode<?, ?> node = createNormalizedNode();
- final YangInstanceIdentifier path = YangInstanceIdentifier.create(id("container1"));
- SerializationUtils.writeNodeAndPath(out, path, node);
-
- final byte[] bytes = bos.toByteArray();
- assertEquals(10566, bytes.length);
+ final var path = YangInstanceIdentifier.of(id("container1"));
+ final var node = createNormalizedNode();
- final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
- final AtomicBoolean applierCalled = new AtomicBoolean(false);
- SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
- assertEquals(path, deserializedPath);
- assertEquals(node, deserializedNode);
- applierCalled.set(true);
- });
- assertTrue(applierCalled.get());
- }
-
- @Test
- public void testSerializeDeserializeAugmentNoref() throws IOException {
- final YangInstanceIdentifier expected = YangInstanceIdentifier.create(
- AugmentationIdentifier.create(ImmutableSet.of(
- QName.create("foo", "leaf1"),
- QName.create("bar", "leaf2"))));
-
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- final DataOutput out = new DataOutputStream(bos);
- SerializationUtils.writePath(out, expected);
+ final var bos = new ByteArrayOutputStream();
+ try (var out = new DataOutputStream(bos)) {
+ SerializationUtils.writeNodeAndPath(out, path, node);
+ }
final byte[] bytes = bos.toByteArray();
- assertEquals(37, bytes.length);
-
- final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
- final YangInstanceIdentifier read = SerializationUtils.readPath(in);
- assertEquals(expected, read);
+ assertEquals(10569, bytes.length);
+
+ final var applierCalled = new AtomicBoolean(false);
+ try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
+ SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
+ assertEquals(path, deserializedPath);
+ assertEquals(node, deserializedNode);
+ applierCalled.set(true);
+ });
+ }
+ assertTrue(applierCalled.get());
}
- private static NormalizedNode<?, ?> deserializeNormalizedNode(final byte[] bytes) throws IOException {
- return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes))).get();
+ private static NormalizedNode deserialize(final byte[] bytes) throws Exception {
+ return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)))
+ .orElseThrow();
}
- private static byte[] serializeNormalizedNode(final NormalizedNode<?, ?> node) throws IOException {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ private static byte[] serialize(final NormalizedNode node) throws Exception {
+ final var bos = new ByteArrayOutputStream();
SerializationUtils.writeNormalizedNode(new DataOutputStream(bos), node);
return bos.toByteArray();
}
- private static NormalizedNode<?, ?> createNormalizedNode() {
- final LeafSetNode<Object> leafSetNode = Builders.leafSetBuilder()
- .withNodeIdentifier(id("leafSetNode"))
- .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
- .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
- .build();
- final LeafSetNode<Object> orderedLeafSetNode = Builders.orderedLeafSetBuilder()
- .withNodeIdentifier(id("orderedLeafSetNode"))
- .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
- .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
- .build();
- final LeafNode<Boolean> booleanLeaf = createLeaf("booleanLeaf", true);
- final LeafNode<Byte> byteLeaf = createLeaf("byteLeaf", (byte) 0);
- final LeafNode<Short> shortLeaf = createLeaf("shortLeaf", (short) 55);
- final LeafNode<Integer> intLeaf = createLeaf("intLeaf", 11);
- final LeafNode<Long> longLeaf = createLeaf("longLeaf", 151515L);
- final LeafNode<String> stringLeaf = createLeaf("stringLeaf", "stringValue");
- final LeafNode<String> longStringLeaf = createLeaf("longStringLeaf", getLongString());
- final LeafNode<QName> qNameLeaf = createLeaf("stringLeaf", QName.create("base", "qName"));
- final LeafNode<YangInstanceIdentifier> idLeaf = createLeaf("stringLeaf", YangInstanceIdentifier.empty());
- final MapEntryNode entry1 = Builders.mapEntryBuilder()
- .withNodeIdentifier(listId("mapNode", "key", "key1"))
+ private static ContainerNode createNormalizedNode() {
+ final var stringLeaf = createLeaf("stringLeaf", "stringValue");
+ final var entry1 = ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(listId("mapNode", "key", "key1"))
+ .withChild(stringLeaf)
+ .build();
+ final var entry2 = ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(listId("mapNode", "key", "key2"))
+ .withChild(stringLeaf)
+ .build();
+
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CONTAINER1))
+ .withChild(createLeaf("booleanLeaf", true))
+ .withChild(createLeaf("byteLeaf", (byte) 0))
+ .withChild(createLeaf("shortLeaf", (short) 55))
+ .withChild(createLeaf("intLeaf", 11))
+ .withChild(createLeaf("longLeaf", 151515L))
.withChild(stringLeaf)
- .build();
- final MapEntryNode entry2 = Builders.mapEntryBuilder()
- .withNodeIdentifier(listId("mapNode", "key", "key2"))
- .withChild(stringLeaf)
- .build();
- final MapNode mapNode = Builders.mapBuilder()
- .withNodeIdentifier(id("mapNode"))
- .withChild(entry1)
- .withChild(entry2)
- .build();
- final OrderedMapNode orderedMapNode = Builders.orderedMapBuilder()
- .withNodeIdentifier(id("orderedMapNode"))
- .withChild(entry2)
- .withChild(entry1)
- .build();
- final UnkeyedListEntryNode unkeyedListEntry1 = Builders.unkeyedListEntryBuilder()
- .withNodeIdentifier(id("unkeyedList"))
- .withChild(stringLeaf)
- .build();
- final UnkeyedListEntryNode unkeyedListEntry2 = Builders.unkeyedListEntryBuilder()
- .withNodeIdentifier(id("unkeyedList"))
- .withChild(stringLeaf)
- .build();
- final UnkeyedListNode unkeyedListNode = Builders.unkeyedListBuilder()
- .withNodeIdentifier(id("unkeyedList"))
- .withChild(unkeyedListEntry1)
- .withChild(unkeyedListEntry2)
- .build();
- final ImmutableSet<QName> childNames =
- ImmutableSet.of(QName.create(CONTAINER_Q_NAME, "aug1"), QName.create(CONTAINER_Q_NAME, "aug1"));
- final AugmentationNode augmentationNode = Builders.augmentationBuilder()
- .withNodeIdentifier(new YangInstanceIdentifier.AugmentationIdentifier(childNames))
+ .withChild(createLeaf("longStringLeaf", "0123456789".repeat(1000)))
+ .withChild(createLeaf("stringLeaf", QName.create("base", "qName")))
+ .withChild(createLeaf("stringLeaf", YangInstanceIdentifier.of(QName.create("test", "test"))))
+ .withChild(ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(id("mapNode"))
+ .withChild(entry1)
+ .withChild(entry2)
+ .build())
+ .withChild(ImmutableNodes.newUserMapBuilder()
+ .withNodeIdentifier(id("orderedMapNode"))
+ .withChild(entry2)
+ .withChild(entry1)
+ .build())
+ .withChild(ImmutableNodes.newUnkeyedListBuilder()
+ .withNodeIdentifier(id("unkeyedList"))
+ .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+ .withNodeIdentifier(id("unkeyedList"))
+ .withChild(stringLeaf)
+ .build())
+ .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+ .withNodeIdentifier(id("unkeyedList"))
+ .withChild(stringLeaf)
+ .build())
+ .build())
+ .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(id("leafSetNode"))
+ .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
+ .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
+ .build())
+ .withChild(ImmutableNodes.newUserLeafSetBuilder()
+ .withNodeIdentifier(id("orderedLeafSetNode"))
+ .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
+ .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
+ .build())
.withChild(createLeaf("aug1", "aug1Value"))
.withChild(createLeaf("aug2", "aug2Value"))
- .build();
- final ChoiceNode choiceNode = Builders.choiceBuilder()
- .withNodeIdentifier(id("choiceNode"))
- .withChild(createLeaf("choiceLeaf", 12))
- .build();
- return Builders.containerBuilder()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CONTAINER_Q_NAME))
- .withChild(booleanLeaf)
- .withChild(byteLeaf)
- .withChild(shortLeaf)
- .withChild(intLeaf)
- .withChild(longLeaf)
- .withChild(stringLeaf)
- .withChild(longStringLeaf)
- .withChild(qNameLeaf)
- .withChild(idLeaf)
- .withChild(mapNode)
- .withChild(orderedMapNode)
- .withChild(unkeyedListNode)
- .withChild(leafSetNode)
- .withChild(orderedLeafSetNode)
- .withChild(augmentationNode)
- .withChild(choiceNode)
+ .withChild(ImmutableNodes.newChoiceBuilder()
+ .withNodeIdentifier(id("choiceNode"))
+ .withChild(createLeaf("choiceLeaf", 12))
+ .build())
.build();
}
}
private static LeafSetEntryNode<Object> createLeafSetEntry(final String leafSet, final String value) {
- return Builders.leafSetEntryBuilder()
- .withNodeIdentifier(leafSetId(leafSet, value))
- .withValue(value)
- .build();
+ return ImmutableNodes.leafSetEntry(leafSetId(leafSet, value));
}
- private static YangInstanceIdentifier.NodeIdentifier id(final String name) {
- return new YangInstanceIdentifier.NodeIdentifier(QName.create(CONTAINER_Q_NAME, name));
+ private static NodeIdentifier id(final String name) {
+ return new NodeIdentifier(QName.create(CONTAINER1, name));
}
- private static YangInstanceIdentifier.NodeIdentifierWithPredicates listId(final String listName,
- final String keyName,
- final Object keyValue) {
- return YangInstanceIdentifier.NodeIdentifierWithPredicates.of(QName.create(CONTAINER_Q_NAME, listName),
- QName.create(CONTAINER_Q_NAME, keyName), keyValue);
+ private static NodeIdentifierWithPredicates listId(final String listName, final String keyName,
+ final Object keyValue) {
+ return NodeIdentifierWithPredicates.of(QName.create(CONTAINER1, listName), QName.create(CONTAINER1, keyName),
+ keyValue);
}
- private static <T> YangInstanceIdentifier.NodeWithValue<T> leafSetId(final String node, final T value) {
- return new YangInstanceIdentifier.NodeWithValue<>(QName.create(CONTAINER_Q_NAME, node), value);
- }
-
- private static YangInstanceIdentifier.AugmentationIdentifier autmentationId(final String... nodes) {
- final Set<QName> qNames = Arrays.stream(nodes)
- .map(node -> QName.create(CONTAINER_Q_NAME, node))
- .collect(Collectors.toSet());
- return new YangInstanceIdentifier.AugmentationIdentifier(qNames);
- }
-
- private static String getLongString() {
- final StringBuilder builder = new StringBuilder(10000);
- for (int i = 0; i < 1000; i++) {
- builder.append("0123456789");
- }
- return builder.toString();
+ private static <T> NodeWithValue<T> leafSetId(final String node, final T value) {
+ return new NodeWithValue<>(QName.create(CONTAINER1, node), value);
}
}
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.containerNode;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
-import com.google.common.collect.Sets;
import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeNavigator;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemLeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class NormalizedNodePrunerTest {
- private static final SchemaContext NO_TEST_SCHEMA = TestModel.createTestContextWithoutTestSchema();
- private static final SchemaContext NO_AUG_SCHEMA = TestModel.createTestContextWithoutAugmentationSchema();
- private static final SchemaContext FULL_SCHEMA = TestModel.createTestContext();
+ private static final EffectiveModelContext NO_TEST_SCHEMA = TestModel.createTestContextWithoutTestSchema();
+ private static final EffectiveModelContext NO_AUG_SCHEMA = TestModel.createTestContextWithoutAugmentationSchema();
+ private static final EffectiveModelContext FULL_SCHEMA = TestModel.createTestContext();
private static AbstractNormalizedNodePruner prunerFullSchema(final YangInstanceIdentifier path) {
final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(FULL_SCHEMA);
NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
- NormalizedNode<?, ?> expected = createTestContainer();
+ NormalizedNode expected = createTestContainer();
normalizedNodeWriter.write(expected);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
+ NormalizedNode actual = pruner.getResult().orElseThrow();
assertEquals(expected, actual);
-
}
@Test(expected = IllegalStateException.class)
NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
- NormalizedNode<?, ?> expected = createTestContainer();
+ NormalizedNode expected = createTestContainer();
normalizedNodeWriter.write(expected);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
+ NormalizedNode actual = pruner.getResult().orElseThrow();
assertEquals(expected, actual);
NormalizedNodeWriter.forStreamWriter(pruner).write(expected);
-
}
-
@Test
public void testNodesPrunedWhenAugmentationSchemaMissing() throws IOException {
AbstractNormalizedNodePruner pruner = prunerNoAugSchema(TestModel.TEST_PATH);
NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
- NormalizedNode<?, ?> expected = createTestContainer();
+ NormalizedNode expected = createTestContainer();
normalizedNodeWriter.write(expected);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
+ NormalizedNode actual = pruner.getResult().orElseThrow();
assertNotEquals(expected, actual);
NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
- NormalizedNode<?, ?> expected = createTestContainer();
+ NormalizedNode expected = createTestContainer();
normalizedNodeWriter.write(expected);
// Asserting true here instead of checking actual value because I don't want this assertion to be fragile
assertTrue(countNodes(expected, "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test") > 0);
-
}
- private static int countNodes(final NormalizedNode<?,?> normalizedNode, final String namespaceFilter) {
+ private static int countNodes(final NormalizedNode normalizedNode, final String namespaceFilter) {
if (normalizedNode == null) {
return 0;
}
final AtomicInteger count = new AtomicInteger();
new NormalizedNodeNavigator((level, parentPath, normalizedNode1) -> {
- if (!(normalizedNode1.getIdentifier() instanceof AugmentationIdentifier)) {
- if (normalizedNode1.getIdentifier().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
- count.incrementAndGet();
- }
+ if (normalizedNode1.name().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
+ count.incrementAndGet();
}
- }).navigate(YangInstanceIdentifier.empty().toString(), normalizedNode);
+ }).navigate(YangInstanceIdentifier.of().toString(), normalizedNode);
return count.get();
}
@Test
public void testLeafNodeNotPrunedWhenHasNoParent() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.DESC_QNAME));
- NormalizedNode<?, ?> input = Builders.leafBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
+ NormalizedNode input = ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test");
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
assertEquals("normalizedNode", input, pruner.getResult().orElseThrow());
}
- @Test
- public void testLeafNodePrunedWhenHasAugmentationParentAndSchemaMissing() throws IOException {
- AugmentationIdentifier augId = new AugmentationIdentifier(Sets.newHashSet(TestModel.AUG_CONT_QNAME));
- AbstractNormalizedNodePruner pruner = prunerFullSchema(YangInstanceIdentifier.builder()
- .node(TestModel.TEST_QNAME).node(TestModel.AUGMENTED_LIST_QNAME)
- .node(TestModel.AUGMENTED_LIST_QNAME).node(augId).build());
- LeafNode<Object> child = Builders.leafBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
- NormalizedNode<?, ?> input = Builders.augmentationBuilder().withNodeIdentifier(augId).withChild(child).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", Builders.augmentationBuilder().withNodeIdentifier(augId).build(), actual);
- }
-
@Test
public void testLeafNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
- NormalizedNode<?, ?> input = Builders.leafBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
+ LeafNode<String> input = ImmutableNodes.leafNode(TestModel.INVALID_QNAME, "test");
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
assertEquals(Optional.empty(), pruner.getResult());
@Test
public void testLeafSetEntryNodeNotPrunedWhenHasNoParent() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
- NormalizedNode<?, ?> input = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
- new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
+ LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.SHOE_QNAME, "puma");
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
+ NormalizedNode actual = pruner.getResult().orElseThrow();
assertEquals("normalizedNode", input, actual);
}
@Test
public void testLeafSetEntryNodeNotPrunedWhenHasParent() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
- LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
- new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
- NormalizedNode<?, ?> input = Builders.leafSetBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.SHOE_QNAME)).withChild(child).build();
+ SystemLeafSetNode<?> input = ImmutableNodes.<String>newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.SHOE_QNAME))
+ .withChildValue("puma")
+ .build();
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
+ NormalizedNode actual = pruner.getResult().orElseThrow();
assertEquals("normalizedNode", input, actual);
}
@Test
public void testLeafSetEntryNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
- NormalizedNode<?, ?> input = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
- new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
+ LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.INVALID_QNAME, "test");
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
assertEquals(Optional.empty(), pruner.getResult());
@Test
public void testLeafSetEntryNodePrunedWhenHasParentAndSchemaMissing() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
- LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
- new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
- NormalizedNode<?, ?> input = Builders.leafSetBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.INVALID_QNAME)).withChild(child).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+ NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.<String>newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.INVALID_QNAME))
+ .withChildValue("test")
+ .build());
assertEquals(Optional.empty(), pruner.getResult());
}
@Test
public void testAnyXMLNodeNotPrunedWhenHasNoParent() throws IOException {
AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
- NormalizedNode<?, ?> input = Builders.anyXmlBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
+ AnyxmlNode<DOMSource> input = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+ .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+ .withValue(mock(DOMSource.class))
+ .build();
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", input, actual);
+ assertEquals(input, pruner.getResult().orElseThrow());
}
@Test
public void testAnyXMLNodeNotPrunedWhenHasParent() throws IOException {
- AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
- DOMSourceAnyxmlNode child = Builders.anyXmlBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
- NormalizedNode<?, ?> input = Builders.containerBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.TEST_QNAME)).withChild(child).build();
+ final var pruner = prunerFullSchema(TestModel.TEST_PATH);
+ final var input = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+ .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+ .withValue(mock(DOMSource.class))
+ .build())
+ .build();
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", input, actual);
+ assertEquals(input, pruner.getResult().orElseThrow());
}
@Test
public void testAnyXmlNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
AbstractNormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
- NormalizedNode<?, ?> input = Builders.anyXmlBuilder().withNodeIdentifier(
- new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+ NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+ .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+ .withValue(mock(DOMSource.class))
+ .build());
assertEquals(Optional.empty(), pruner.getResult());
}
.node(TestModel.INNER_CONTAINER_QNAME).build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- NormalizedNode<?, ?> input = ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME);
+ ContainerNode input = containerNode(TestModel.INNER_CONTAINER_QNAME);
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", input, actual);
+ assertEquals(input, pruner.getResult().orElseThrow());
}
@Test
.node(TestModel.INVALID_QNAME).build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- NormalizedNode<?, ?> input = ImmutableNodes.containerNode(TestModel.INVALID_QNAME);
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+ NormalizedNodeWriter.forStreamWriter(pruner).write(containerNode(TestModel.INVALID_QNAME));
assertEquals(Optional.empty(), pruner.getResult());
}
.build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- MapNode innerList = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
- TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
- ImmutableNodes.containerNode(TestModel.INVALID_QNAME)).build()).build();
- NormalizedNode<?, ?> input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
- .withChild(innerList).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
- NormalizedNode<?, ?> expected = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
- .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
- TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build()).build()).build();
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", expected, actual);
+ NormalizedNodeWriter.forStreamWriter(pruner)
+ .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+ .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+ .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+ .withChild(containerNode(TestModel.INVALID_QNAME))
+ .build())
+ .build())
+ .build());
+
+ assertEquals(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+ .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+ .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build())
+ .build())
+ .build(), pruner.getResult().orElseThrow());
}
@Test
.node(TestModel.INNER_LIST_QNAME).build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- MapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
- TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
- ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
+ SystemMapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+ .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+ .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+ .build())
+ .build();
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", input, actual);
+ assertEquals(input, pruner.getResult().orElseThrow());
}
@Test
.node(TestModel.INVALID_QNAME).build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- MapNode input = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
- TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
- ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+ NormalizedNodeWriter.forStreamWriter(pruner).write(mapNodeBuilder(TestModel.INVALID_QNAME)
+ .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+ .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+ .build())
+ .build());
assertEquals(Optional.empty(), pruner.getResult());
}
.build();
AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
- MapNode innerList = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
- TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
- ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
- NormalizedNode<?, ?> input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
- .withChild(innerList).build();
- NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
- NormalizedNode<?, ?> expected = mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
- NormalizedNode<?, ?> actual = pruner.getResult().orElseThrow();
- assertEquals("normalizedNode", expected, actual);
+ NormalizedNodeWriter.forStreamWriter(pruner)
+ .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+ .withChild(mapNodeBuilder(TestModel.INVALID_QNAME)
+ .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+ .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+ .build())
+ .build())
+ .build());
+
+ assertEquals(mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ pruner.getResult().orElseThrow());
}
- private static NormalizedNode<?, ?> createTestContainer() {
- byte[] bytes1 = {1, 2, 3};
- LeafSetEntryNode<Object> entry1 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
- new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes1)).withValue(bytes1).build();
-
- byte[] bytes2 = {};
- LeafSetEntryNode<Object> entry2 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
- new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes2)).withValue(bytes2).build();
-
+ private static ContainerNode createTestContainer() {
return TestModel.createBaseTestContainerBuilder()
- .withChild(ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
- new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
- .withChild(entry1).withChild(entry2).build())
- .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[]{1, 2, 3, 4}))
- .build();
+ .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
+ .withChildValue(new byte[] {1, 2, 3})
+ .withChildValue(new byte[0])
+ .build())
+ .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[] {1, 2, 3, 4}))
+ .build();
}
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
@Test
public void testListTranslation() throws IOException {
- assertEquals(Builders.mapBuilder()
+ assertEquals(ImmutableNodes.newSystemMapBuilder()
.withNodeIdentifier(new NodeIdentifier(LST))
- .withChild(Builders.mapEntryBuilder()
+ .withChild(ImmutableNodes.newMapEntryBuilder()
.withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.<QName, Object>builder()
.put(A, (byte) 1)
.put(B, (short) 1)
.withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
.build())
.build(),
- prune(Builders.mapBuilder()
+ prune(ImmutableNodes.newSystemMapBuilder()
.withNodeIdentifier(new NodeIdentifier(LST))
- .withChild(Builders.mapEntryBuilder()
+ .withChild(ImmutableNodes.newMapEntryBuilder()
.withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.<QName, Object>builder()
.put(A, (byte) 1)
.put(B, (short) 1)
@Test
public void testContainerTranslation() throws IOException {
- assertEquals(Builders.containerBuilder()
+ assertEquals(ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(CONT))
.withChild(ImmutableNodes.leafNode(A, (byte) 1))
.withChild(ImmutableNodes.leafNode(B, (short) 1))
.withChild(ImmutableNodes.leafNode(G, Uint32.ONE))
.withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
.build(),
- prune(Builders.containerBuilder()
+ prune(ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(CONT))
.withChild(ImmutableNodes.leafNode(A, (byte) 1))
.withChild(ImmutableNodes.leafNode(B, (short) 1))
@Test
public void testLeafList8() throws IOException {
- assertEquals(Builders.leafSetBuilder()
+ assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST8))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST8, Uint8.ONE))
- .withValue(Uint8.ONE)
- .build())
+ .withChildValue(Uint8.ONE)
.build(),
- prune(Builders.leafSetBuilder()
+ prune(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST8))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST8, (short) 1))
- .withValue((short) 1)
- .build())
+ .withChildValue((short) 1)
.build()));
}
@Test
public void testLeafList16() throws IOException {
- assertEquals(Builders.leafSetBuilder()
+ assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST16))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST16, Uint16.ONE))
- .withValue(Uint16.ONE)
- .build())
+ .withChildValue(Uint16.ONE)
.build(),
- prune(Builders.leafSetBuilder()
+ prune(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST16))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST16, 1))
- .withValue(1)
- .build())
+ .withChildValue(1)
.build()));
}
@Test
public void testLeafList32() throws IOException {
- assertEquals(Builders.leafSetBuilder()
+ assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST32))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST32, Uint32.ONE))
- .withValue(Uint32.ONE)
- .build())
+ .withChildValue(Uint32.ONE)
.build(),
- prune(Builders.leafSetBuilder()
+ prune(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST32))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST32, 1L))
- .withValue(1L)
- .build())
+ .withChildValue(1L)
.build()));
}
@Test
public void testLeafList64() throws IOException {
- assertEquals(Builders.leafSetBuilder()
+ assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST64))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST64, Uint64.ONE))
- .withValue(Uint64.ONE)
- .build())
+ .withChildValue(Uint64.ONE)
.build(),
- prune(Builders.leafSetBuilder()
+ prune(ImmutableNodes.newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(LFLST64))
- .withChild(Builders.leafSetEntryBuilder()
- .withNodeIdentifier(new NodeWithValue<>(LFLST64, BigInteger.ONE))
- .withValue(BigInteger.ONE)
- .build())
+ .withChildValue(BigInteger.ONE)
.build()));
}
- private static NormalizedNode<?, ?> prune(final NormalizedNode<?, ?> node) throws IOException {
- final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT)
- .withUintAdaption();
- pruner.initializeForPath(YangInstanceIdentifier.create(node.getIdentifier()));
+ private static NormalizedNode prune(final NormalizedNode node) throws IOException {
+ final var pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT).withUintAdaption();
+ pruner.initializeForPath(YangInstanceIdentifier.of(node.name()));
try (NormalizedNodeWriter writer = NormalizedNodeWriter.forStreamWriter(pruner)) {
writer.write(node);
}
pruner.close();
- return pruner.getResult().get();
+ return pruner.getResult().orElseThrow();
}
}
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
-import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import java.io.InputStream;
-import java.math.BigDecimal;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.Decimal64;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
public final class TestModel {
return TestModel.class.getResourceAsStream(resourceName);
}
- public static SchemaContext createTestContext() {
+ public static EffectiveModelContext createTestContext() {
return YangParserTestUtils.parseYangResources(TestModel.class, DATASTORE_TEST_YANG, DATASTORE_AUG_YANG,
DATASTORE_TEST_NOTIFICATION_YANG);
}
- public static SchemaContext createTestContextWithoutTestSchema() {
+ public static EffectiveModelContext createTestContextWithoutTestSchema() {
return YangParserTestUtils.parseYangResource(DATASTORE_TEST_NOTIFICATION_YANG);
}
- public static SchemaContext createTestContextWithoutAugmentationSchema() {
+ public static EffectiveModelContext createTestContextWithoutAugmentationSchema() {
return YangParserTestUtils.parseYangResources(TestModel.class, DATASTORE_TEST_YANG,
DATASTORE_TEST_NOTIFICATION_YANG);
}
public static DataContainerNodeBuilder<NodeIdentifier, ContainerNode> createBaseTestContainerBuilder() {
- // Create a list of shoes
- // This is to test leaf list entry
- final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
- new NodeWithValue<>(SHOE_QNAME, "nike")).withValue("nike").build();
-
- final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
- new NodeWithValue<>(SHOE_QNAME, "puma")).withValue("puma").build();
-
- final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
- new NodeIdentifier(SHOE_QNAME)).withChild(nike).withChild(puma).build();
-
- // Test a leaf-list where each entry contains an identity
- final LeafSetEntryNode<Object> cap1 =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeWithValue<>(QName.create(
- TEST_QNAME, "capability"), DESC_QNAME))
- .withValue(DESC_QNAME).build();
-
- final LeafSetNode<Object> capabilities =
- ImmutableLeafSetNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeIdentifier(QName.create(
- TEST_QNAME, "capability"))).withChild(cap1).build();
-
- ContainerNode switchFeatures =
- ImmutableContainerNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeIdentifier(SWITCH_FEATURES_QNAME))
- .withChild(capabilities).build();
-
- // Create a leaf list with numbers
- final LeafSetEntryNode<Object> five =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeWithValue<>(QName.create(
- TEST_QNAME, "number"), 5)).withValue(5).build();
- final LeafSetEntryNode<Object> fifteen =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeWithValue<>(QName.create(
- TEST_QNAME, "number"), 15)).withValue(15).build();
- final LeafSetNode<Object> numbers =
- ImmutableLeafSetNodeBuilder
- .create()
- .withNodeIdentifier(
- new NodeIdentifier(QName.create(
- TEST_QNAME, "number"))).withChild(five).withChild(fifteen)
- .build();
-
-
- // Create augmentations
- MapEntryNode augMapEntry = createAugmentedListEntry(1, "First Test");
-
- // Create a bits leaf
- NormalizedNodeBuilder<NodeIdentifier, Object, LeafNode<Object>>
- myBits = Builders.leafBuilder()
- .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "my-bits")))
- .withValue(ImmutableSet.of("foo", "bar"));
-
- // Create unkeyed list entry
- UnkeyedListEntryNode unkeyedListEntry = Builders.unkeyedListEntryBuilder()
- .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
- .withChild(ImmutableNodes.leafNode(NAME_QNAME, "unkeyed-entry-name"))
- .build();
-
// Create YangInstanceIdentifier with all path arg types.
- YangInstanceIdentifier instanceID = YangInstanceIdentifier.create(
- new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
- NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"),
- QName.create(TEST_QNAME, "key"), 10),
- new AugmentationIdentifier(ImmutableSet.of(
- QName.create(TEST_QNAME, "aug1"), QName.create(TEST_QNAME, "aug2"))),
- new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
-
- Map<QName, Object> keyValues = new HashMap<>();
- keyValues.put(CHILDREN_QNAME, FIRST_CHILD_NAME);
-
+ YangInstanceIdentifier instanceID = YangInstanceIdentifier.of(
+ new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
+ NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"),
+ QName.create(TEST_QNAME, "key"), 10),
+ new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
// Create the document
- return ImmutableContainerNodeBuilder
- .create()
- .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
- .withChild(myBits.build())
- .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC))
- .withChild(ImmutableNodes.leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
- .withChild(ImmutableNodes.leafNode(SHORT_LEAF_QNAME, SHORT_ID))
- .withChild(ImmutableNodes.leafNode(BYTE_LEAF_QNAME, BYTE_ID))
- .withChild(ImmutableNodes.leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100)))
- .withChild(ImmutableNodes.leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, BigDecimal.valueOf(1.2)))
- .withChild(ImmutableNodes.leafNode(SOME_REF_QNAME, instanceID))
- .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME))
- .withChild(Builders.unkeyedListBuilder()
- .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
- .withChild(unkeyedListEntry).build())
- .withChild(Builders.choiceBuilder()
- .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME))
- .withChild(ImmutableNodes.leafNode(TWO_QNAME, "two")).build())
- .withChild(Builders.orderedMapBuilder()
- .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME))
- .withValue(ImmutableList.<MapEntryNode>builder().add(
- mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
- mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()).build())
- .build())
- .withChild(shoes)
- .withChild(numbers)
- .withChild(switchFeatures)
- .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(augMapEntry).build())
- .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
- .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
- .withChild(BAR_NODE).build()
- );
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ // Create a bits leaf
+ .withChild(leafNode(QName.create(TEST_QNAME, "my-bits"), ImmutableSet.of("foo", "bar")))
+ .withChild(leafNode(DESC_QNAME, DESC))
+ .withChild(leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
+ .withChild(leafNode(SHORT_LEAF_QNAME, SHORT_ID))
+ .withChild(leafNode(BYTE_LEAF_QNAME, BYTE_ID))
+ .withChild(leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100)))
+ .withChild(leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, Decimal64.valueOf("1.2").scaleTo(2)))
+ .withChild(leafNode(SOME_REF_QNAME, instanceID))
+ .withChild(leafNode(MYIDENTITY_QNAME, DESC_QNAME))
+ .withChild(ImmutableNodes.newUnkeyedListBuilder()
+ .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+ // Create unkeyed list entry
+ .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+ .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+ .withChild(leafNode(NAME_QNAME, "unkeyed-entry-name"))
+ .build())
+ .build())
+ .withChild(ImmutableNodes.newChoiceBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME))
+ .withChild(leafNode(TWO_QNAME, "two")).build())
+ .withChild(ImmutableNodes.newUserMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME))
+ .withValue(List.of(
+ mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
+ mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()))
+ .build())
+ .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(SHOE_QNAME))
+ .withChildValue("nike")
+ .withChildValue("puma")
+ .build())
+ .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
+ .withChildValue(5)
+ .withChildValue(15)
+ .build())
+ .withChild(ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(SWITCH_FEATURES_QNAME))
+ // Test a leaf-list where each entry contains an identity
+ .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "capability")))
+ .withChildValue(DESC_QNAME)
+ .build())
+ .build())
+ .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME)
+ // Create augmentations
+ .withChild(createAugmentedListEntry(1, "First Test"))
+ .build())
+ .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+ .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+ .withChild(BAR_NODE)
+ .build());
}
public static ContainerNode createTestContainer() {
}
public static MapEntryNode createAugmentedListEntry(final int id, final String name) {
- Set<QName> childAugmentations = new HashSet<>();
- childAugmentations.add(AUG_CONT_QNAME);
-
- ContainerNode augCont = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME))
- .withChild(ImmutableNodes.leafNode(AUG_NAME_QNAME, name))
- .build();
-
-
- final AugmentationIdentifier augmentationIdentifier = new AugmentationIdentifier(childAugmentations);
- final AugmentationNode augmentationNode =
- Builders.augmentationBuilder()
- .withNodeIdentifier(augmentationIdentifier).withChild(augCont)
- .build();
-
- return ImmutableMapEntryNodeBuilder.create()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id))
- .withChild(ImmutableNodes.leafNode(ID_QNAME, id))
- .withChild(augmentationNode).build();
+ return ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id))
+ .withChild(leafNode(ID_QNAME, id))
+ .withChild(ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME))
+ .withChild(leafNode(AUG_NAME_QNAME, name))
+ .build())
+ .build();
}
public static ContainerNode createFamily() {
- final DataContainerNodeBuilder<NodeIdentifier, ContainerNode>
- familyContainerBuilder = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new NodeIdentifier(FAMILY_QNAME));
-
- final CollectionNodeBuilder<MapEntryNode, MapNode> childrenBuilder =
- mapNodeBuilder(CHILDREN_QNAME);
-
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
- FIRST_GRAND_CHILD_ID);
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
- SECOND_GRAND_CHILD_ID);
-
- firstGrandChildBuilder
- .withChild(
- ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME,
- FIRST_GRAND_CHILD_ID)).withChild(
- ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME,
- FIRST_GRAND_CHILD_NAME));
-
- secondGrandChildBuilder.withChild(
- ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME));
-
- firstChildBuilder
- .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
- .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
- .withChild(firstGrandChildBuilder.build())
- .build());
-
-
- secondChildBuilder
- .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
- .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
- .withChild(firstGrandChildBuilder.build())
- .build());
-
- childrenBuilder.withChild(firstChildBuilder.build());
- childrenBuilder.withChild(secondChildBuilder.build());
-
- return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+ final var firstGrandChildBuilder = mapEntryBuilder(
+ GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+ .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME,FIRST_GRAND_CHILD_ID))
+ .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
+
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+ .withChild(ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME))
+ .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+ .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+ .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+ .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+ .withChild(firstGrandChildBuilder.build())
+ .build())
+ .build())
+ .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+ .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+ .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+ .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+ .withChild(firstGrandChildBuilder.build())
+ .build())
+ .build())
+ .build())
+ .build();
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.persisted;
+package org.opendaylight.controller.cluster.io;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
public class ChunkedOutputStreamTest {
private static final int INITIAL_SIZE = 256;
+ private static final int MAX_ARRAY_SIZE = 256 * 1024;
- private final ChunkedOutputStream stream = new ChunkedOutputStream(INITIAL_SIZE);
+ private final ChunkedOutputStream stream = new ChunkedOutputStream(INITIAL_SIZE, MAX_ARRAY_SIZE);
@Test
public void testBasicWrite() throws IOException {
@Test
public void testTwoChunksWrite() throws IOException {
- int size = ChunkedOutputStream.MAX_ARRAY_SIZE + 1;
+ int size = MAX_ARRAY_SIZE + 1;
for (int i = 0; i < size; ++i) {
stream.write(i);
}
int counter = 0;
- for (byte[] chunk: assertFinishedStream(size, 2)) {
- for (byte actual: chunk) {
+ for (byte[] chunk : assertFinishedStream(size, 2)) {
+ for (byte actual : chunk) {
assertEquals((byte) counter++, actual);
}
}
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.File;
import java.io.IOException;
-import java.io.InputStream;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.junit.After;
@Before
public void setup() {
deleteTempFiles(TEMP_DIR);
- FileBackedOutputStream.REFERENCE_CACHE.clear();
}
@After
assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
- assertEquals("Reference cache size", 0, FileBackedOutputStream.REFERENCE_CACHE.size());
-
fbos.cleanup();
}
assertEquals("Temp file", tempFileName, findTempFileName(TEMP_DIR));
assertEquals("Size", bytes.length, fbos.asByteSource().size());
- InputStream inputStream = fbos.asByteSource().openStream();
-
- assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
-
- byte[] inBytes = new byte[bytes.length];
- assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
- assertArrayEquals("Read InputStream", bytes, inBytes);
- assertEquals("End of stream", -1, inputStream.read());
+ try (var inputStream = fbos.asByteSource().openStream()) {
+ assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
- inputStream.close();
-
- assertEquals("Reference cache size", 1, FileBackedOutputStream.REFERENCE_CACHE.size());
+ byte[] inBytes = new byte[bytes.length];
+ assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
+ assertArrayEquals("Read InputStream", bytes, inBytes);
+ assertEquals("End of stream", -1, inputStream.read());
+ }
fbos.cleanup();
- assertEquals("Reference cache size", 0, FileBackedOutputStream.REFERENCE_CACHE.size());
-
assertNull("Found unexpected temp file", findTempFileName(TEMP_DIR));
}
fail("Temp file was not deleted");
}
- static String findTempFileName(String dirPath) {
+ static String findTempFileName(final String dirPath) {
String[] files = new File(dirPath).list();
assertNotNull(files);
assertTrue("Found more than one temp file: " + Arrays.toString(files), files.length < 2);
return files.length == 1 ? files[0] : null;
}
- static boolean deleteFile(String file) {
+ static boolean deleteFile(final String file) {
return new File(file).delete();
}
- static void deleteTempFiles(String path) {
+ static void deleteTempFiles(final String path) {
String[] files = new File(path).list();
if (files != null) {
- for (String file: files) {
+ for (String file : files) {
deleteFile(path + File.separator + file);
}
}
}
- static void createDir(String path) {
+ static void createDir(final String path) {
File dir = new File(path);
if (!dir.exists() && !dir.mkdirs()) {
throw new RuntimeException("Failed to create temp dir " + path);
@Before
public void setup() {
FileBackedOutputStreamTest.deleteTempFiles(TEMP_DIR);
- FileBackedOutputStream.REFERENCE_CACHE.clear();
}
@After
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
@Test
public void testSerialization() {
AbortSlicing expected = new AbortSlicing(new StringIdentifier("test"));
- AbortSlicing cloned = (AbortSlicing) SerializationUtils.clone(expected);
+ AbortSlicing cloned = SerializationUtils.clone(expected);
assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
}
}
final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
assertFailedMessageSliceReply(reply, IDENTIFIER, false);
- assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+ assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
verify(mockFiledBackedStream).cleanup();
final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
assertFailedMessageSliceReply(reply, IDENTIFIER, false);
- assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+ assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
verify(mockFiledBackedStream).cleanup();
}
}
- private MessageAssembler newMessageAssembler(String logContext) {
+ private MessageAssembler newMessageAssembler(final String logContext) {
return newMessageAssemblerBuilder(logContext).build();
}
- private Builder newMessageAssemblerBuilder(String logContext) {
+ private Builder newMessageAssemblerBuilder(final String logContext) {
return MessageAssembler.builder().fileBackedStreamFactory(mockFiledBackedStreamFactory)
.assembledMessageCallback(mockAssembledMessageCallback).logContext(logContext);
}
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
@Test
public void testSerialization() {
MessageSliceIdentifier expected = new MessageSliceIdentifier(new StringIdentifier("test"), 123L);
- MessageSliceIdentifier cloned = (MessageSliceIdentifier) SerializationUtils.clone(expected);
+ MessageSliceIdentifier cloned = SerializationUtils.clone(expected);
assertEquals("cloned", expected, cloned);
assertEquals("getClientIdentifier", expected.getClientIdentifier(), cloned.getClientIdentifier());
assertEquals("getSlicerId", expected.getSlicerId(), cloned.getSlicerId());
import akka.serialization.JavaSerializer;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
private void testSuccess() {
MessageSliceReply expected = MessageSliceReply.success(new StringIdentifier("test"), 3,
TestProbe.apply(actorSystem).ref());
- MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+ MessageSliceReply cloned = SerializationUtils.clone(expected);
assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
private void testFailure() {
MessageSliceReply expected = MessageSliceReply.failed(new StringIdentifier("test"),
new MessageSliceException("mock", true), TestProbe.apply(actorSystem).ref());
- MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+ MessageSliceReply cloned = SerializationUtils.clone(expected);
assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
assertEquals("getSendTo", expected.getSendTo(), cloned.getSendTo());
assertTrue("getFailure present", cloned.getFailure().isPresent());
- assertEquals("getFailure message", expected.getFailure().get().getMessage(),
- cloned.getFailure().get().getMessage());
- assertEquals("getFailure isRetriable", expected.getFailure().get().isRetriable(),
- cloned.getFailure().get().isRetriable());
+ assertEquals("getFailure message", expected.getFailure().orElseThrow().getMessage(),
+ cloned.getFailure().orElseThrow().getMessage());
+ assertEquals("getFailure isRetriable", expected.getFailure().orElseThrow().isRetriable(),
+ cloned.getFailure().orElseThrow().isRetriable());
}
}
import akka.serialization.JavaSerializer;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@After
public void tearDown() {
- TestKit.shutdownActorSystem(actorSystem, Boolean.TRUE);
+ TestKit.shutdownActorSystem(actorSystem, true);
}
@Test
MessageSlice expected = new MessageSlice(new StringIdentifier("test"), data, 2, 3, 54321,
TestProbe.apply(actorSystem).ref());
- MessageSlice cloned = (MessageSlice) SerializationUtils.clone(expected);
+ MessageSlice cloned = SerializationUtils.clone(expected);
assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
assertEquals("Identifier", identifier, ((MessageSliceIdentifier)reply.getIdentifier())
.getClientIdentifier());
assertEquals("Failure present", Boolean.TRUE, reply.getFailure().isPresent());
- assertEquals("isRetriable", isRetriable, reply.getFailure().get().isRetriable());
+ assertEquals("isRetriable", isRetriable, reply.getFailure().orElseThrow().isRetriable());
}
static void assertMessageSlice(final MessageSlice sliceMessage, final Identifier identifier, final int sliceIndex,
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
-import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mockito.MockitoAnnotations;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
import scala.Option;
/**
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class LocalSnapshotStoreTest {
private static final String PERSISTENCE_ID = "member-1-shard-default-config";
private static final String PREFIX_BASED_SHARD_PERSISTENCE_ID = "member-1-shard-id-ints!-config";
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
cleanSnapshotDir();
}
}
}
- private static String toSnapshotName(final String persistenceId, final int seqNr, final int timestamp)
- throws UnsupportedEncodingException {
- final String encodedPersistenceId = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8.name());
- return "snapshot-" + encodedPersistenceId + "-" + seqNr + "-" + timestamp;
+ private static String toSnapshotName(final String persistenceId, final int seqNr, final int timestamp) {
+ return "snapshot-" + URLEncoder.encode(persistenceId, StandardCharsets.UTF_8) + "-" + seqNr + "-" + timestamp;
}
}
*/
package org.opendaylight.controller.cluster.schema.provider.impl;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import akka.dispatch.ExecutionContexts;
import akka.dispatch.Futures;
import com.google.common.io.CharSource;
-import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
-import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutionException;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
public class RemoteSchemaProviderTest {
- private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+ private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
private RemoteSchemaProvider remoteSchemaProvider;
private RemoteYangTextSourceProvider mockedRemoteSchemaRepository;
@Test
public void getExistingYangTextSchemaSource() throws IOException, InterruptedException, ExecutionException {
- YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(ID,
- CharSource.wrap("Test").asByteSource(StandardCharsets.UTF_8));
+ final var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test"));
doReturn(Futures.successful(new YangTextSchemaSourceSerializationProxy(schemaSource)))
.when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
- YangTextSchemaSource providedSource = remoteSchemaProvider.getSource(ID).get();
- assertEquals(ID, providedSource.getIdentifier());
- assertArrayEquals(schemaSource.read(), providedSource.read());
+ final var providedSource = remoteSchemaProvider.getSource(ID).get();
+ assertEquals(ID, providedSource.sourceId());
+ assertEquals(schemaSource.read(), providedSource.read());
}
@Test
public void getNonExistingSchemaSource() throws InterruptedException {
- doReturn(Futures.failed(new SchemaSourceException("Source not provided")))
- .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
+ final var exception = new SchemaSourceException(ID, "Source not provided");
+ doReturn(Futures.failed(exception)).when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
- ListenableFuture<YangTextSchemaSource> sourceFuture = remoteSchemaProvider.getSource(ID);
+ final var sourceFuture = remoteSchemaProvider.getSource(ID);
assertTrue(sourceFuture.isDone());
- try {
- sourceFuture.get();
- fail("Expected a failure to occur");
- } catch (ExecutionException e) {
- assertThat(e.getCause(), instanceOf(SchemaSourceException.class));
- }
+
+ final var cause = assertThrows(ExecutionException.class, sourceFuture::get).getCause();
+ assertSame(exception, cause);
}
}
*/
package org.opendaylight.controller.cluster.schema.provider.impl;
-import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
import com.google.common.util.concurrent.Futures;
import java.util.Collections;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
import scala.concurrent.Await;
-import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RemoteYangTextSourceProviderImplTest {
+ private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
- private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+ @Mock
+ private SchemaRepository mockedLocalRepository;
private RemoteYangTextSourceProviderImpl remoteRepository;
- private SchemaRepository mockedLocalRepository;
private final Set<SourceIdentifier> providedSources = Collections.singleton(ID);
@Before
public void setUp() {
- mockedLocalRepository = Mockito.mock(SchemaRepository.class);
-
remoteRepository = new RemoteYangTextSourceProviderImpl(mockedLocalRepository, providedSources);
}
@Test
public void testGetExistingYangTextSchemaSource() throws Exception {
- String source = "Test source.";
- YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(
- ID, ByteSource.wrap(source.getBytes()));
- Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
- Futures.immediateFuture(schemaSource));
+ var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test source."));
+
+ doReturn(Futures.immediateFuture(schemaSource)).when(mockedLocalRepository)
+ .getSchemaSource(ID, YangTextSource.class);
- Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
- remoteRepository.getYangTextSchemaSource(ID);
+ var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
assertTrue(retrievedSourceFuture.isCompleted());
- YangTextSchemaSource resultSchemaSource = Await.result(retrievedSourceFuture,
- FiniteDuration.Zero()).getRepresentation();
- assertEquals(resultSchemaSource.getIdentifier(), schemaSource.getIdentifier());
- assertArrayEquals(resultSchemaSource.read(), schemaSource.read());
+ var resultSchemaSource = Await.result(retrievedSourceFuture, FiniteDuration.Zero()).getRepresentation();
+ assertEquals(resultSchemaSource.sourceId(), schemaSource.sourceId());
+ assertEquals(resultSchemaSource.read(), schemaSource.read());
}
- @Test(expected = SchemaSourceException.class)
+ @Test
public void testGetNonExistentYangTextSchemaSource() throws Exception {
- Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
- Futures.immediateFailedFuture(new SchemaSourceException("Source is not provided")));
+ final var exception = new SchemaSourceException(ID, "Source is not provided");
+
+ doReturn(Futures.immediateFailedFuture(exception)).when(mockedLocalRepository)
+ .getSchemaSource(ID, YangTextSource.class);
- Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
- remoteRepository.getYangTextSchemaSource(ID);
+ var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
assertTrue(retrievedSourceFuture.isCompleted());
- Await.result(retrievedSourceFuture, FiniteDuration.Zero());
+
+ final var ex = assertThrows(SchemaSourceException.class,
+ () -> Await.result(retrievedSourceFuture, FiniteDuration.Zero()));
+ assertSame(ex, exception);
}
@Test
public void testGetProvidedSources() throws Exception {
- Set<SourceIdentifier> remoteProvidedSources = Await.result(remoteRepository
- .getProvidedSources(), FiniteDuration.Zero());
+ var remoteProvidedSources = Await.result(remoteRepository.getProvidedSources(), FiniteDuration.Zero());
assertEquals(providedSources, remoteProvidedSources);
}
-
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.schema.provider.impl;
-import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
-import java.nio.charset.StandardCharsets;
import org.junit.Before;
import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
public class YangTextSourceSerializationProxyTest {
-
- private YangTextSchemaSource schemaSource;
+ private YangTextSource schemaSource;
@Before
public void setUp() {
- String source = "Test source.";
- schemaSource = YangTextSchemaSource.delegateForByteSource(
- RevisionSourceIdentifier.create("test", Revision.of("2015-10-30")),
- ByteSource.wrap(source.getBytes(StandardCharsets.UTF_8)));
+ schemaSource = new DelegatedYangTextSource(new SourceIdentifier("test", "2015-10-30"),
+ CharSource.wrap("Test source."));
}
-
@Test
public void serializeAndDeserializeProxy() throws ClassNotFoundException, IOException {
- YangTextSchemaSourceSerializationProxy proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
+ final var proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos);
oos.writeObject(proxy);
final byte[] bytes = bos.toByteArray();
- assertEquals(353, bytes.length);
+ assertEquals(323, bytes.length);
ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
- YangTextSchemaSourceSerializationProxy deserializedProxy =
- (YangTextSchemaSourceSerializationProxy) ois.readObject();
+ final var deserializedProxy = (YangTextSchemaSourceSerializationProxy) ois.readObject();
- assertEquals(deserializedProxy.getRepresentation().getIdentifier(), proxy.getRepresentation().getIdentifier());
- assertArrayEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
+ assertEquals(deserializedProxy.getRepresentation().sourceId(), proxy.getRepresentation().sourceId());
+ assertEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
}
@Test
public void testProxyEqualsBackingYangTextSource() throws IOException {
- YangTextSchemaSourceSerializationProxy serializationProxy =
- new YangTextSchemaSourceSerializationProxy(schemaSource);
+ final var serializationProxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
- assertEquals(serializationProxy.getRepresentation().getIdentifier(), schemaSource.getIdentifier());
- assertArrayEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
+ assertEquals(serializationProxy.getRepresentation().sourceId(), schemaSource.sourceId());
+ assertEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
}
}
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-config</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Configuration files for md-sal clustering</description>
akka {
remote {
artery {
- enabled = off
+ enabled = on
+ transport = tcp
canonical.hostname = "127.0.0.1"
canonical.port = 2550
}
- netty.tcp {
- hostname = "127.0.0.1"
- port = 2550
- }
- # when under load we might trip a false positive on the failure detector
- # transport-failure-detector {
- # heartbeat-interval = 4 s
- # acceptable-heartbeat-pause = 16s
- # }
}
cluster {
- # Remove ".tcp" when using artery.
- seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
+ # Using artery.
+ seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]
roles = [
"member-1"
]
+ # when under load we might trip a false positive on the failure detector
+ # failure-detector {
+ # heartbeat-interval = 4 s
+ # acceptable-heartbeat-pause = 16s
+ # }
}
persistence {
#shard-snapshot-batch-count=20000
# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+# Disabled, if direct threshold is enabled.
#shard-snapshot-data-threshold-percentage=12
+# The max size of in-memory journal(in MB), after reaching the limit, snapshot will be taken. Should be not less then 1.
+# If set to 0, direct threshold is disabled and percentage is used instead.
+#shard-snapshot-data-threshold=0
+
# The interval at which the leader of the shard will check if its majority followers are active and
# term itself as isolated.
#shard-isolated-leader-check-interval-in-millis=5000
# measures the latency for a commit and auto-adjusts the rate limit.
#transaction-creation-initial-rate-limit=100
-# The maximum thread pool size for each shard's data store data change notification executor.
-# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
-#max-shard-data-change-executor-pool-size=20
-
-# The maximum queue size for each shard's data store data change notification executor.
-# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
-#max-shard-data-change-executor-queue-size=1000
-
-# The maximum queue size for each shard's data store data change listener.
-# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
-#max-shard-data-change-listener-queue-size=1000
-
-# The maximum queue size for each shard's data store executor.
-# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
-#max-shard-data-store-executor-queue-size=5000
-
# A fully qualified java class name. The class should implement
# org.opendaylight.controller.cluster.raft.policy.RaftPolicy. This java class should be
# accessible to the distributed data store OSGi module so that it can be dynamically loaded via
#custom-raft-policy-implementation=
# When fragmenting messages thru the akka remoting framework, this is the maximum size in bytes
-# for a message slice.
-#maximum-message-slice-size=20480000
-
-# Enable tell-based protocol between frontend (applications) and backend (shards). Using this protocol
-# should avoid AskTimeoutExceptions seen under heavy load. Defaults to false (use ask-based protocol).
-#use-tell-based-protocol=true
+# for a message slice. This needs to be below Akka's maximum-frame-size and defaults to 480KiB.
+maximum-message-slice-size=491520
# Tune the maximum number of entries a follower is allowed to lag behind the leader before it is
# considered out-of-sync. This flag may require tuning in face of a large number of small transactions.
# Enable lz4 compression for snapshots sent from leader to followers
#use-lz4-compression=true
+
+# Export snapshot and journal content after recovery, possible modes: off, json
+#
+# Journal Json structure:
+# Entries : [
+# Entry : [
+# Node: [
+# Path : {},
+# ModificationType : {},
+# Data : {}
+# ]
+# ]
+# ]
+#
+# Snapshot Json structure:
+# RootNode : {}
+#
+export-on-recovery=off
+
+# Directory name for export files
+#recovery-export-base-dir=persistence-export
loggers = ["akka.event.slf4j.Slf4jLogger"]
logger-startup-timeout = 300s
+ # JFR requires boot delegation, which we do not have by default
+ java-flight-recorder {
+ enabled = false
+ }
+
actor {
warn-about-java-serializer-usage = off
provider = "akka.cluster.ClusterActorRefProvider"
# with read-only associations
use-passive-connections = off
- netty.tcp {
+ classic.netty.tcp {
maximum-frame-size = 419430400
send-buffer-size = 52428800
receive-buffer-size = 52428800
}
artery {
+ enabled = on
+ transport = tcp
+
advanced {
- #maximum-frame-size = 256 KiB
- #maximum-large-frame-size = 2 MiB
+ maximum-frame-size = 512 KiB
+ maximum-large-frame-size = 2 MiB
}
}
}
# This is crucial for correct behavior if you use Cluster Singleton or Cluster Sharding,
# especially together with Akka Persistence.
- #auto-down-unreachable-after = 30s
-
allow-weakly-up-members = on
use-dispatcher = cluster-dispatcher
failure-detector.acceptable-heartbeat-pause = 3 s
+
+ distributed-data {
+ # How often the Replicator should send out gossip information.
+ # This value controls how quickly Entity Ownership Service data is replicated
+ # across cluster nodes.
+ gossip-interval = 100 ms
+
+ # How often the subscribers will be notified of changes, if any.
+ # This value controls how quickly Entity Ownership Service decisions are
+ # propagated within a node.
+ notify-subscribers-interval = 20 ms
+ }
+
+ downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+ split-brain-resolver {
+ active-strategy = keep-majority
+ stable-after = 7s
+ }
}
persistence {
journal {
# The following activates the default segmented file journal. Each persistent actor
# is stored in a separate directory, with multiple segment files. Segments are removed
- # when they are not longer required.
+ # when they are no longer required.
#
plugin = akka.persistence.journal.segmented-file
max-entry-size = 16M
# Maximum size of a segment
max-segment-size = 128M
+ # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+ # Set to <= 0 to flush immediately.
+ #max-unflushed-bytes = 1M
+ # Map each segment into memory. Defaults to true, use false to keep a heap-based
+ # buffer instead.
+ memory-mapped = true
+ }
+ }
+
+ # Journal configuration for shards that have persistence turned off. They still need to have a journal plugin
+ # configured, since they still need to store things in the journal occasionally, but having larger segment sizes
+ # would be wastefull.
+ non-persistent {
+ journal {
+ class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
+ # Root directory for segmented journal storage
+ root-directory = "segmented-journal"
+ # Maximum size of a single entry in the segmented journal
+ max-entry-size = 512K
+ # Maximum size of a segment
+ max-segment-size = 1M
+ # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+ # Set to <= 0 to flush immediately.
+ #max-unflushed-bytes = 128K
# Map each segment into memory. Note that while this can improve performance,
# it will also place additional burden on system resources.
memory-mapped = false
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>concepts</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-util</artifactId>
+ <artifactId>util</artifactId>
</dependency>
</dependencies>
*/
package org.opendaylight.controller.md.sal.common.util.jmx;
-import com.google.common.annotations.Beta;
import java.lang.management.ManagementFactory;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
*
* @author Thomas Pantelis
*/
-@Beta
public abstract class AbstractMXBean {
-
private static final Logger LOG = LoggerFactory.getLogger(AbstractMXBean.class);
public static final String BASE_JMX_PREFIX = "org.opendaylight.controller:";
boolean registered = false;
try {
// Object to identify MBean
- final ObjectName mbeanName = this.getMBeanObjectName();
+ final ObjectName mbeanName = getMBeanObjectName();
LOG.debug("Register MBean {}", mbeanName);
* @return true is successfully unregistered, false otherwise.
*/
public boolean unregisterMBean() {
- boolean unregister = false;
try {
- ObjectName mbeanName = this.getMBeanObjectName();
- unregisterMBean(mbeanName);
- unregister = true;
+ unregisterMBean(getMBeanObjectName());
+ return true;
} catch (MBeanRegistrationException | InstanceNotFoundException | MalformedObjectNameException e) {
LOG.debug("Failed when unregistering MBean", e);
+ return false;
}
-
- return unregister;
}
private void unregisterMBean(ObjectName mbeanName) throws MBeanRegistrationException,
package org.opendaylight.controller.md.sal.common.util.jmx;
-import java.beans.ConstructorProperties;
+import javax.management.ConstructorParameters;
/**
* A bean class that holds various thread executor statistic metrics. This class is suitable for
private final Long largestQueueSize;
private final Long rejectedTaskCount;
- @ConstructorProperties({"activeThreadCount","currentThreadPoolSize","largestThreadPoolSize",
+ @ConstructorParameters({"activeThreadCount","currentThreadPoolSize","largestThreadPoolSize",
"maxThreadPoolSize","currentQueueSize","largestQueueSize","maxQueueSize",
"completedTaskCount","totalTaskCount","rejectedTaskCount"})
public ThreadExecutorStats(long activeThreadCount, long currentThreadPoolSize,
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.common.util;
-
-public final class Arguments {
-
- private Arguments() {
- throw new UnsupportedOperationException("Utility class");
- }
-
- /**
- * Checks if value is instance of provided class.
- *
- * @param value Value to check
- * @param type Type to check
- * @return Reference which was checked
- */
- @SuppressWarnings("unchecked")
- public static <T> T checkInstanceOf(Object value, Class<T> type) {
- if (!type.isInstance(value)) {
- throw new IllegalArgumentException(String.format("Value %s is not of type %s", value, type));
- }
- return (T) value;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.common.util;
-
-/**
- * An AutoCloseable that does nothing.
- *
- * @author Thomas Pantelis
- */
-public final class NoopAutoCloseable implements AutoCloseable {
- public static final NoopAutoCloseable INSTANCE = new NoopAutoCloseable();
-
- private NoopAutoCloseable() {
- }
-
- @Override
- public void close() {
- }
-}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
+
<!-- Java -->
<dependency>
<groupId>org.slf4j</groupId>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
+ <artifactId>org.osgi.framework</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
- </dependency>
-
- <!-- Akka -->
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
- </dependency>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-cluster_2.13</artifactId>
- </dependency>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-osgi_2.13</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.compendium</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
+ <artifactId>org.osgi.service.component</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-remote_2.13</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.metatype.annotations</artifactId>
</dependency>
+
+ <!-- Akka -->
<dependency>
<groupId>org.scala-lang.modules</groupId>
<artifactId>scala-java8-compat_2.13</artifactId>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
</dependency>
<!-- Scala -->
<dependency>
<groupId>net.java.dev.stax-utils</groupId>
<artifactId>stax-utils</artifactId>
- <exclusions>
- <exclusion>
- <!-- JSR173 ships with JRE by default -->
- <groupId>com.bea.xml</groupId>
- <artifactId>jsr173-ri</artifactId>
- </exclusion>
- </exclusions>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>cds-dom-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>cds-mgmt-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft-example</artifactId>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-dom-codec-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-common-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-dom-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-dom-spi</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-dom-broker</artifactId>
</dependency>
-
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>concepts</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>util</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>yang-binding</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-spi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-tree-ri</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-binfmt</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-codec-xml</artifactId>
</dependency>
<dependency>
- <groupId>tech.pantheon.triemap</groupId>
- <artifactId>triemap</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-gson</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-spi</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-util</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-text</artifactId>
</dependency>
- <dependency>
- <groupId>io.atomix</groupId>
- <artifactId>atomix-storage</artifactId>
- <version>3.1.5</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>io.atomix</groupId>
- <artifactId>atomix-utils</artifactId>
- <version>3.1.5</version>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<artifactId>commons-io</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-test-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-codec</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-test-utils</artifactId>
<instructions>
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
- <!-- Karaf cannot handle Factory Component requirements, see https://issues.apache.org/jira/browse/KARAF-6625 -->
- <_dsannotations-options>norequirements</_dsannotations-options>
-
<Export-Package>
org.opendaylight.controller.cluster.datastore;
org.opendaylight.controller.cluster.datastore.config;
package org.opendaylight.controller.cluster.akka.osgi.impl;
import akka.actor.ActorSystem;
-import com.typesafe.config.Config;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.ActorSystemProviderListener;
public final class OSGiActorSystemProvider implements ActorSystemProvider {
private static final Logger LOG = LoggerFactory.getLogger(OSGiActorSystemProvider.class);
- @Reference
- AkkaConfigurationReader reader = null;
-
private ActorSystemProviderImpl delegate;
- @Override
- public ActorSystem getActorSystem() {
- return delegate.getActorSystem();
- }
-
- @Override
- public ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
- final ActorSystemProviderListener listener) {
- return delegate.registerActorSystemProviderListener(listener);
- }
-
@Activate
- void activate(final BundleContext bundleContext) {
+ public OSGiActorSystemProvider(@Reference final AkkaConfigurationReader reader, final BundleContext bundleContext) {
LOG.info("Actor System provider starting");
- final Config akkaConfig = AkkaConfigFactory.createAkkaConfig(reader);
+ final var akkaConfig = AkkaConfigFactory.createAkkaConfig(reader);
delegate = new ActorSystemProviderImpl(BundleClassLoaderFactory.createClassLoader(bundleContext),
QuarantinedMonitorActorPropsFactory.createProps(bundleContext, akkaConfig), akkaConfig);
LOG.info("Actor System provider started");
delegate = null;
LOG.info("Actor System provider stopped");
}
+
+ @Override
+ public ActorSystem getActorSystem() {
+ return delegate.getActorSystem();
+ }
+
+ @Override
+ public ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
+ final ActorSystemProviderListener listener) {
+ return delegate.registerActorSystemProviderListener(listener);
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap.Builder;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.PingPongMergingDOMDataBroker;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
- implements PingPongMergingDOMDataBroker {
-
- private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
-
- private final AtomicLong txNum = new AtomicLong();
- private final AtomicLong chainNum = new AtomicLong();
- private final ClassToInstanceMap<DOMDataBrokerExtension> extensions;
-
- private volatile AutoCloseable closeable;
-
- protected AbstractDOMBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
- super(datastores);
-
- Builder<DOMDataBrokerExtension> extBuilder = ImmutableClassToInstanceMap.builder();
- if (isSupported(datastores, DOMStoreTreeChangePublisher.class)) {
- extBuilder.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
- @Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
- final DOMDataTreeIdentifier treeId, final L listener) {
- DOMStore store = getDOMStore(treeId.getDatastoreType());
- return ((DOMStoreTreeChangePublisher) store).registerTreeChangeListener(
- treeId.getRootIdentifier(), listener);
- }
- });
- }
-
- if (isSupported(datastores, DOMDataTreeCommitCohortRegistry.class)) {
- extBuilder.put(DOMDataTreeCommitCohortRegistry.class, new DOMDataTreeCommitCohortRegistry() {
- @Override
- public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
- final DOMDataTreeIdentifier path, final T cohort) {
- DOMStore store = getDOMStore(path.getDatastoreType());
- return ((DOMDataTreeCommitCohortRegistry) store).registerCommitCohort(path, cohort);
- }
- });
- }
-
- extensions = extBuilder.build();
- }
-
- private static boolean isSupported(final Map<LogicalDatastoreType, DOMStore> datastores,
- final Class<?> expDOMStoreInterface) {
- return datastores.values().stream().allMatch(expDOMStoreInterface::isInstance);
- }
-
- public void setCloseable(final AutoCloseable closeable) {
- this.closeable = closeable;
- }
-
- @Override
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void close() {
- super.close();
-
- if (closeable != null) {
- try {
- closeable.close();
- } catch (Exception e) {
- LOG.debug("Error closing instance", e);
- }
- }
- }
-
- @Override
- protected Object newTransactionIdentifier() {
- return "DOM-" + txNum.getAndIncrement();
- }
-
- @Override
- public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
- return extensions;
- }
-
- @Override
- public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
- checkNotClosed();
-
- final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains =
- new EnumMap<>(LogicalDatastoreType.class);
- for (Map.Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
- backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
- }
-
- final long chainId = chainNum.getAndIncrement();
- LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
- backingChains);
- return new DOMBrokerTransactionChain(chainId, backingChains, this, listener);
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private DOMStore getDOMStore(final LogicalDatastoreType type) {
- DOMStore store = getTxFactories().get(type);
- checkState(store != null, "Requested logical data store is not available.");
- return store;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMBrokerTransaction<T extends DOMStoreTransaction> implements DOMDataTreeTransaction {
-
- private final EnumMap<LogicalDatastoreType, T> backingTxs;
- private final Object identifier;
- private final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories;
-
- /**
- * Creates new composite Transactions.
- *
- * @param identifier Identifier of transaction.
- */
- protected AbstractDOMBrokerTransaction(final Object identifier,
- Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
- this.identifier = requireNonNull(identifier, "Identifier should not be null");
- this.storeTxFactories = requireNonNull(storeTxFactories, "Store Transaction Factories should not be null");
- this.backingTxs = new EnumMap<>(LogicalDatastoreType.class);
- }
-
- /**
- * Returns subtransaction associated with supplied key.
- *
- * @param key the data store type key
- * @return the subtransaction
- * @throws NullPointerException
- * if key is null
- * @throws IllegalArgumentException
- * if no subtransaction is associated with key.
- */
- protected final T getSubtransaction(final LogicalDatastoreType key) {
- requireNonNull(key, "key must not be null.");
-
- T ret = backingTxs.get(key);
- if (ret == null) {
- ret = createTransaction(key);
- backingTxs.put(key, ret);
- }
- checkArgument(ret != null, "No subtransaction associated with %s", key);
- return ret;
- }
-
- protected abstract T createTransaction(LogicalDatastoreType key);
-
- /**
- * Returns immutable Iterable of all subtransactions.
- *
- */
- protected Collection<T> getSubtransactions() {
- return backingTxs.values();
- }
-
- @Override
- public Object getIdentifier() {
- return identifier;
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- protected void closeSubtransactions() {
- /*
- * We share one exception for all failures, which are added
- * as supressedExceptions to it.
- */
- IllegalStateException failure = null;
- for (T subtransaction : backingTxs.values()) {
- try {
- subtransaction.close();
- } catch (Exception e) {
- // If we did not allocated failure we allocate it
- if (failure == null) {
- failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
- } else {
- // We update it with additional exceptions, which occurred during error.
- failure.addSuppressed(e);
- }
- }
- }
- // If we have failure, we throw it at after all attempts to close.
- if (failure != null) {
- throw failure;
- }
- }
-
- protected DOMStoreTransactionFactory getTxFactory(LogicalDatastoreType type) {
- return storeTxFactories.get(type);
- }
-
- @Override
- public final String toString() {
- return addToStringAttributes(MoreObjects.toStringHelper(this).omitNullValues()).toString();
- }
-
- protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("identifier", identifier);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBrokerWriteTransaction<T extends DOMStoreWriteTransaction>
- extends AbstractDOMBrokerTransaction<T> implements DOMDataTreeWriteTransaction {
-
- @SuppressWarnings("rawtypes")
- private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, AbstractDOMTransactionFactory>
- IMPL_UPDATER = AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class,
- AbstractDOMTransactionFactory.class, "commitImpl");
- @SuppressWarnings("rawtypes")
- private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, Future> FUTURE_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class, Future.class,
- "commitFuture");
- private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBrokerWriteTransaction.class);
- private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
-
- /**
- * Implementation of real commit. It also acts as an indication that
- * the transaction is running -- which we flip atomically using
- * {@link #IMPL_UPDATER}.
- */
- private volatile AbstractDOMTransactionFactory<?> commitImpl;
-
- /**
- * Future task of transaction commit. It starts off as null, but is
- * set appropriately on {@link #submit()} and {@link #cancel()} via
- * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
- * <p/>
- * Lazy set is safe for use because it is only referenced to in the
- * {@link #cancel()} slow path, where we will busy-wait for it. The
- * fast path gets the benefit of a store-store barrier instead of the
- * usual store-load barrier.
- */
- private volatile Future<?> commitFuture;
-
- protected AbstractDOMBrokerWriteTransaction(final Object identifier,
- final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
- final AbstractDOMTransactionFactory<?> commitImpl) {
- super(identifier, storeTxFactories);
- this.commitImpl = requireNonNull(commitImpl, "commitImpl must not be null.");
- }
-
- @Override
- public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> data) {
- checkRunning(commitImpl);
- checkInstanceIdentifierReferencesData(path,data);
- getSubtransaction(store).write(path, data);
- }
-
- private static void checkInstanceIdentifierReferencesData(final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> data) {
- checkArgument(data != null, "Attempted to store null data at %s", path);
- final PathArgument lastArg = path.getLastPathArgument();
- if (lastArg != null) {
- checkArgument(lastArg.equals(data.getIdentifier()),
- "Instance identifier references %s but data identifier is %s", lastArg, data);
- }
- }
-
- @Override
- public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkRunning(commitImpl);
- getSubtransaction(store).delete(path);
- }
-
- @Override
- public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> data) {
- checkRunning(commitImpl);
- checkInstanceIdentifierReferencesData(path, data);
- getSubtransaction(store).merge(path, data);
- }
-
- @Override
- public boolean cancel() {
- final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
- if (impl != null) {
- LOG.trace("Transaction {} cancelled before submit", getIdentifier());
- FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
- closeSubtransactions();
- return true;
- }
-
- // The transaction is in process of being submitted or cancelled. Busy-wait
- // for the corresponding future.
- Future<?> future;
- do {
- future = commitFuture;
- }
- while (future == null);
-
- return future.cancel(false);
- }
-
- @Override
- @SuppressWarnings("checkstyle:IllegalCatch")
- public FluentFuture<? extends CommitInfo> commit() {
- final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
- checkRunning(impl);
-
- final Collection<T> txns = getSubtransactions();
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
-
- FluentFuture<? extends CommitInfo> ret;
- try {
- for (final T txn : txns) {
- cohorts.add(txn.ready());
- }
-
- ret = impl.commit(this, cohorts);
- } catch (RuntimeException e) {
- ret = FluentFuture.from(Futures.immediateFailedFuture(
- TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e)));
- }
- FUTURE_UPDATER.lazySet(this, ret);
- return ret;
- }
-
- private void checkRunning(final AbstractDOMTransactionFactory<?> impl) {
- checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
- }
-
- @Override
- protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return super.addToStringAttributes(toStringHelper).add("running", commitImpl == null);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
- @SuppressWarnings("rawtypes")
- private static final AtomicIntegerFieldUpdater<AbstractDOMTransactionFactory> UPDATER =
- AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed");
- private final Map<LogicalDatastoreType, T> storeTxFactories;
- private volatile int closed = 0;
-
- protected AbstractDOMTransactionFactory(final Map<LogicalDatastoreType, T> txFactories) {
- this.storeTxFactories = new EnumMap<>(txFactories);
- }
-
- /**
- * Implementations must return unique identifier for each and every call of
- * this method.
- *
- * @return new Unique transaction identifier.
- */
- protected abstract Object newTransactionIdentifier();
-
- /**
- * Submits a transaction asynchronously for commit.
- *
- * @param transaction the transaction to submit
- * @param cohorts the associated cohorts
- * @return a resulting Future
- */
- protected abstract FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction transaction,
- Collection<DOMStoreThreePhaseCommitCohort> cohorts);
-
- /**
- * Creates a new read-only transaction.
- *
- * @return the transaction instance
- */
- public final DOMDataTreeReadTransaction newReadOnlyTransaction() {
- checkNotClosed();
-
- return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories);
- }
-
-
- /**
- * Creates a new write-only transaction.
- *
- * @return the transaction instance
- */
- public final DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
- checkNotClosed();
-
- return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this);
- }
-
-
- /**
- * Creates a new read-write transaction.
- *
- * @return the transaction instance
- */
- public final DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
- checkNotClosed();
-
- return new DOMBrokerReadWriteTransaction(newTransactionIdentifier(), storeTxFactories, this);
- }
-
- /**
- * Convenience accessor of backing factories intended to be used only by
- * finalization of this class.
- *
- * <b>Note:</b>
- * Finalization of this class may want to access other functionality of
- * supplied Transaction factories.
- *
- * @return Map of backing transaction factories.
- */
- public final Map<LogicalDatastoreType, T> getTxFactories() {
- return storeTxFactories;
- }
-
- /**
- * Checks if instance is not closed.
- *
- * @throws IllegalStateException If instance of this class was closed.
- *
- */
- protected final void checkNotClosed() {
- Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
- }
-
- @Override
- public void close() {
- final boolean success = UPDATER.compareAndSet(this, 0, 1);
- Preconditions.checkState(success, "Transaction factory was already closed");
- }
-}
}
@Override
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+ public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
return delegate().read(path);
}
}
@Override
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+ public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
return delegate().read(path);
}
import static java.util.Objects.requireNonNull;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
+import java.lang.ref.Cleaner;
+import java.lang.ref.Cleaner.Cleanable;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.databroker.actors.dds.AbstractClientHandle;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*/
abstract class ClientBackedTransaction<T extends AbstractClientHandle<?>> extends
AbstractDOMStoreTransaction<TransactionIdentifier> {
- private static final class Finalizer extends FinalizablePhantomReference<ClientBackedTransaction<?>> {
- private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
- private static final Set<Finalizer> FINALIZERS = ConcurrentHashMap.newKeySet();
- private static final Logger LOG = LoggerFactory.getLogger(Finalizer.class);
-
+ private static final class Cleanup implements Runnable {
private final AbstractClientHandle<?> transaction;
private final Throwable allocationContext;
- private Finalizer(final ClientBackedTransaction<?> referent, final AbstractClientHandle<?> transaction,
- final Throwable allocationContext) {
- super(referent, QUEUE);
- this.transaction = requireNonNull(transaction);
+ Cleanup(final AbstractClientHandle<?> transaction, final Throwable allocationContext) {
+ this.transaction = transaction;
this.allocationContext = allocationContext;
}
- static <T extends AbstractClientHandle<?>> @NonNull T recordTransaction(
- final @NonNull ClientBackedTransaction<T> referent, final @NonNull T transaction,
- final @Nullable Throwable allocationContext) {
- FINALIZERS.add(new Finalizer(referent, transaction, allocationContext));
- return transaction;
- }
-
@Override
- public void finalizeReferent() {
- FINALIZERS.remove(this);
+ public void run() {
if (transaction.abort()) {
LOG.info("Aborted orphan transaction {}", transaction, allocationContext);
}
}
}
+ private static final Logger LOG = LoggerFactory.getLogger(ClientBackedTransaction.class);
+ private static final Cleaner CLEANER = Cleaner.create();
+
private final T delegate;
+ private final Cleanable cleanable;
ClientBackedTransaction(final T delegate, final Throwable allocationContext) {
super(delegate.getIdentifier());
- this.delegate = Finalizer.recordTransaction(this, delegate, allocationContext);
- }
-
- final T delegate() {
- return delegate;
+ this.delegate = requireNonNull(delegate);
+ this.cleanable = CLEANER.register(this, new Cleanup(delegate, allocationContext));
}
@Override
public void close() {
delegate.abort();
+ // Run cleaning immediate so the references is not stuck in cleaner queue
+ cleanable.clean();
+ }
+
+ final T delegate() {
+ return delegate;
}
}
}
@Override
- public final void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public final void write(final YangInstanceIdentifier path, final NormalizedNode data) {
delegate().write(path, data);
}
@Override
- public final void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public final void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
delegate().merge(path, data);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.databroker;
import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.CommitStatsMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.yangtools.util.DurationStatisticsTracker;
*
* @author Thomas Pantelis
*/
-public class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStatsMXBean {
-
+final class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStatsMXBean {
private final DurationStatisticsTracker commitStatsTracker;
/**
* @param commitStatsTracker the DurationStatsTracker used to obtain the stats.
* @param mbeantype mBeanType Used as the <code>type</code> property in the bean's ObjectName.
*/
- public CommitStatsMXBeanImpl(@NonNull DurationStatisticsTracker commitStatsTracker,
- @NonNull String mbeantype) {
+ CommitStatsMXBeanImpl(final @NonNull DurationStatisticsTracker commitStatsTracker,
+ final @NonNull String mbeantype) {
super("CommitStats", mbeantype, null);
this.commitStatsTracker = commitStatsTracker;
}
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
import com.google.common.annotations.Beta;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.AbstractDOMDataBroker;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* @author Thomas Pantelis
*/
@Beta
-public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
+@Component(service = DOMDataBroker.class, property = "type=default")
+public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker {
private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class);
private static final String CAN_COMMIT = "CAN_COMMIT";
private static final String PRE_COMMIT = "PRE_COMMIT";
public ConcurrentDOMDataBroker(final Map<LogicalDatastoreType, DOMStore> datastores,
final Executor listenableFutureExecutor, final DurationStatisticsTracker commitStatsTracker) {
super(datastores);
- this.clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor);
+ clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor);
this.commitStatsTracker = requireNonNull(commitStatsTracker);
}
- public DurationStatisticsTracker getCommitStatsTracker() {
- return commitStatsTracker;
+ @Activate
+ public ConcurrentDOMDataBroker(@Reference final DataBrokerCommitExecutor commitExecutor,
+ @Reference(target = "(type=distributed-config)") final DOMStore configDatastore,
+ @Reference(target = "(type=distributed-operational)") final DOMStore operDatastore) {
+ this(Map.of(
+ LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore),
+ commitExecutor.executor(), commitExecutor.commitStatsTracker());
+ LOG.info("DOM Data Broker started");
+ }
+
+ @Override
+ @Deactivate
+ public void close() {
+ LOG.info("DOM Data Broker stopping");
+ super.close();
+ LOG.info("DOM Data Broker stopped");
}
@Override
protected FluentFuture<? extends CommitInfo> commit(final DOMDataTreeWriteTransaction transaction,
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final DOMStoreThreePhaseCommitCohort cohort) {
checkArgument(transaction != null, "Transaction must not be null.");
- checkArgument(cohorts != null, "Cohorts must not be null.");
+ checkArgument(cohort != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
- if (cohorts.isEmpty()) {
- return CommitInfo.emptyFluentFuture();
- }
-
- final AsyncNotifyingSettableFuture clientSubmitFuture =
- new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
-
- doCanCommit(clientSubmitFuture, transaction, cohorts);
-
- return FluentFuture.from(clientSubmitFuture).transform(ignored -> CommitInfo.empty(),
- MoreExecutors.directExecutor());
+ final var clientSubmitFuture = new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
+ doCanCommit(clientSubmitFuture, transaction, cohort);
+ return FluentFuture.from(clientSubmitFuture);
}
private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataTreeWriteTransaction transaction,
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
+ final DOMStoreThreePhaseCommitCohort cohort) {
final long startTime = System.nanoTime();
- final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
- // Not using Futures.allAsList here to avoid its internal overhead.
- FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
+ Futures.addCallback(cohort.canCommit(), new FutureCallback<>() {
@Override
public void onSuccess(final Boolean result) {
if (result == null || !result) {
- handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER,
- new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
- } else if (!cohortIterator.hasNext()) {
- // All cohorts completed successfully - we can move on to the preCommit phase
- doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
+ onFailure(new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
} else {
- Futures.addCallback(cohortIterator.next().canCommit(), this, MoreExecutors.directExecutor());
+ doPreCommit(startTime, clientSubmitFuture, transaction, cohort);
}
}
@Override
public void onFailure(final Throwable failure) {
- handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
+ handleException(clientSubmitFuture, transaction, cohort, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
}
- };
-
- Futures.addCallback(cohortIterator.next().canCommit(), futureCallback, MoreExecutors.directExecutor());
+ }, MoreExecutors.directExecutor());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
- final DOMDataTreeWriteTransaction transaction,
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
- final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
- // Not using Futures.allAsList here to avoid its internal overhead.
- FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+ final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+ Futures.addCallback(cohort.preCommit(), new FutureCallback<>() {
@Override
- public void onSuccess(final Void notUsed) {
- if (!cohortIterator.hasNext()) {
- // All cohorts completed successfully - we can move on to the commit phase
- doCommit(startTime, clientSubmitFuture, transaction, cohorts);
- } else {
- ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
- Futures.addCallback(preCommitFuture, this, MoreExecutors.directExecutor());
- }
+ public void onSuccess(final Empty result) {
+ doCommit(startTime, clientSubmitFuture, transaction, cohort);
}
@Override
public void onFailure(final Throwable failure) {
- handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
+ handleException(clientSubmitFuture, transaction, cohort, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
}
- };
-
- ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
- Futures.addCallback(preCommitFuture, futureCallback, MoreExecutors.directExecutor());
+ }, MoreExecutors.directExecutor());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
- final DOMDataTreeWriteTransaction transaction,
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
- final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
- // Not using Futures.allAsList here to avoid its internal overhead.
- FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+ final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+ Futures.addCallback(cohort.commit(), new FutureCallback<CommitInfo>() {
@Override
- public void onSuccess(final Void notUsed) {
- if (!cohortIterator.hasNext()) {
- // All cohorts completed successfully - we're done.
- commitStatsTracker.addDuration(System.nanoTime() - startTime);
-
- clientSubmitFuture.set();
- } else {
- ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
- Futures.addCallback(commitFuture, this, MoreExecutors.directExecutor());
- }
+ public void onSuccess(final CommitInfo result) {
+ commitStatsTracker.addDuration(System.nanoTime() - startTime);
+ clientSubmitFuture.set();
}
@Override
public void onFailure(final Throwable throwable) {
- handleException(clientSubmitFuture, transaction, cohorts, COMMIT, COMMIT_ERROR_MAPPER, throwable);
+ handleException(clientSubmitFuture, transaction, cohort, COMMIT, COMMIT_ERROR_MAPPER, throwable);
}
- };
-
- ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
- Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor());
+ }, MoreExecutors.directExecutor());
}
- @SuppressFBWarnings(value = { "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE", "UPM_UNCALLED_PRIVATE_METHOD" },
- justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
- + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
- + "TransactionCommitFailedException and thus should be deemed as confirmed."
- + "Also https://github.com/spotbugs/spotbugs/issues/811")
private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
- final DOMDataTreeWriteTransaction transaction,
- final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
- final String phase, final TransactionCommitFailedExceptionMapper exMapper,
- final Throwable throwable) {
-
+ final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort,
+ final String phase, final TransactionCommitFailedExceptionMapper exMapper, final Throwable throwable) {
if (clientSubmitFuture.isDone()) {
// We must have had failures from multiple cohorts.
return;
// Use debug instead of warn level here because this exception gets propagate back to the caller via the Future
LOG.debug("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, throwable);
- // Transaction failed - tell all cohorts to abort.
- @SuppressWarnings("unchecked")
- ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohorts.size()];
- int index = 0;
- for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- canCommitFutures[index++] = cohort.abort();
- }
-
// Propagate the original exception
final Exception e;
if (throwable instanceof NoShardLeaderException || throwable instanceof ShardLeaderNotRespondingException) {
e = new DataStoreUnavailableException(throwable.getMessage(), throwable);
- } else if (throwable instanceof Exception) {
- e = (Exception)throwable;
+ } else if (throwable instanceof Exception ex) {
+ e = ex;
} else {
e = new RuntimeException("Unexpected error occurred", throwable);
}
clientSubmitFuture.setException(exMapper.apply(e));
- ListenableFuture<List<Void>> combinedFuture = Futures.allAsList(canCommitFutures);
- Futures.addCallback(combinedFuture, new FutureCallback<List<Void>>() {
+ // abort
+ Futures.addCallback(cohort.abort(), new FutureCallback<Empty>() {
@Override
- public void onSuccess(final List<Void> notUsed) {
+ public void onSuccess(final Empty result) {
// Propagate the original exception to the client.
LOG.debug("Tx: {} aborted successfully", transaction.getIdentifier());
}
* FIXME: This class should probably be moved to yangtools common utils for re-usability and
* unified with AsyncNotifyingListenableFutureTask.
*/
- private static class AsyncNotifyingSettableFuture extends AbstractFuture<Void> {
-
+ private static class AsyncNotifyingSettableFuture extends AbstractFuture<CommitInfo> {
/**
* ThreadLocal used to detect if the task completion thread is running the future listener Runnables.
*/
boolean set() {
ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE);
try {
- return super.set(null);
+ return super.set(CommitInfo.empty());
} finally {
ON_TASK_COMPLETION_THREAD_TL.set(null);
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadOnlyTransaction
- extends AbstractDOMBrokerTransaction<DOMStoreReadTransaction> implements DOMDataTreeReadTransaction {
-
- /**
- * Creates new composite Transactions.
- *
- * @param identifier Identifier of transaction.
- */
- protected DOMBrokerReadOnlyTransaction(Object identifier,
- Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
- super(identifier, storeTxFactories);
- }
-
- @Override
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final LogicalDatastoreType store,
- final YangInstanceIdentifier path) {
- return getSubtransaction(store).read(path);
- }
-
- @Override
- public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- return getSubtransaction(store).exists(path);
- }
-
- @Override
- public void close() {
- closeSubtransactions();
- }
-
- @Override
- protected DOMStoreReadTransaction createTransaction(LogicalDatastoreType key) {
- return getTxFactory(key).newReadOnlyTransaction();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadWriteTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreReadWriteTransaction>
- implements DOMDataTreeReadWriteTransaction {
-
- /**
- * Constructs an instance.
- *
- * @param identifier identifier of transaction.
- * @param storeTxFactories the backing transaction store factories
- */
- protected DOMBrokerReadWriteTransaction(Object identifier,
- Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
- final AbstractDOMTransactionFactory<?> commitImpl) {
- super(identifier, storeTxFactories, commitImpl);
- }
-
- @Override
- public FluentFuture<Optional<NormalizedNode<?,?>>> read(final LogicalDatastoreType store,
- final YangInstanceIdentifier path) {
- return getSubtransaction(store).read(path);
- }
-
- @Override
- public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- return getSubtransaction(store).exists(path);
- }
-
- @Override
- protected DOMStoreReadWriteTransaction createTransaction(LogicalDatastoreType key) {
- return getTxFactory(key).newReadWriteTransaction();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory<DOMStoreTransactionChain>
- implements DOMTransactionChain {
- private enum State {
- RUNNING,
- CLOSING,
- CLOSED,
- FAILED,
- }
-
- private static final AtomicIntegerFieldUpdater<DOMBrokerTransactionChain> COUNTER_UPDATER =
- AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter");
- private static final AtomicReferenceFieldUpdater<DOMBrokerTransactionChain, State> STATE_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state");
- private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class);
- private final AtomicLong txNum = new AtomicLong();
- private final AbstractDOMBroker broker;
- private final DOMTransactionChainListener listener;
- private final long chainId;
-
- private volatile State state = State.RUNNING;
- private volatile int counter = 0;
-
- /**
- * Constructs an instance.
- *
- * @param chainId
- * ID of transaction chain
- * @param chains
- * Backing {@link DOMStoreTransactionChain}s.
- * @param listener
- * Listener, which listens on transaction chain events.
- * @throws NullPointerException
- * If any of arguments is null.
- */
- DOMBrokerTransactionChain(final long chainId, final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
- final AbstractDOMBroker broker, final DOMTransactionChainListener listener) {
- super(chains);
- this.chainId = chainId;
- this.broker = requireNonNull(broker);
- this.listener = requireNonNull(listener);
- }
-
- private void checkNotFailed() {
- checkState(state != State.FAILED, "Transaction chain has failed");
- }
-
- @Override
- protected Object newTransactionIdentifier() {
- return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
- }
-
- @Override
- public FluentFuture<? extends CommitInfo> commit(
- final DOMDataTreeWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
- checkNotFailed();
- checkNotClosed();
-
- final FluentFuture<? extends CommitInfo> ret = broker.commit(transaction, cohorts);
-
- COUNTER_UPDATER.incrementAndGet(this);
- ret.addCallback(new FutureCallback<CommitInfo>() {
- @Override
- public void onSuccess(final CommitInfo result) {
- transactionCompleted();
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- transactionFailed(transaction, failure);
- }
- }, MoreExecutors.directExecutor());
-
- return ret;
- }
-
- @Override
- public void close() {
- final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
- if (!success) {
- LOG.debug("Chain {} is no longer running", this);
- return;
- }
-
- super.close();
- for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
- subChain.close();
- }
-
- if (counter == 0) {
- finishClose();
- }
- }
-
- private void finishClose() {
- state = State.CLOSED;
- listener.onTransactionChainSuccessful(this);
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void transactionCompleted() {
- if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
- finishClose();
- }
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void transactionFailed(final DOMDataTreeWriteTransaction tx, final Throwable cause) {
- state = State.FAILED;
- LOG.debug("Transaction chain {} failed.", this, cause);
- listener.onTransactionChainFailed(this, tx, cause);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Huawei Technologies Co. Ltd. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-public class DOMBrokerWriteOnlyTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
- /**
- * Constructs an instance.
- *
- * @param identifier identifier of transaction.
- * @param storeTxFactories the backing transaction store factories
- */
- public DOMBrokerWriteOnlyTransaction(Object identifier,
- Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
- AbstractDOMTransactionFactory<?> commitImpl) {
- super(identifier, storeTxFactories, commitImpl);
- }
-
- @Override
- protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) {
- return getTxFactory(key).newWriteOnlyTransaction();
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(
+ service = DataBrokerCommitExecutor.class,
+ configurationPid = "org.opendaylight.controller.cluster.datastore.broker")
+@Designate(ocd = DataBrokerCommitExecutor.Config.class)
+public final class DataBrokerCommitExecutor {
+ @ObjectClassDefinition
+ public @interface Config {
+ @AttributeDefinition(name = "max-data-broker-future-callback-queue-size")
+ int callbackQueueSize() default 1000;
+ @AttributeDefinition(name = "max-data-broker-future-callback-pool-size")
+ int callbackPoolSize() default 20;
+ }
+
+ private static final Logger LOG = LoggerFactory.getLogger(DataBrokerCommitExecutor.class);
+
+ private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
+ private final ThreadExecutorStatsMXBeanImpl threadStats;
+ private final CommitStatsMXBeanImpl commitStats;
+ private final ExecutorService executorService;
+
+ @Activate
+ public DataBrokerCommitExecutor(final Config config) {
+ executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(),
+ config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class);
+ threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats",
+ "DOMDataBroker");
+ commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker");
+ commitStats.register();
+ LOG.info("DOM Data Broker commit exector started");
+ }
+
+ @Deactivate
+ void deactivate() {
+ LOG.info("DOM Data Broker commit exector stopping");
+ commitStats.unregister();
+ threadStats.unregister();
+ executorService.shutdown();
+ try {
+ executorService.awaitTermination(1, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ LOG.warn("Future executor failed to finish in time, giving up", e);
+ }
+ LOG.info("DOM Data Broker commit exector stopped");
+ }
+
+ Executor executor() {
+ return executorService;
+ }
+
+ DurationStatisticsTracker commitStatsTracker() {
+ return commitStatsTracker;
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.CommitStatsMXBeanImpl;
-import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.yangtools.util.DurationStatisticsTracker;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.component.annotations.Reference;
-import org.osgi.service.metatype.annotations.AttributeDefinition;
-import org.osgi.service.metatype.annotations.Designate;
-import org.osgi.service.metatype.annotations.ObjectClassDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Beta
-@Component(immediate = true, configurationPid = "org.opendaylight.controller.cluster.datastore.broker",
- property = "type=default")
-@Designate(ocd = OSGiDOMDataBroker.Config.class)
-public final class OSGiDOMDataBroker implements DOMDataBroker {
- @ObjectClassDefinition
- public @interface Config {
- @AttributeDefinition(name = "max-data-broker-future-callback-queue-size")
- int callbackQueueSize() default 1000;
- @AttributeDefinition(name = "max-data-broker-future-callback-pool-size")
- int callbackPoolSize() default 20;
- }
-
- private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMDataBroker.class);
-
- @Reference(target = "(type=distributed-config)")
- DOMStore configDatastore = null;
- @Reference(target = "(type=distributed-operational)")
- DOMStore operDatastore = null;
-
- private ExecutorService executorService;
- private ConcurrentDOMDataBroker delegate;
- private CommitStatsMXBeanImpl commitStats;
- private ThreadExecutorStatsMXBeanImpl threadStats;
-
- @Override
- public DOMDataTreeReadTransaction newReadOnlyTransaction() {
- return delegate.newReadOnlyTransaction();
- }
-
- @Override
- public DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
- return delegate.newWriteOnlyTransaction();
- }
-
- @Override
- public DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
- return delegate.newReadWriteTransaction();
- }
-
- @Override
- public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
- return delegate.getExtensions();
- }
-
- @Override
- public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
- return delegate.createTransactionChain(listener);
- }
-
- @Override
- public DOMTransactionChain createMergingTransactionChain(final DOMTransactionChainListener listener) {
- return delegate.createMergingTransactionChain(listener);
- }
-
- @Activate
- void activate(final Config config) {
- LOG.info("DOM Data Broker starting");
- final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
-
- executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(),
- config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class);
- delegate = new ConcurrentDOMDataBroker(ImmutableMap.of(
- LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore),
- executorService, commitStatsTracker);
-
- commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker");
- commitStats.register();
- threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats",
- "DOMDataBroker");
-
- LOG.info("DOM Data Broker started");
- }
-
- @Deactivate
- void deactivate() {
- LOG.info("DOM Data Broker stopping");
- commitStats.unregister();
- if (threadStats != null) {
- threadStats.unregister();
- }
-
- delegate.close();
- executorService.shutdown();
- try {
- executorService.awaitTermination(1, TimeUnit.MINUTES);
- } catch (InterruptedException e) {
- LOG.warn("Future executor failed to finish in time, giving up", e);
- }
- LOG.info("DOM Data Broker stopped");
- }
-}
import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
-import java.util.Collection;
+import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.stream.Stream;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
private static final AtomicReferenceFieldUpdater<AbstractClientHandle, State> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(AbstractClientHandle.class, State.class, "state");
- private final TransactionIdentifier transactionId;
- private final AbstractClientHistory parent;
+ private final @NonNull TransactionIdentifier transactionId;
+ private final @NonNull AbstractClientHistory parent;
private volatile State<T> state = new State<>();
}
@Override
+ // Non-final for mocking
public TransactionIdentifier getIdentifier() {
return transactionId;
}
*
* @return True if this transaction became closed during this call
*/
+ // Non-final for mocking
public boolean abort() {
if (commonAbort()) {
parent.onTransactionAbort(this);
}
private boolean commonAbort() {
- final Collection<T> toClose = ensureClosed();
+ final Map<Long, T> toClose = ensureClosed();
if (toClose == null) {
return false;
}
- toClose.forEach(AbstractProxyTransaction::abort);
+ toClose.values().forEach(AbstractProxyTransaction::abort);
+ parent.onTransactionShardsBound(transactionId, toClose.keySet());
return true;
}
* Make sure this snapshot is closed. If it became closed as the effect of this call, return a collection of
* {@link AbstractProxyTransaction} handles which need to be closed, too.
*
- * @return null if this snapshot has already been closed, otherwise a collection of proxies, which need to be
+ * @return null if this snapshot has already been closed, otherwise a State with of proxies, which need to be
* closed, too.
*/
- final @Nullable Collection<T> ensureClosed() {
- @SuppressWarnings("unchecked")
- final State<T> local = STATE_UPDATER.getAndSet(this, null);
- return local == null ? null : local.values();
+ final @Nullable Map<Long, T> ensureClosed() {
+ // volatile read and a conditional CAS. This ends up being better in the typical case when we are invoked more
+ // than once (see ClientBackedTransaction) than performing a STATE_UPDATER.getAndSet().
+ final State<T> local = state;
+ return local != null && STATE_UPDATER.compareAndSet(this, local, null) ? local : null;
}
final T ensureProxy(final YangInstanceIdentifier path) {
- final State<T> local = getState();
- final Long shard = parent.resolveShardForPath(path);
+ return ensureProxy(getState(), parent.resolveShardForPath(path));
+ }
+
+ private T ensureProxy(final State<T> localState, final Long shard) {
+ return localState.computeIfAbsent(shard, this::createProxy);
+ }
- return local.computeIfAbsent(shard, this::createProxy);
+ final Stream<T> ensureAllProxies() {
+ final var local = getState();
+ return parent.resolveAllShards().map(shard -> ensureProxy(local, shard));
}
final AbstractClientHistory parent() {
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.StampedLock;
+import java.util.stream.Stream;
import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
private final Map<Long, ProxyHistory> histories = new ConcurrentHashMap<>();
private final StampedLock lock = new StampedLock();
- private final AbstractDataStoreClientBehavior client;
- private final LocalHistoryIdentifier identifier;
+ private final @NonNull AbstractDataStoreClientBehavior client;
+ private final @NonNull LocalHistoryIdentifier identifier;
// Used via NEXT_TX_UPDATER
@SuppressWarnings("unused")
}
@Override
- public LocalHistoryIdentifier getIdentifier() {
+ public final LocalHistoryIdentifier getIdentifier() {
return identifier;
}
return client.resolveShardForPath(path);
}
+ final Stream<Long> resolveAllShards() {
+ return client.resolveAllShards();
+ }
+
+ final ActorUtils actorUtils() {
+ return client.actorUtils();
+ }
+
@Override
final void localAbort(final Throwable cause) {
final State oldState = STATE_UPDATER.getAndSet(this, State.CLOSED);
/**
* Create a new history proxy for a given shard.
*
+ * @param shard Shard cookie
* @throws InversibleLockException if the shard is being reconnected
*/
- @GuardedBy("lock")
+ @Holding("lock")
private ProxyHistory createHistoryProxy(final Long shard) {
final AbstractClientConnection<ShardBackendInfo> connection = client.getConnection(shard);
final LocalHistoryIdentifier proxyId = new LocalHistoryIdentifier(identifier.getClientId(),
LOG.debug("Create history response {}", response);
}
- private ProxyHistory ensureHistoryProxy(final TransactionIdentifier transactionId, final Long shard) {
+ private @NonNull ProxyHistory ensureHistoryProxy(final TransactionIdentifier transactionId, final Long shard) {
while (true) {
try {
// Short-lived lock to ensure exclusion of createHistoryProxy and the lookup phase in startReconnect,
}
}
- final AbstractProxyTransaction createSnapshotProxy(final TransactionIdentifier transactionId, final Long shard) {
+ final @NonNull AbstractProxyTransaction createSnapshotProxy(final TransactionIdentifier transactionId,
+ final Long shard) {
return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, true);
}
- final AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId, final Long shard) {
+ final @NonNull AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId,
+ final Long shard) {
return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, false);
}
* @throws DOMTransactionChainClosedException if this history is closed
* @throws IllegalStateException if a previous dependent transaction has not been closed
*/
- public ClientTransaction createTransaction() {
+ // Non-final for mocking
+ public @NonNull ClientTransaction createTransaction() {
checkNotClosed();
synchronized (this) {
* @throws DOMTransactionChainClosedException if this history is closed
* @throws IllegalStateException if a previous dependent transaction has not been closed
*/
+ // Non-final for mocking
public ClientSnapshot takeSnapshot() {
checkNotClosed();
}
}
- @GuardedBy("this")
+ @Holding("this")
abstract ClientSnapshot doCreateSnapshot();
- @GuardedBy("this")
+ @Holding("this")
abstract ClientTransaction doCreateTransaction();
/**
- * Callback invoked from {@link ClientTransaction} when a child transaction readied for submission.
+ * Callback invoked from {@link AbstractClientHandle}'s lifecycle to inform that a particular transaction is
+ * completing with a set of participating shards.
*
* @param txId Transaction identifier
+ * @param participatingShards Participating shard cookies
+ */
+ final void onTransactionShardsBound(final TransactionIdentifier txId, final Set<Long> participatingShards) {
+ // Guard against startReconnect() kicking in. It is okay to connect new participants concurrently, as those
+ // will not see the holes caused by this.
+ final long stamp = lock.readLock();
+ try {
+ for (var entry : histories.entrySet()) {
+ if (!participatingShards.contains(entry.getKey())) {
+ entry.getValue().skipTransaction(txId);
+ }
+ }
+ } finally {
+ lock.unlockRead(stamp);
+ }
+ }
+
+ /**
+ * Callback invoked from {@link ClientTransaction} when a child transaction readied for submission.
+ *
+ * @param tx Client transaction
* @param cohort Transaction commit cohort
*/
synchronized AbstractTransactionCommitCohort onTransactionReady(final ClientTransaction tx,
*
* @param txId transaction identifier
*/
+ // Non-final for mocking
synchronized void onTransactionComplete(final TransactionIdentifier txId) {
if (readyTransactions.remove(txId) == null) {
LOG.warn("Could not find completed transaction {}", txId);
}
}
- HistoryReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn) {
+ final HistoryReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn) {
/*
* This looks ugly and unusual and there is a reason for that, as the locking involved is in multiple places.
*
}
};
}
-
}
import akka.actor.ActorRef;
import akka.util.Timeout;
+import com.google.common.base.Throwables;
import java.util.concurrent.TimeUnit;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
try {
return (DataStoreClient) Await.result(ExplicitAsk.ask(actor, GET_CLIENT_FACTORY,
Timeout.apply(timeout, unit)), Duration.Inf());
- } catch (RuntimeException e) {
- throw e;
} catch (Exception e) {
- throw new RuntimeException(e);
+ Throwables.throwIfUnchecked(e);
+ throw new IllegalStateException(e);
}
}
}
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.StampedLock;
-import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
+import java.util.stream.Stream;
import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
import org.opendaylight.controller.cluster.access.client.ReconnectForwarder;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private volatile Throwable aborted;
AbstractDataStoreClientBehavior(final ClientActorContext context,
- final BackendInfoResolver<ShardBackendInfo> resolver) {
+ final AbstractShardBackendResolver resolver) {
super(context, resolver);
singleHistory = new SingleClientHistory(this, new LocalHistoryIdentifier(getIdentifier(), 0));
}
try {
if (aborted != null) {
Throwables.throwIfUnchecked(aborted);
- throw new RuntimeException(aborted);
+ throw new IllegalStateException(aborted);
}
final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
}
abstract Long resolveShardForPath(YangInstanceIdentifier path);
+
+ abstract Stream<Long> resolveAllShards();
+
+ final ActorUtils actorUtils() {
+ return ((AbstractShardBackendResolver) resolver()).actorUtils();
+ }
}
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
* <p>
* This class interacts with the queueing mechanism in ClientActorBehavior, hence once we arrive at a decision
* to use either a local or remote implementation, we are stuck with it. We can re-evaluate on the next transaction.
- *
- * @author Robert Varga
*/
-abstract class AbstractProxyTransaction implements Identifiable<TransactionIdentifier> {
+abstract sealed class AbstractProxyTransaction implements Identifiable<TransactionIdentifier>
+ permits LocalProxyTransaction, RemoteProxyTransaction {
/**
* Marker object used instead of read-type of requests, which are satisfied only once. This has a lower footprint
* and allows compressing multiple requests into a single entry. This class is not thread-safe.
latch.await();
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for latch of {}", successor);
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
return successor;
}
this.prevState);
this.prevState = requireNonNull(prevState);
// We cannot have duplicate successor states, so this check is sufficient
- this.done = DONE.equals(prevState);
+ done = DONE.equals(prevState);
}
// To be called from safe contexts, where successor is known to be completed
doDelete(path);
}
- final void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ final void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
checkReadWrite();
checkNotSealed();
doMerge(path, data);
}
- final void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ final void write(final YangInstanceIdentifier path, final NormalizedNode data) {
checkReadWrite();
checkNotSealed();
doWrite(path, data);
return doExists(path);
}
- final FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+ final FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
checkNotSealed();
return doRead(path);
}
// Propagate state and seal the successor.
final Optional<ModifyTransactionRequest> optState = flushState();
if (optState.isPresent()) {
- forwardToSuccessor(successor, optState.get(), null);
+ forwardToSuccessor(successor, optState.orElseThrow(), null);
}
successor.predecessorSealed();
}
});
}
- final void abort(final VotingFuture<Void> ret) {
+ final void abort(final VotingFuture<Empty> ret) {
checkSealed();
sendDoAbort(t -> {
final long enqueuedTicks = parent.currentTime();
final Optional<ModifyTransactionRequest> optState = flushState();
if (optState.isPresent()) {
- successor.handleReplayedRemoteRequest(optState.get(), null, enqueuedTicks);
+ successor.handleReplayedRemoteRequest(optState.orElseThrow(), null, enqueuedTicks);
}
if (successor.markSealed()) {
successor.sealAndSend(OptionalLong.of(enqueuedTicks));
abstract void doDelete(YangInstanceIdentifier path);
- abstract void doMerge(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+ abstract void doMerge(YangInstanceIdentifier path, NormalizedNode data);
- abstract void doWrite(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+ abstract void doWrite(YangInstanceIdentifier path, NormalizedNode data);
abstract FluentFuture<Boolean> doExists(YangInstanceIdentifier path);
- abstract FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(YangInstanceIdentifier path);
+ abstract FluentFuture<Optional<NormalizedNode>> doRead(YangInstanceIdentifier path);
@GuardedBy("this")
abstract Optional<ModifyTransactionRequest> flushState();
abstract void handleReplayedRemoteRequest(TransactionRequest<?> request,
@Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
- private static IllegalStateException unhandledResponseException(final Response<?, ?> resp) {
+ static final @NonNull IllegalArgumentException unhandledRequest(final TransactionRequest<?> request) {
+ return new IllegalArgumentException("Unhandled request " + request);
+ }
+
+ private static @NonNull IllegalStateException unhandledResponseException(final Response<?, ?> resp) {
return new IllegalStateException("Unhandled response " + resp.getClass());
}
private synchronized void onStageResolved(final ShardBackendInfo info, final Throwable failure) {
if (failure == null) {
- this.result = requireNonNull(info);
+ result = requireNonNull(info);
} else {
LOG.warn("Failed to resolve shard", failure);
}
// FIXME: we really need just ActorContext.findPrimaryShardAsync()
AbstractShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
this.actorUtils = requireNonNull(actorUtils);
- this.connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.BORON,
+ connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.POTASSIUM,
ABIVersion.current()));
}
return () -> staleBackendInfoCallbacks.remove(callback);
}
- protected void notifyStaleBackendInfoCallbacks(Long cookie) {
+ protected void notifyStaleBackendInfoCallbacks(final Long cookie) {
staleBackendInfoCallbacks.forEach(callback -> callback.accept(cookie));
}
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
/**
* Base class for internal {@link DOMStoreThreePhaseCommitCohort} implementation. It contains utility constants for
*/
abstract class AbstractTransactionCommitCohort implements DOMStoreThreePhaseCommitCohort {
static final ListenableFuture<Boolean> TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE);
- static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
+ static final ListenableFuture<Empty> EMPTY_FUTURE = Futures.immediateFuture(Empty.value());
private final AbstractClientHistory parent;
private final TransactionIdentifier txId;
import com.google.common.util.concurrent.FluentFuture;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
}
public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
- return ensureSnapshotProxy(path).exists(path);
+ return ensureProxy(path).exists(path);
}
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
- return ensureSnapshotProxy(path).read(path);
+ public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
+ return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
+ }
+
+ private FluentFuture<Optional<NormalizedNode>> readRoot() {
+ return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+ .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
}
@Override
final AbstractProxyTransaction createProxy(final Long shard) {
return parent().createSnapshotProxy(getIdentifier(), shard);
}
-
- private AbstractProxyTransaction ensureSnapshotProxy(final YangInstanceIdentifier path) {
- return ensureProxy(path);
- }
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static com.google.common.base.Preconditions.checkState;
+
import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.FluentFuture;
import java.util.Collection;
+import java.util.Map;
import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
*/
@Beta
public class ClientTransaction extends AbstractClientHandle<AbstractProxyTransaction> {
-
- private ClientTransactionCursor cursor;
-
ClientTransaction(final AbstractClientHistory parent, final TransactionIdentifier transactionId) {
super(parent, transactionId);
}
- private AbstractProxyTransaction ensureTransactionProxy(final YangInstanceIdentifier path) {
- return ensureProxy(path);
+ public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
+ return ensureProxy(path).exists(path);
}
- public DOMDataTreeWriteCursor openCursor() {
- Preconditions.checkState(cursor == null, "Transaction %s has open cursor", getIdentifier());
- cursor = new ClientTransactionCursor(this);
- return cursor;
+ public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
+ return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
}
- public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
- return ensureTransactionProxy(path).exists(path);
+ private FluentFuture<Optional<NormalizedNode>> readRoot() {
+ return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+ .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
}
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
- return ensureTransactionProxy(path).read(path);
+ public void delete(final YangInstanceIdentifier path) {
+ if (path.isEmpty()) {
+ ensureAllProxies().forEach(proxy -> proxy.delete(YangInstanceIdentifier.of()));
+ } else {
+ ensureProxy(path).delete(path);
+ }
}
- public void delete(final YangInstanceIdentifier path) {
- ensureTransactionProxy(path).delete(path);
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+ if (path.isEmpty()) {
+ mergeRoot(RootScatterGather.castRootNode(data));
+ } else {
+ ensureProxy(path).merge(path, data);
+ }
+ }
+
+ private void mergeRoot(final @NonNull ContainerNode rootData) {
+ if (!rootData.isEmpty()) {
+ RootScatterGather.scatterTouched(rootData, this::ensureProxy).forEach(
+ scattered -> scattered.shard().merge(YangInstanceIdentifier.of(), scattered.container()));
+ }
+ }
+
+ public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+ if (path.isEmpty()) {
+ writeRoot(RootScatterGather.castRootNode(data));
+ } else {
+ ensureProxy(path).write(path, data);
+ }
}
- public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- ensureTransactionProxy(path).merge(path, data);
+ private void writeRoot(final @NonNull ContainerNode rootData) {
+ RootScatterGather.scatterAll(rootData, this::ensureProxy, ensureAllProxies()).forEach(
+ scattered -> scattered.shard().write(YangInstanceIdentifier.of(), scattered.container()));
}
- public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- ensureTransactionProxy(path).write(path, data);
+ private AbstractProxyTransaction ensureProxy(final PathArgument childId) {
+ return ensureProxy(YangInstanceIdentifier.of(childId));
}
public DOMStoreThreePhaseCommitCohort ready() {
- final Collection<AbstractProxyTransaction> toReady = ensureClosed();
- Preconditions.checkState(toReady != null, "Attempted to submit a closed transaction %s", this);
+ final Map<Long, AbstractProxyTransaction> participants = ensureClosed();
+ checkState(participants != null, "Attempted to submit a closed transaction %s", this);
+ final Collection<AbstractProxyTransaction> toReady = participants.values();
toReady.forEach(AbstractProxyTransaction::seal);
- final AbstractTransactionCommitCohort cohort;
- switch (toReady.size()) {
- case 0:
- cohort = new EmptyTransactionCommitCohort(parent(), getIdentifier());
- break;
- case 1:
- cohort = new DirectTransactionCommitCohort(parent(), getIdentifier(),
- Iterables.getOnlyElement(toReady));
- break;
- default:
- cohort = new ClientTransactionCommitCohort(parent(), getIdentifier(), toReady);
- break;
- }
- return parent().onTransactionReady(this, cohort);
+ final TransactionIdentifier txId = getIdentifier();
+ final AbstractClientHistory parent = parent();
+ parent.onTransactionShardsBound(txId, participants.keySet());
+
+ final AbstractTransactionCommitCohort cohort = switch (toReady.size()) {
+ case 0 -> new EmptyTransactionCommitCohort(parent, txId);
+ case 1 -> new DirectTransactionCommitCohort(parent, txId, toReady.iterator().next());
+ default -> new ClientTransactionCommitCohort(parent, txId, toReady);
+ };
+ return parent.onTransactionReady(this, cohort);
}
@Override
final AbstractProxyTransaction createProxy(final Long shard) {
return parent().createTransactionProxy(getIdentifier(), shard);
}
-
- void closeCursor(final @NonNull DOMDataTreeCursor cursorToClose) {
- if (cursorToClose.equals(this.cursor)) {
- this.cursor = null;
- }
- }
}
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohort {
private final Collection<AbstractProxyTransaction> proxies;
return ret;
}
- private ListenableFuture<Void> addComplete(final ListenableFuture<Void> future) {
+ private <T> ListenableFuture<T> addComplete(final ListenableFuture<T> future) {
future.addListener(this::complete, MoreExecutors.directExecutor());
return future;
}
@Override
- public ListenableFuture<Void> preCommit() {
- final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+ public ListenableFuture<Empty> preCommit() {
+ final var ret = new VotingFuture<>(Empty.value(), proxies.size());
for (AbstractProxyTransaction proxy : proxies) {
proxy.preCommit(ret);
}
}
@Override
- public ListenableFuture<Void> commit() {
- final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+ public ListenableFuture<CommitInfo> commit() {
+ final var ret = new VotingFuture<>(CommitInfo.empty(), proxies.size());
for (AbstractProxyTransaction proxy : proxies) {
proxy.doCommit(ret);
}
}
@Override
- public ListenableFuture<Void> abort() {
- final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+ public ListenableFuture<Empty> abort() {
+ final var ret = new VotingFuture<>(Empty.value(), proxies.size());
for (AbstractProxyTransaction proxy : proxies) {
proxy.abort(ret);
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker.actors.dds;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import java.util.Arrays;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A {@link DOMDataTreeWriteCursor} tied to a {@link ClientTransaction}.
- *
- * @author Robert Varga
- */
-final class ClientTransactionCursor implements DOMDataTreeWriteCursor {
- private YangInstanceIdentifier current = YangInstanceIdentifier.empty();
- private final ClientTransaction parent;
-
- ClientTransactionCursor(final ClientTransaction parent) {
- this.parent = requireNonNull(parent);
- }
-
- @Override
- public void enter(final PathArgument child) {
- current = current.node(child);
- }
-
- @Override
- public void enter(final PathArgument... path) {
- enter(Arrays.asList(path));
- }
-
- @Override
- public void enter(final Iterable<PathArgument> path) {
- path.forEach(this::enter);
- }
-
- @Override
- public void exit() {
- final YangInstanceIdentifier currentParent = current.getParent();
- checkState(currentParent != null);
- current = currentParent;
- }
-
- @Override
- public void exit(final int depth) {
- for (int i = 0; i < depth; ++i) {
- exit();
- }
- }
-
- @Override
- public void close() {
- parent.closeCursor(this);
- }
-
- @Override
- public void delete(final PathArgument child) {
- parent.delete(current.node(child));
- }
-
- @Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
- parent.merge(current.node(child), data);
- }
-
- @Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
- parent.write(current.node(child), data);
- }
-}
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
/**
* An {@link AbstractTransactionCommitCohort} implementation for transactions which contain a single proxy. Since there
}
@Override
- public ListenableFuture<Void> preCommit() {
- return VOID_FUTURE;
+ public ListenableFuture<Empty> preCommit() {
+ return EMPTY_FUTURE;
}
@Override
- public ListenableFuture<Void> abort() {
+ public ListenableFuture<Empty> abort() {
complete();
- return VOID_FUTURE;
+ return EMPTY_FUTURE;
}
@Override
- public ListenableFuture<Void> commit() {
+ public ListenableFuture<CommitInfo> commit() {
complete();
- return VOID_FUTURE;
+ return CommitInfo.emptyFluentFuture();
}
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import java.util.function.Function;
+import java.util.stream.Stream;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* @author Robert Varga
*/
final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
- private final Function<YangInstanceIdentifier, Long> pathToShard;
+ private final ModuleShardBackendResolver resolver;
private DistributedDataStoreClientBehavior(final ClientActorContext context,
final ModuleShardBackendResolver resolver) {
super(context, resolver);
- pathToShard = resolver::resolveShardForPath;
+ this.resolver = resolver;
}
DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
@Override
Long resolveShardForPath(final YangInstanceIdentifier path) {
- return pathToShard.apply(path);
+ return resolver.resolveShardForPath(path);
+ }
+
+ @Override
+ Stream<Long> resolveAllShards() {
+ return resolver.resolveAllShards();
}
@Override
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
/**
* An {@link AbstractTransactionCommitCohort} for use with empty transactions. This relies on the fact that no backends
}
@Override
- public ListenableFuture<Void> preCommit() {
- return VOID_FUTURE;
+ public ListenableFuture<Empty> preCommit() {
+ return EMPTY_FUTURE;
}
@Override
- public ListenableFuture<Void> abort() {
+ public ListenableFuture<Empty> abort() {
complete();
- return VOID_FUTURE;
+ return EMPTY_FUTURE;
}
@Override
- public ListenableFuture<Void> commit() {
+ public ListenableFuture<CommitInfo> commit() {
complete();
- return VOID_FUTURE;
+ return CommitInfo.emptyFluentFuture();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+
+/**
+ * A {@link CursorAwareDataTreeModification} which does not really do anything and throws an
+ * {@link FailedDataTreeModificationException} for most of its operations. Used in case we when
+ * {@link DataTreeSnapshot#newModification()} fails, see {@link LocalReadWriteProxyTransaction} for details. Surrounding
+ * code should guard against invocation of most of these methods.
+ */
+record FailedDataTreeModification(
+ @NonNull EffectiveModelContext modelContext,
+ @NonNull Exception cause) implements CursorAwareDataTreeModification {
+
+ FailedDataTreeModification {
+ requireNonNull(modelContext);
+ requireNonNull(cause);
+ }
+
+ @Override
+ public void delete(final YangInstanceIdentifier path) {
+ throw ex();
+ }
+
+ @Override
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+ throw ex();
+ }
+
+ @Override
+ public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+ throw ex();
+ }
+
+ @Override
+ public void ready() {
+ // No-op
+ }
+
+ @Override
+ public void applyToCursor(final DataTreeModificationCursor cursor) {
+ throw ex();
+ }
+
+ @Override
+ public Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+ throw ex();
+ }
+
+ @Override
+ public CursorAwareDataTreeModification newModification() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Optional<? extends DataTreeModificationCursor> openCursor(final YangInstanceIdentifier path) {
+ throw ex();
+ }
+
+ private @NonNull FailedDataTreeModificationException ex() {
+ return new FailedDataTreeModificationException(cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A box {@link RuntimeException} thrown by {@link FailedDataTreeModification} from its user-facing methods.
+ */
+final class FailedDataTreeModificationException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ FailedDataTreeModificationException(final Exception cause) {
+ super(null, requireNonNull(cause), false, false);
+ }
+}
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
import org.opendaylight.yangtools.util.concurrent.FluentFutures;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* <p>
* This class is not thread-safe as usual with transactions. Since it does not interact with the backend until the
* transaction is submitted, at which point this class gets out of the picture, this is not a cause for concern.
- *
- * @author Robert Varga
*/
-abstract class LocalProxyTransaction extends AbstractProxyTransaction {
+abstract sealed class LocalProxyTransaction extends AbstractProxyTransaction
+ permits LocalReadOnlyProxyTransaction, LocalReadWriteProxyTransaction {
private static final Logger LOG = LoggerFactory.getLogger(LocalProxyTransaction.class);
- private final TransactionIdentifier identifier;
+ private final @NonNull TransactionIdentifier identifier;
LocalProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier, final boolean isDone) {
super(parent, isDone);
@Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
@Override
- final FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
- return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path).isPresent());
+ FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+ final boolean result;
+ try {
+ result = readOnlyView().readNode(path).isPresent();
+ } catch (FailedDataTreeModificationException e) {
+ return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+ }
+ return FluentFutures.immediateBooleanFluentFuture(result);
}
@Override
- final FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(final YangInstanceIdentifier path) {
- return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path));
+ FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+ final Optional<NormalizedNode> result;
+ try {
+ result = readOnlyView().readNode(path);
+ } catch (FailedDataTreeModificationException e) {
+ return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+ }
+ return FluentFutures.immediateFluentFuture(result);
}
@Override
if (request instanceof AbortLocalTransactionRequest) {
enqueueAbort(request, callback, enqueuedTicks);
} else {
- throw new IllegalArgumentException("Unhandled request" + request);
- }
- }
-
- private boolean handleReadRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
- // Note we delay completion of read requests to limit the scope at which the client can run, as they have
- // listeners, which we do not want to execute while we are reconnecting.
- if (request instanceof ReadTransactionRequest) {
- final YangInstanceIdentifier path = ((ReadTransactionRequest) request).getPath();
- final Optional<NormalizedNode<?, ?>> result = readOnlyView().readNode(path);
- if (callback != null) {
- // XXX: FB does not see that callback is final, on stack and has be check for non-null.
- final Consumer<Response<?, ?>> fbIsStupid = requireNonNull(callback);
- executeInActor(() -> fbIsStupid.accept(new ReadTransactionSuccess(request.getTarget(),
- request.getSequence(), result)));
- }
- return true;
- } else if (request instanceof ExistsTransactionRequest) {
- final YangInstanceIdentifier path = ((ExistsTransactionRequest) request).getPath();
- final boolean result = readOnlyView().readNode(path).isPresent();
- if (callback != null) {
- // XXX: FB does not see that callback is final, on stack and has be check for non-null.
- final Consumer<Response<?, ?>> fbIsStupid = requireNonNull(callback);
- executeInActor(() -> fbIsStupid.accept(new ExistsTransactionSuccess(request.getTarget(),
- request.getSequence(), result)));
- }
- return true;
- } else {
- return false;
+ throw unhandledRequest(request);
}
}
// hence we can skip sequence increments.
LOG.debug("Not replaying {}", request);
} else {
- throw new IllegalArgumentException("Unhandled request " + request);
+ throw unhandledRequest(request);
}
}
} else if (request instanceof TransactionPurgeRequest) {
enqueuePurge(callback);
} else {
- throw new IllegalArgumentException("Unhandled request " + request);
+ throw unhandledRequest(request);
+ }
+ }
+
+ @NonNull Response<?, ?> handleExistsRequest(final @NonNull DataTreeSnapshot snapshot,
+ final @NonNull ExistsTransactionRequest request) {
+ try {
+ return new ExistsTransactionSuccess(request.getTarget(), request.getSequence(),
+ snapshot.readNode(request.getPath()).isPresent());
+ } catch (FailedDataTreeModificationException e) {
+ return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+ ReadFailedException.MAPPER.apply(e)));
+ }
+ }
+
+ @NonNull Response<?, ?> handleReadRequest(final @NonNull DataTreeSnapshot snapshot,
+ final @NonNull ReadTransactionRequest request) {
+ try {
+ return new ReadTransactionSuccess(request.getTarget(), request.getSequence(),
+ snapshot.readNode(request.getPath()));
+ } catch (FailedDataTreeModificationException e) {
+ return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+ ReadFailedException.MAPPER.apply(e)));
+ }
+ }
+
+ private boolean handleReadRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
+ // Note we delay completion of read requests to limit the scope at which the client can run, as they have
+ // listeners, which we do not want to execute while we are reconnecting.
+ if (request instanceof ReadTransactionRequest) {
+ if (callback != null) {
+ final var response = handleReadRequest(readOnlyView(), (ReadTransactionRequest) request);
+ executeInActor(() -> callback.accept(response));
+ }
+ return true;
+ } else if (request instanceof ExistsTransactionRequest) {
+ if (callback != null) {
+ final var response = handleExistsRequest(readOnlyView(), (ExistsTransactionRequest) request);
+ executeInActor(() -> callback.accept(response));
+ }
+ return true;
+ } else {
+ return false;
}
}
@Override
final void forwardToRemote(final RemoteProxyTransaction successor, final TransactionRequest<?> request,
final Consumer<Response<?, ?>> callback) {
- if (request instanceof CommitLocalTransactionRequest) {
- final CommitLocalTransactionRequest req = (CommitLocalTransactionRequest) request;
+ if (request instanceof final CommitLocalTransactionRequest req) {
final DataTreeModification mod = req.getModification();
LOG.debug("Applying modification {} to successor {}", mod, successor);
mod.applyToCursor(new AbstractDataTreeModificationCursor() {
@Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void write(final PathArgument child, final NormalizedNode data) {
successor.write(current().node(child), data);
}
@Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void merge(final PathArgument child, final NormalizedNode data) {
successor.merge(current().node(child), data);
}
} else if (request instanceof ModifyTransactionRequest) {
successor.handleForwardedRequest(request, callback);
} else {
- throwUnhandledRequest(request);
+ throw unhandledRequest(request);
}
}
} else if (request instanceof TransactionPurgeRequest) {
successor.enqueuePurge(callback);
} else {
- throwUnhandledRequest(request);
+ throw unhandledRequest(request);
}
LOG.debug("Forwarded request {} to successor {}", request, successor);
}
- private static void throwUnhandledRequest(final TransactionRequest<?> request) {
- throw new IllegalArgumentException("Unhandled request" + request);
- }
-
void sendAbort(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
sendRequest(request, callback);
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
/**
* A read-only specialization of {@link LocalProxyTransaction}. This class is NOT thread-safe.
* @author Robert Varga
*/
final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
-
private final DataTreeSnapshot snapshot;
LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier,
LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
super(parent, identifier, true);
// It is an error to touch snapshot once we are DONE
- this.snapshot = null;
+ snapshot = null;
}
@Override
@Override
DataTreeSnapshot readOnlyView() {
- return checkNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
+ return verifyNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
}
@Override
}
@Override
- void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
throw new UnsupportedOperationException("doMerge");
}
@Override
- void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
throw new UnsupportedOperationException("doWrite");
}
private static void commonModifyTransactionRequest(final ModifyTransactionRequest request) {
verify(request.getModifications().isEmpty());
- final PersistenceProtocol protocol = request.getPersistenceProtocol().get();
+ final PersistenceProtocol protocol = request.getPersistenceProtocol().orElseThrow();
verify(protocol == PersistenceProtocol.ABORT);
}
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.util.concurrent.FluentFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.function.BiConsumer;
import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
+import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
+import org.opendaylight.controller.cluster.access.commands.ReadTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionAbortRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionDelete;
import org.opendaylight.controller.cluster.access.commands.TransactionDoCommitRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
+import org.opendaylight.yangtools.util.concurrent.FluentFutures;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*/
private Exception recordedFailure;
+ @SuppressWarnings("checkstyle:IllegalCatch")
LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier,
- final DataTreeSnapshot snapshot) {
+ final DataTreeSnapshot snapshot) {
super(parent, identifier, false);
- this.modification = (CursorAwareDataTreeModification) snapshot.newModification();
+
+ if (snapshot instanceof FailedDataTreeModification failed) {
+ recordedFailure = failed.cause();
+ modification = failed;
+ } else {
+ CursorAwareDataTreeModification mod;
+ try {
+ mod = (CursorAwareDataTreeModification) snapshot.newModification();
+ } catch (Exception e) {
+ LOG.debug("Failed to instantiate modification for {}", identifier, e);
+ recordedFailure = e;
+ mod = new FailedDataTreeModification(snapshot.modelContext(), e);
+ }
+ modification = mod;
+ }
}
LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
super(parent, identifier, true);
// This is DONE transaction, this should never be touched
- this.modification = null;
+ modification = null;
}
@Override
return getModification();
}
+ @Override
+ FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+ final var ex = recordedFailure;
+ return ex == null ? super.doExists(path)
+ : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+ }
+
+ @Override
+ FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+ final var ex = recordedFailure;
+ return ex == null ? super.doRead(path)
+ : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+ }
+
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
void doDelete(final YangInstanceIdentifier path) {
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
- void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
final CursorAwareDataTreeModification mod = getModification();
if (recordedFailure != null) {
LOG.debug("Transaction {} recorded failure, ignoring merge to {}", getIdentifier(), path);
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
- void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
final CursorAwareDataTreeModification mod = getModification();
if (recordedFailure != null) {
LOG.debug("Transaction {} recorded failure, ignoring write to {}", getIdentifier(), path);
}
private void sealModification() {
- Preconditions.checkState(sealedModification == null, "Transaction %s is already sealed", this);
+ checkState(sealedModification == null, "Transaction %s is already sealed", this);
final CursorAwareDataTreeModification mod = getModification();
mod.ready();
sealedModification = mod;
sealedModification.applyToCursor(new AbstractDataTreeModificationCursor() {
@Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void write(final PathArgument child, final NormalizedNode data) {
b.addModification(new TransactionWrite(current().node(child), data));
}
@Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void merge(final PathArgument child, final NormalizedNode data) {
b.addModification(new TransactionMerge(current().node(child), data));
}
return Optional.of(b.build());
}
- DataTreeSnapshot getSnapshot() {
- Preconditions.checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
+ CursorAwareDataTreeSnapshot getSnapshot() {
+ checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
return sealedModification;
}
final Optional<PersistenceProtocol> maybeProtocol = request.getPersistenceProtocol();
if (maybeProtocol.isPresent()) {
- Verify.verify(callback != null, "Request %s has null callback", request);
+ final var cb = verifyNotNull(callback, "Request %s has null callback", request);
if (markSealed()) {
sealOnly();
}
- switch (maybeProtocol.get()) {
+ switch (maybeProtocol.orElseThrow()) {
case ABORT:
- sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), callback);
+ sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), cb);
break;
case READY:
// No-op, as we have already issued a sealOnly() and we are not transmitting anything
break;
case SIMPLE:
- sendMethod.accept(commitRequest(false), callback);
+ sendMethod.accept(commitRequest(false), cb);
break;
case THREE_PHASE:
- sendMethod.accept(commitRequest(true), callback);
+ sendMethod.accept(commitRequest(true), cb);
break;
default:
- throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.get());
+ throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.orElseThrow());
}
}
}
}
}
+ @Override
+ Response<?, ?> handleExistsRequest(final DataTreeSnapshot snapshot, final ExistsTransactionRequest request) {
+ final var ex = recordedFailure;
+ return ex == null ? super.handleExistsRequest(snapshot, request)
+ : request.toRequestFailure(
+ new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+ }
+
+ @Override
+ Response<?, ?> handleReadRequest(final DataTreeSnapshot snapshot, final ReadTransactionRequest request) {
+ final var ex = recordedFailure;
+ return ex == null ? super.handleReadRequest(snapshot, request)
+ : request.toRequestFailure(
+ new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+ }
+
@Override
void forwardToLocal(final LocalProxyTransaction successor, final TransactionRequest<?> request,
final Consumer<Response<?, ?>> callback) {
if (request instanceof CommitLocalTransactionRequest) {
- Verify.verify(successor instanceof LocalReadWriteProxyTransaction);
- ((LocalReadWriteProxyTransaction) successor).sendRebased((CommitLocalTransactionRequest)request, callback);
- LOG.debug("Forwarded request {} to successor {}", request, successor);
+ verifyLocalReadWrite(successor).sendRebased((CommitLocalTransactionRequest)request, callback);
+ } else if (request instanceof ModifyTransactionRequest) {
+ verifyLocalReadWrite(successor).handleForwardedRemoteRequest(request, callback);
} else {
super.forwardToLocal(successor, request, callback);
+ return;
}
+ LOG.debug("Forwarded request {} to successor {}", request, successor);
+ }
+
+ private static LocalReadWriteProxyTransaction verifyLocalReadWrite(final LocalProxyTransaction successor) {
+ verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor);
+ return (LocalReadWriteProxyTransaction) successor;
}
@Override
closedException = this::abortedException;
}
+ @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of recorded failure")
private @NonNull CursorAwareDataTreeModification getModification() {
if (closedException != null) {
throw closedException.get();
}
-
- return Preconditions.checkNotNull(modification, "Transaction %s is DONE", getIdentifier());
+ return verifyNotNull(modification, "Transaction %s is DONE", getIdentifier());
}
private void sendRebased(final CommitLocalTransactionRequest request, final Consumer<Response<?, ?>> callback) {
// Rebase old modification on new data tree.
final CursorAwareDataTreeModification mod = getModification();
- try (DataTreeModificationCursor cursor = mod.openCursor()) {
- request.getModification().applyToCursor(cursor);
+ if (!(mod instanceof FailedDataTreeModification)) {
+ request.getDelayedFailure().ifPresentOrElse(failure -> {
+ if (recordedFailure == null) {
+ recordedFailure = failure;
+ } else {
+ recordedFailure.addSuppressed(failure);
+ }
+ }, () -> {
+ try (DataTreeModificationCursor cursor = mod.openCursor()) {
+ request.getModification().applyToCursor(cursor);
+ }
+ });
}
if (markSealed()) {
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import static akka.pattern.Patterns.ask;
import static com.google.common.base.Verify.verifyNotNull;
import akka.dispatch.ExecutionContexts;
import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
import akka.util.Timeout;
-import com.google.common.collect.BiMap;
import com.google.common.collect.ImmutableBiMap;
-import com.google.common.collect.ImmutableBiMap.Builder;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges;
@GuardedBy("this")
private long nextShard = 1;
- private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
+ private volatile ImmutableBiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
// FIXME: we really need just ActorContext.findPrimaryShardAsync()
ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
super(clientId, actorUtils);
- shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges(
- this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES))
+ shardAvailabilityChangesRegFuture = Patterns.ask(actorUtils.getShardManager(),
+ new RegisterForShardAvailabilityChanges(this::onShardAvailabilityChange),
+ Timeout.apply(60, TimeUnit.MINUTES))
.map(reply -> (Registration)reply, ExecutionContexts.global());
shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
@Override
- public void onComplete(Throwable failure, Registration reply) {
+ public void onComplete(final Throwable failure, final Registration reply) {
if (failure != null) {
LOG.error("RegisterForShardAvailabilityChanges failed", failure);
}
}, ExecutionContexts.global());
}
- private void onShardAvailabilityChange(String shardName) {
+ private void onShardAvailabilityChange(final String shardName) {
LOG.debug("onShardAvailabilityChange for {}", shardName);
Long cookie = shards.get(shardName);
}
Long resolveShardForPath(final YangInstanceIdentifier path) {
- final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
+ return resolveCookie(actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path));
+ }
+
+ Stream<Long> resolveAllShards() {
+ return actorUtils().getConfiguration().getAllShardNames().stream()
+ .sorted()
+ .map(this::resolveCookie);
+ }
+
+ private @NonNull Long resolveCookie(final String shardName) {
+ final Long cookie = shards.get(shardName);
+ return cookie != null ? cookie : populateShard(shardName);
+ }
+
+ private synchronized @NonNull Long populateShard(final String shardName) {
Long cookie = shards.get(shardName);
if (cookie == null) {
- synchronized (this) {
- cookie = shards.get(shardName);
- if (cookie == null) {
- cookie = nextShard++;
-
- Builder<String, Long> builder = ImmutableBiMap.builder();
- builder.putAll(shards);
- builder.put(shardName, cookie);
- shards = builder.build();
- }
- }
+ cookie = nextShard++;
+ shards = ImmutableBiMap.<String, Long>builder().putAll(shards).put(shardName, cookie).build();
}
-
return cookie;
}
public void close() {
shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
@Override
- public void onComplete(Throwable failure, Registration reply) {
+ public void onComplete(final Throwable failure, final Registration reply) {
reply.close();
}
}, ExecutionContexts.global());
}
@Override
- public String resolveCookieName(Long cookie) {
+ public String resolveCookieName(final Long cookie) {
return verifyNotNull(shards.inverse().get(cookie), "Unexpected null cookie: %s", cookie);
}
}
package org.opendaylight.controller.cluster.databroker.actors.dds;
import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import com.google.common.base.Verify;
+import com.google.common.collect.ImmutableList;
+import com.google.common.primitives.UnsignedLong;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Consumer;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
import org.opendaylight.controller.cluster.access.commands.DestroyLocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.PurgeLocalHistoryRequest;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
void onTransactionCompleted(final AbstractProxyTransaction tx) {
- Verify.verify(tx instanceof LocalProxyTransaction);
+ verify(tx instanceof LocalProxyTransaction, "Unexpected transaction %s", tx);
if (tx instanceof LocalReadWriteProxyTransaction
&& LAST_SEALED_UPDATER.compareAndSet(this, (LocalReadWriteProxyTransaction) tx, null)) {
LOG.debug("Completed last sealed transaction {}", tx);
final ConnectionEntry e = it.next();
final Request<?, ?> req = e.getRequest();
if (identifier.equals(req.getTarget())) {
- Verify.verify(req instanceof LocalHistoryRequest);
+ verify(req instanceof LocalHistoryRequest, "Unexpected request %s", req);
if (req instanceof CreateLocalHistoryRequest) {
successor.connection.enqueueRequest(req, e.getCallback(), e.getEnqueuedTicks());
it.remove();
t.replayMessages(successor, previousEntries);
}
+ // Forward any skipped transactions
+ final var local = skippedTransactions;
+ if (local != null) {
+ LOG.debug("{} forwarding skipped transactions towards successor {}", identifier, successor);
+ successor.skipTransactions(local);
+ skippedTransactions = null;
+ }
+
// Now look for any finalizing messages
it = previousEntries.iterator();
while (it.hasNext()) {
final ConnectionEntry e = it.next();
final Request<?, ?> req = e.getRequest();
if (identifier.equals(req.getTarget())) {
- Verify.verify(req instanceof LocalHistoryRequest);
+ verify(req instanceof LocalHistoryRequest, "Unexpected request %s", req);
if (req instanceof DestroyLocalHistoryRequest) {
successor.connection.enqueueRequest(req, e.getCallback(), e.getEnqueuedTicks());
it.remove();
}
}
- @GuardedBy("lock")
+ @Holding("lock")
@Override
ProxyHistory finishReconnect() {
- final ProxyHistory ret = Verify.verifyNotNull(successor);
+ final ProxyHistory ret = verifyNotNull(successor);
for (AbstractProxyTransaction t : proxies.values()) {
t.finishReconnect();
private static final Logger LOG = LoggerFactory.getLogger(ProxyHistory.class);
private final Lock lock = new ReentrantLock();
- private final LocalHistoryIdentifier identifier;
- private final AbstractClientConnection<ShardBackendInfo> connection;
- private final AbstractClientHistory parent;
+ private final @NonNull LocalHistoryIdentifier identifier;
+ private final @NonNull AbstractClientConnection<ShardBackendInfo> connection;
+ private final @NonNull AbstractClientHistory parent;
@GuardedBy("lock")
private final Map<TransactionIdentifier, AbstractProxyTransaction> proxies = new LinkedHashMap<>();
@GuardedBy("lock")
private ProxyHistory successor;
+ // List of transaction identifiers which were allocated by our parent history, but did not touch our shard. Each of
+ // these represents a hole in otherwise-contiguous allocation of transactionIds. These holes are problematic, as
+ // each of them prevents LeaderFrontendState.purgedHistories from coalescing, leading to a gradual heap exhaustion.
+ //
+ // <p>
+ // We keep these in an ArrayList for fast insertion, as that happens when we are otherwise idle. We translate these
+ // into purge requests when:
+ // - we are about to allocate a new transaction
+ // - we get a successor proxy
+ // - the list grows unreasonably long
+ //
+ // TODO: we are tracking entire TransactionIdentifiers, but really only need to track the longs. Do that once we
+ // have a {@code List<long>}.
+ // FIXME: this is not tuneable, but perhaps should be
+ // FIXME: default value deserves some explanation -- this affects depth of an RB Tree on the receiving end.
+ private static final int PURGE_SKIPPED_TXID_THRESHOLD = 256;
+
+ @GuardedBy("lock")
+ private volatile List<TransactionIdentifier> skippedTransactions;
+
private ProxyHistory(final AbstractClientHistory parent,
final AbstractClientConnection<ShardBackendInfo> connection, final LocalHistoryIdentifier identifier) {
this.parent = requireNonNull(parent);
static ProxyHistory createClient(final AbstractClientHistory parent,
final AbstractClientConnection<ShardBackendInfo> connection, final LocalHistoryIdentifier identifier) {
final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
- return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.get())
+ return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.orElseThrow())
: new Remote(parent, connection, identifier);
}
final AbstractClientConnection<ShardBackendInfo> connection,
final LocalHistoryIdentifier identifier) {
final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
- return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.get())
+ return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.orElseThrow())
: new RemoteSingle(parent, connection, identifier);
}
@Override
+ // Non-final for mocking
public LocalHistoryIdentifier getIdentifier() {
return identifier;
}
return createTransactionProxy(txId, snapshotOnly, false);
}
+ // Non-final for mocking
AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier txId, final boolean snapshotOnly,
final boolean isDone) {
lock.lock();
}
}
+ final void skipTransaction(final TransactionIdentifier txId) {
+ lock.lock();
+ try {
+ if (successor != null) {
+ successor.skipTransaction(txId);
+ return;
+ }
+
+ var local = skippedTransactions;
+ if (local == null) {
+ skippedTransactions = local = new ArrayList<>();
+ }
+ local.add(txId);
+ LOG.debug("Recorded skipped transaction {}", txId);
+ skipIfNeeded(local);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Holding("lock")
+ private void skipIfNeeded(final List<TransactionIdentifier> current) {
+ if (current.size() >= PURGE_SKIPPED_TXID_THRESHOLD) {
+ skippedTransactions = null;
+ doSkipTransactions(current);
+ }
+ }
+
+ private void skipTransactions(final List<TransactionIdentifier> toSkip) {
+ lock.lock();
+ try {
+ if (successor != null) {
+ successor.skipTransactions(toSkip);
+ return;
+ }
+
+ var local = skippedTransactions;
+ if (local != null) {
+ local.addAll(toSkip);
+ } else {
+ skippedTransactions = local = toSkip;
+ }
+ skipIfNeeded(local);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ private void skipTransactions() {
+ var local = skippedTransactions;
+ if (local != null) {
+ lock.lock();
+ try {
+ local = skippedTransactions;
+ if (local != null && successor == null) {
+ skippedTransactions = null;
+ doSkipTransactions(local);
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+
+ @Holding("lock")
+ private void doSkipTransactions(final List<TransactionIdentifier> toSkip) {
+ final var txIds = toSkip.stream()
+ .mapToLong(TransactionIdentifier::getTransactionId)
+ .distinct()
+ .sorted()
+ .mapToObj(UnsignedLong::fromLongBits)
+ .collect(ImmutableList.toImmutableList());
+
+ LOG.debug("Proxy {} skipping transactions {}", this, txIds);
+ connection.enqueueRequest(new SkipTransactionsRequest(new TransactionIdentifier(identifier,
+ txIds.get(0).longValue()), 0, localActor(),txIds.subList(1, txIds.size())), resp -> {
+ LOG.debug("Proxy {} confirmed transaction skip", this);
+ }, connection.currentTime());
+ }
+
final void abortTransaction(final AbstractProxyTransaction tx) {
lock.lock();
try {
}
}
- void purgeTransaction(final AbstractProxyTransaction tx) {
+ final void purgeTransaction(final AbstractProxyTransaction tx) {
lock.lock();
try {
proxies.remove(tx.getIdentifier());
final void enqueueRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback,
final long enqueuedTicks) {
+ skipTransactions();
connection.enqueueRequest(request, callback, enqueuedTicks);
}
final void sendRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
+ skipTransactions();
connection.sendRequest(request, callback);
}
- @GuardedBy("lock")
+ @Holding("lock")
@SuppressWarnings("checkstyle:hiddenField")
abstract AbstractProxyTransaction doCreateTransactionProxy(AbstractClientConnection<ShardBackendInfo> connection,
TransactionIdentifier txId, boolean snapshotOnly, boolean isDone);
+ @Holding("lock")
@SuppressWarnings("checkstyle:hiddenField")
abstract ProxyHistory createSuccessor(AbstractClientConnection<ShardBackendInfo> connection);
@SuppressFBWarnings(value = "UL_UNRELEASED_LOCK", justification = "Lock is released asynchronously via the cohort")
- ProxyReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConnection) {
+ final ProxyReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConnection) {
lock.lock();
if (successor != null) {
lock.unlock();
// No-op for most implementations
}
+ @Holding("lock")
void onTransactionSealed(final AbstractProxyTransaction tx) {
// No-op on most implementations
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* <p>
* This class is not safe to access from multiple application threads, as is usual for transactions. Its internal state
* transitions based on backend responses are thread-safe.
- *
- * @author Robert Varga
*/
final class RemoteProxyTransaction extends AbstractProxyTransaction {
private static final Logger LOG = LoggerFactory.getLogger(RemoteProxyTransaction.class);
- // FIXME: make this tuneable
- private static final int REQUEST_MAX_MODIFICATIONS = 1000;
-
private final ModifyTransactionRequestBuilder builder;
private final boolean sendReadyOnSeal;
private final boolean snapshotOnly;
+ private final int maxModifications;
private boolean builderBusy;
this.snapshotOnly = snapshotOnly;
this.sendReadyOnSeal = sendReadyOnSeal;
builder = new ModifyTransactionRequestBuilder(identifier, localActor());
+ maxModifications = parent.parent().actorUtils().getDatastoreContext().getShardBatchedModificationCount();
}
@Override
}
@Override
- void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
appendModification(new TransactionMerge(path, data), OptionalLong.empty());
}
@Override
- void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
appendModification(new TransactionWrite(path, data), OptionalLong.empty());
}
}
@Override
- FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(final YangInstanceIdentifier path) {
- final SettableFuture<Optional<NormalizedNode<?, ?>>> future = SettableFuture.create();
+ FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+ final SettableFuture<Optional<NormalizedNode>> future = SettableFuture.create();
return sendReadRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(), path,
isSnapshotOnly()), t -> completeRead(path, future, t), future);
}
private void sendModification(final TransactionRequest<?> request, final OptionalLong enqueuedTicks) {
if (enqueuedTicks.isPresent()) {
- enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.getAsLong());
+ enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.orElseThrow());
} else {
sendRequest(request, response -> completeModify(request, response));
}
ensureInitializedBuilder();
builder.addModification(modification);
- if (builder.size() >= REQUEST_MAX_MODIFICATIONS) {
+ if (builder.size() >= maxModifications) {
flushBuilder(enqueuedTicks);
}
} else {
private Exception recordFailedResponse(final Response<?, ?> response) {
final Exception failure;
- if (response instanceof RequestFailure) {
- final RequestException cause = ((RequestFailure<?, ?>) response).getCause();
+ if (response instanceof RequestFailure<?, ?> requestFailure) {
+ final RequestException cause = requestFailure.getCause();
failure = cause instanceof RequestTimeoutException
? new DataStoreUnavailableException(cause.getMessage(), cause) : cause;
} else {
final Response<?, ?> response) {
LOG.debug("Exists request for {} completed with {}", path, response);
- if (response instanceof ExistsTransactionSuccess) {
- future.set(((ExistsTransactionSuccess) response).getExists());
+ if (response instanceof ExistsTransactionSuccess success) {
+ future.set(success.getExists());
} else {
failReadFuture(future, "Error executing exists request for path " + path, response);
}
recordFinishedRequest(response);
}
- private void completeRead(final YangInstanceIdentifier path,
- final SettableFuture<Optional<NormalizedNode<?, ?>>> future, final Response<?, ?> response) {
+ private void completeRead(final YangInstanceIdentifier path, final SettableFuture<Optional<NormalizedNode>> future,
+ final Response<?, ?> response) {
LOG.debug("Read request for {} completed with {}", path, response);
- if (response instanceof ReadTransactionSuccess) {
- future.set(((ReadTransactionSuccess) response).getData());
+ if (response instanceof ReadTransactionSuccess success) {
+ future.set(success.getData());
} else {
failReadFuture(future, "Error reading data for path " + path, response);
}
}
void handleForwardedRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
- if (request instanceof ModifyTransactionRequest) {
- handleForwardedModifyTransactionRequest(callback, (ModifyTransactionRequest) request);
- } else if (request instanceof ReadTransactionRequest) {
+ if (request instanceof ModifyTransactionRequest modifyRequest) {
+ handleForwardedModifyTransactionRequest(callback, modifyRequest);
+ } else if (request instanceof ReadTransactionRequest readRequest) {
ensureFlushedBuider();
sendRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
- ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+ readRequest.getPath(), isSnapshotOnly()), resp -> {
recordFinishedRequest(resp);
callback.accept(resp);
});
- } else if (request instanceof ExistsTransactionRequest) {
+ } else if (request instanceof ExistsTransactionRequest existsRequest) {
ensureFlushedBuider();
sendRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
- ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+ existsRequest.getPath(), isSnapshotOnly()), resp -> {
recordFinishedRequest(resp);
callback.accept(resp);
});
} else if (request instanceof TransactionPurgeRequest) {
enqueuePurge(callback);
} else {
- throw new IllegalArgumentException("Unhandled request {}" + request);
+ throw unhandledRequest(request);
}
}
}
final TransactionRequest<?> tmp;
- switch (maybeProto.get()) {
+ switch (maybeProto.orElseThrow()) {
case ABORT:
tmp = abortRequest();
sendRequest(tmp, resp -> {
});
break;
default:
- throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+ throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
}
}
}
@Override
void handleReplayedLocalRequest(final AbstractLocalTransactionRequest<?> request,
final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
- if (request instanceof CommitLocalTransactionRequest) {
- replayLocalCommitRequest((CommitLocalTransactionRequest) request, callback, enqueuedTicks);
+ if (request instanceof CommitLocalTransactionRequest commitRequest) {
+ replayLocalCommitRequest(commitRequest, callback, enqueuedTicks);
} else if (request instanceof AbortLocalTransactionRequest) {
enqueueRequest(abortRequest(), callback, enqueuedTicks);
} else {
- throw new IllegalStateException("Unhandled request " + request);
+ throw unhandledRequest(request);
}
}
mod.applyToCursor(new AbstractDataTreeModificationCursor() {
@Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void write(final PathArgument child, final NormalizedNode data) {
appendModification(new TransactionWrite(current().node(child), data), optTicks);
}
@Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void merge(final PathArgument child, final NormalizedNode data) {
appendModification(new TransactionMerge(current().node(child), data), optTicks);
}
final Consumer<Response<?, ?>> cb = callback != null ? callback : resp -> { /* NOOP */ };
final OptionalLong optTicks = OptionalLong.of(enqueuedTicks);
- if (request instanceof ModifyTransactionRequest) {
- handleReplayedModifyTransactionRequest(enqueuedTicks, cb, (ModifyTransactionRequest) request);
- } else if (request instanceof ReadTransactionRequest) {
+ if (request instanceof ModifyTransactionRequest modifyRequest) {
+ handleReplayedModifyTransactionRequest(enqueuedTicks, cb, modifyRequest);
+ } else if (request instanceof ReadTransactionRequest readRequest) {
ensureFlushedBuider(optTicks);
enqueueRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
- ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+ readRequest.getPath(), isSnapshotOnly()), resp -> {
recordFinishedRequest(resp);
cb.accept(resp);
}, enqueuedTicks);
- } else if (request instanceof ExistsTransactionRequest) {
+ } else if (request instanceof ExistsTransactionRequest existsRequest) {
ensureFlushedBuider(optTicks);
enqueueRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
- ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+ existsRequest.getPath(), isSnapshotOnly()), resp -> {
recordFinishedRequest(resp);
cb.accept(resp);
}, enqueuedTicks);
enqueueDoAbort(callback, enqueuedTicks);
} else if (request instanceof TransactionPurgeRequest) {
enqueuePurge(callback, enqueuedTicks);
- } else if (request instanceof IncrementTransactionSequenceRequest) {
- final IncrementTransactionSequenceRequest req = (IncrementTransactionSequenceRequest) request;
+ } else if (request instanceof IncrementTransactionSequenceRequest req) {
ensureFlushedBuider(optTicks);
enqueueRequest(new IncrementTransactionSequenceRequest(getIdentifier(), nextSequence(), localActor(),
snapshotOnly, req.getIncrement()), callback, enqueuedTicks);
incrementSequence(req.getIncrement());
} else {
- throw new IllegalArgumentException("Unhandled request {}" + request);
+ throw unhandledRequest(request);
}
}
}
final TransactionRequest<?> tmp;
- switch (maybeProto.get()) {
+ switch (maybeProto.orElseThrow()) {
case ABORT:
tmp = abortRequest();
enqueueRequest(tmp, resp -> {
}, enqueuedTicks);
break;
default:
- throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+ throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
}
}
}
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.client.BackendInfo;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
/**
* Combined backend tracking. Aside from usual {@link BackendInfo}, this object also tracks the cookie assigned
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import java.util.stream.Stream;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*/
final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
// Pre-boxed instance
- private static final Long ZERO = Long.valueOf(0);
+ private static final Long ZERO = 0L;
private SimpleDataStoreClientBehavior(final ClientActorContext context,
final SimpleShardBackendResolver resolver) {
Long resolveShardForPath(final YangInstanceIdentifier path) {
return ZERO;
}
+
+ @Override
+ Stream<Long> resolveAllShards() {
+ return Stream.of(ZERO);
+ }
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.util.concurrent.AbstractFuture;
import java.util.ArrayList;
import java.util.Collection;
private volatile int neededVotes;
VotingFuture(final T result, final int requiredVotes) {
- Preconditions.checkArgument(requiredVotes > 0);
+ this.result = requireNonNull(result);
+ checkArgument(requiredVotes > 0);
this.neededVotes = requiredVotes;
- // null is okay to allow Void type
- this.result = result;
}
void voteYes() {
private boolean castVote() {
final int votes = VOTES_UPDATER.decrementAndGet(this);
- Verify.verify(votes >= 0);
+ verify(votes >= 0);
return votes == 0;
}
*/
package org.opendaylight.controller.cluster.datastore;
-import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Set;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
/**
* Base implementation of a distributed DOMStore.
*/
-public abstract class AbstractDataStore implements DistributedDataStoreInterface, EffectiveModelContextListener,
- DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher,
- DOMDataTreeCommitCohortRegistry, AutoCloseable {
-
+public abstract class AbstractDataStore implements DistributedDataStoreInterface,
+ DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher, CommitCohortExtension,
+ AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
- private final SettableFuture<Void> readinessFuture = SettableFuture.create();
+ private final SettableFuture<Empty> readinessFuture = SettableFuture.create();
private final ClientIdentifier identifier;
private final DataStoreClient client;
private final ActorUtils actorUtils;
private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
@SuppressWarnings("checkstyle:IllegalCatch")
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Testing overrides")
protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
final DatastoreSnapshot restoreFromSnapshot) {
LOG.error("Failed to get actor for {}", clientProps, e);
clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
Throwables.throwIfUnchecked(e);
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
identifier = client.getIdentifier();
datastoreInfoMXBean.registerMBean();
}
- @VisibleForTesting
- protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
- this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
- this.client = null;
- this.identifier = requireNonNull(identifier);
- }
-
@VisibleForTesting
protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
final DataStoreClient clientActor) {
this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
- this.client = clientActor;
+ client = clientActor;
this.identifier = requireNonNull(identifier);
}
+ @VisibleForTesting
protected AbstractShardManagerCreator<?> getShardManagerCreator() {
return new ShardManagerCreator();
}
return client;
}
- final ClientIdentifier getIdentifier() {
- return identifier;
- }
-
public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
- final YangInstanceIdentifier treeId, final L listener) {
+ public final Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+ final DOMDataTreeChangeListener listener) {
+ return registerTreeChangeListener(treeId, listener, true);
+ }
+
+ private @NonNull Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+ final DOMDataTreeChangeListener listener, final boolean clustered) {
requireNonNull(treeId, "treeId should not be null");
requireNonNull(listener, "listener should not be null");
if (treeId.isEmpty()) {
// User is targeting root of the datastore. If there is more than one shard, we have to register with them
// all and perform data composition.
- final Set<String> shardNames = actorUtils.getConfiguration().getAllShardNames();
+ final var shardNames = actorUtils.getConfiguration().getAllShardNames();
if (shardNames.size() > 1) {
- checkArgument(listener instanceof ClusteredDOMDataTreeChangeListener,
- "Cannot listen on root without non-clustered listener %s", listener);
+ if (!clustered) {
+ throw new IllegalArgumentException(
+ "Cannot listen on root without non-clustered listener " + listener);
+ }
return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames);
}
}
- final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+ final var shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
- final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
- new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId);
- listenerRegistrationProxy.init(shardName);
+ return DataTreeChangeListenerProxy.of(actorUtils, listener, treeId, clustered, shardName);
+ }
- return listenerRegistrationProxy;
+ @Override
+ @Deprecated(since = "9.0.0", forRemoval = true)
+ public final Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+ final DOMDataTreeChangeListener listener) {
+ return registerTreeChangeListener(treeId, listener, false);
}
@Override
- public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
- final DOMDataTreeIdentifier subtree, final C cohort) {
- YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier();
+ // Non-final for testing
+ public Registration registerCommitCohort(final DOMDataTreeIdentifier subtree,
+ final DOMDataTreeCommitCohort cohort) {
+ YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").path();
requireNonNull(cohort, "listener should not be null");
final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
- DataTreeCohortRegistrationProxy<C> cohortProxy =
- new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
+ final var cohortProxy = new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
cohortProxy.init(shardName);
return cohortProxy;
}
- @Override
public void onModelContextUpdated(final EffectiveModelContext newModelContext) {
actorUtils.setSchemaContext(newModelContext);
}
@Override
- public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
+ public final void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName());
actorUtils.setDatastoreContext(contextFactory);
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
- public void close() {
+ public final void close() {
LOG.info("Closing data store {}", identifier);
if (datastoreConfigMXBean != null) {
}
@Override
- public ActorUtils getActorUtils() {
+ public final ActorUtils getActorUtils() {
return actorUtils;
}
// TODO: consider removing this in favor of awaitReadiness()
@Deprecated
- public void waitTillReady() {
+ public final void waitTillReady() {
LOG.info("Beginning to wait for data store to become ready : {}", identifier);
final Duration toWait = initialSettleTime();
@Beta
@Deprecated
- public boolean awaitReadiness() throws InterruptedException {
+ public final boolean awaitReadiness() throws InterruptedException {
return awaitReadiness(initialSettleTime());
}
@Beta
@Deprecated
- public boolean awaitReadiness(final Duration toWait) throws InterruptedException {
+ public final boolean awaitReadiness(final Duration toWait) throws InterruptedException {
try {
if (toWait.isFinite()) {
try {
@Beta
@Deprecated
- public void awaitReadiness(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException {
+ public final void awaitReadiness(final long timeout, final TimeUnit unit)
+ throws InterruptedException, TimeoutException {
if (!awaitReadiness(Duration.create(timeout, unit))) {
throw new TimeoutException("Shard leaders failed to settle");
}
}
@VisibleForTesting
- SettableFuture<Void> readinessFuture() {
+ public final SettableFuture<Empty> readinessFuture() {
return readinessFuture;
}
@Override
- @SuppressWarnings("unchecked")
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
- final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
- final DOMDataTreeChangeListener delegate) {
-
+ public final Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+ final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
requireNonNull(shardLookup, "shardLookup should not be null");
requireNonNull(insideShard, "insideShard should not be null");
requireNonNull(delegate, "delegate should not be null");
- final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
- LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
- delegate,shardLookup, shardName, insideShard);
-
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
- new DataTreeChangeListenerProxy<>(actorUtils,
- // wrap this in the ClusteredDOMDataTreeChangeLister interface
- // since we always want clustered registration
- (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
- listenerRegistrationProxy.init(shardName);
-
- return (ListenerRegistration<L>) listenerRegistrationProxy;
- }
+ final var shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
+ LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}", delegate, shardLookup,
+ shardName, insideShard);
- @Override
- @SuppressWarnings("unchecked")
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
- final YangInstanceIdentifier internalPath, final DOMDataTreeChangeListener delegate) {
- requireNonNull(delegate, "delegate should not be null");
-
- LOG.debug("Registering a listener for the configuration shard: {}", internalPath);
-
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
- new DataTreeChangeListenerProxy<>(actorUtils, delegate, internalPath);
- proxy.init(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
+ return DataTreeChangeListenerProxy.of(actorUtils, new DOMDataTreeChangeListener() {
+ @Override
+ public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
+ delegate.onDataTreeChanged(changes);
+ }
- return (ListenerRegistration<L>) proxy;
+ @Override
+ public void onInitialData() {
+ delegate.onInitialData();
+ }
+ }, insideShard, true, shardName);
}
private Duration initialSettleTime() {
package org.opendaylight.controller.cluster.datastore;
import com.google.common.annotations.VisibleForTesting;
+import java.util.Map;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreContextIntrospectorFactory {
@Override
- public final DatastoreContextIntrospector newInstance(final LogicalDatastoreType datastoreType) {
+ public DatastoreContextIntrospector newInstance(final LogicalDatastoreType datastoreType,
+ final Map<String, Object> properties) {
+ final DatastoreContextIntrospector inst = newInstance(datastoreType);
+ inst.update(properties);
+ return inst;
+ }
+
+ @VisibleForTesting
+ final DatastoreContextIntrospector newInstance(final LogicalDatastoreType datastoreType) {
return newInstance(DatastoreContext.newBuilder()
.logicalStoreType(datastoreType)
.tempFileDirectory("./data")
@VisibleForTesting
final @NonNull DatastoreContextIntrospector newInstance(final DatastoreContext context) {
- final DataStorePropertiesContainer defaultPropsContainer = (DataStorePropertiesContainer)
- serializer().fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME),
- ImmutableNodes.containerNode(DataStorePropertiesContainer.QNAME)).getValue();
-
- return new DatastoreContextIntrospector(context, defaultPropsContainer);
+ return new DatastoreContextIntrospector(context, (DataStorePropertiesContainer) serializer()
+ .fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME),
+ ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(DataStorePropertiesContainer.QNAME))
+ .build())
+ .getValue());
}
abstract BindingNormalizedNodeSerializer serializer();
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
import com.google.common.primitives.UnsignedLong;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import org.opendaylight.controller.cluster.access.commands.IncrementTransactionSequenceRequest;
import org.opendaylight.controller.cluster.access.commands.LocalHistorySuccess;
import org.opendaylight.controller.cluster.access.commands.OutOfOrderRequestException;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsRequest;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsResponse;
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(AbstractFrontendHistory.class);
private final Map<TransactionIdentifier, FrontendTransaction> transactions = new HashMap<>();
- private final RangeSet<UnsignedLong> purgedTransactions;
+ private final MutableUnsignedLongSet purgedTransactions;
private final String persistenceId;
private final ShardDataTree tree;
private Map<UnsignedLong, Boolean> closedTransactions;
AbstractFrontendHistory(final String persistenceId, final ShardDataTree tree,
- final Map<UnsignedLong, Boolean> closedTransactions, final RangeSet<UnsignedLong> purgedTransactions) {
+ final Map<UnsignedLong, Boolean> closedTransactions, final MutableUnsignedLongSet purgedTransactions) {
this.persistenceId = requireNonNull(persistenceId);
this.tree = requireNonNull(tree);
this.closedTransactions = requireNonNull(closedTransactions);
final @Nullable TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
final RequestEnvelope envelope, final long now) throws RequestException {
- if (request instanceof TransactionPurgeRequest) {
- return handleTransactionPurgeRequest(request, envelope, now);
+ if (request instanceof TransactionPurgeRequest purgeRequest) {
+ return handleTransactionPurgeRequest(purgeRequest, envelope, now);
+ } else if (request instanceof SkipTransactionsRequest skipRequest) {
+ return handleSkipTransactionsRequest(skipRequest, envelope, now);
}
final TransactionIdentifier id = request.getTarget();
- final UnsignedLong ul = UnsignedLong.fromLongBits(id.getTransactionId());
- if (purgedTransactions.contains(ul)) {
+ final long txidBits = id.getTransactionId();
+ if (purgedTransactions.contains(txidBits)) {
LOG.warn("{}: Request {} is contained purged transactions {}", persistenceId, request, purgedTransactions);
- throw new DeadTransactionException(purgedTransactions);
+ throw new DeadTransactionException(purgedTransactions.toRangeSet());
}
- final Boolean closed = closedTransactions.get(ul);
+
+ final Boolean closed = closedTransactions.get(UnsignedLong.fromLongBits(txidBits));
if (closed != null) {
- final boolean successful = closed.booleanValue();
+ final boolean successful = closed;
LOG.debug("{}: Request {} refers to a {} transaction", persistenceId, request, successful ? "successful"
: "failed");
throw new ClosedTransactionException(successful);
} else if (!(request instanceof IncrementTransactionSequenceRequest)) {
final Optional<TransactionSuccess<?>> maybeReplay = tx.replaySequence(request.getSequence());
if (maybeReplay.isPresent()) {
- final TransactionSuccess<?> replay = maybeReplay.get();
+ final TransactionSuccess<?> replay = maybeReplay.orElseThrow();
LOG.debug("{}: envelope {} replaying response {}", persistenceId(), envelope, replay);
return replay;
}
return tx.handleRequest(request, envelope, now);
}
- private TransactionSuccess<?> handleTransactionPurgeRequest(final TransactionRequest<?> request,
+ private TransactionPurgeResponse handleTransactionPurgeRequest(final TransactionPurgeRequest request,
final RequestEnvelope envelope, final long now) {
final TransactionIdentifier id = request.getTarget();
- final UnsignedLong ul = UnsignedLong.fromLongBits(id.getTransactionId());
- if (purgedTransactions.contains(ul)) {
+ final long txidBits = id.getTransactionId();
+ if (purgedTransactions.contains(txidBits)) {
// Retransmitted purge request: nothing to do
LOG.debug("{}: transaction {} already purged", persistenceId, id);
return new TransactionPurgeResponse(id, request.getSequence());
// We perform two lookups instead of a straight remove, because once the map becomes empty we switch it
// to an ImmutableMap, which does not allow remove().
+ final UnsignedLong ul = UnsignedLong.fromLongBits(txidBits);
if (closedTransactions.containsKey(ul)) {
tree.purgeTransaction(id, () -> {
closedTransactions.remove(ul);
closedTransactions = ImmutableMap.of();
}
- purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+ purgedTransactions.add(txidBits);
LOG.debug("{}: finished purging inherited transaction {}", persistenceId(), id);
envelope.sendSuccess(new TransactionPurgeResponse(id, request.getSequence()), readTime() - now);
});
// purged transactions in one go. If it does, we warn about the situation and
LOG.warn("{}: transaction {} not tracked in {}, but not present in active transactions", persistenceId,
id, purgedTransactions);
- purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+ purgedTransactions.add(txidBits);
return new TransactionPurgeResponse(id, request.getSequence());
}
tree.purgeTransaction(id, () -> {
- purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+ purgedTransactions.add(txidBits);
transactions.remove(id);
LOG.debug("{}: finished purging transaction {}", persistenceId(), id);
envelope.sendSuccess(new TransactionPurgeResponse(id, request.getSequence()), readTime() - now);
return null;
}
+ private SkipTransactionsResponse handleSkipTransactionsRequest(final SkipTransactionsRequest request,
+ final RequestEnvelope envelope, final long now) {
+ final var first = request.getTarget();
+ final var others = request.getOthers();
+ final var ids = new ArrayList<UnsignedLong>(others.size() + 1);
+ ids.add(UnsignedLong.fromLongBits(first.getTransactionId()));
+ ids.addAll(others);
+
+ final var it = ids.iterator();
+ while (it.hasNext()) {
+ final var id = it.next();
+ final long bits = id.longValue();
+ if (purgedTransactions.contains(bits)) {
+ LOG.warn("{}: history {} tracks {} as purged", persistenceId(), getIdentifier(), id);
+ it.remove();
+ } else if (transactions.containsKey(new TransactionIdentifier(getIdentifier(), bits))) {
+ LOG.warn("{}: history {} tracks {} as open", persistenceId(), getIdentifier(), id);
+ it.remove();
+ }
+ }
+
+ if (ids.isEmpty()) {
+ LOG.debug("{}: history {} completing empty skip request", persistenceId(), getIdentifier());
+ return new SkipTransactionsResponse(first, now);
+ }
+
+ final var transactionIds = MutableUnsignedLongSet.of(ids.stream().mapToLong(UnsignedLong::longValue).toArray())
+ .immutableCopy();
+ LOG.debug("{}: history {} skipping transactions {}", persistenceId(), getIdentifier(), transactionIds.ranges());
+
+ tree.skipTransactions(getIdentifier(), transactionIds, () -> {
+ purgedTransactions.addAll(transactionIds);
+ envelope.sendSuccess(new TransactionPurgeResponse(first, request.getSequence()), readTime() - now);
+ });
+ return null;
+ }
+
final void destroy(final long sequence, final RequestEnvelope envelope, final long now) {
LOG.debug("{}: closing history {}", persistenceId(), getIdentifier());
tree.closeTransactionChain(getIdentifier(),
}
private FrontendTransaction createTransaction(final TransactionRequest<?> request, final TransactionIdentifier id) {
- if (request instanceof CommitLocalTransactionRequest) {
+ if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
LOG.debug("{}: allocating new ready transaction {}", persistenceId(), id);
tree.getStats().incrementReadWriteTransactionCount();
- return createReadyTransaction(id, ((CommitLocalTransactionRequest) request).getModification());
+ return createReadyTransaction(id, commitLocalRequest.getModification());
}
- if (request instanceof AbstractReadTransactionRequest
- && ((AbstractReadTransactionRequest<?>) request).isSnapshotOnly()) {
+ if (request instanceof AbstractReadTransactionRequest<?> readTxRequest && readTxRequest.isSnapshotOnly()) {
LOG.debug("{}: allocating new open snapshot {}", persistenceId(), id);
tree.getStats().incrementReadOnlyTransactionCount();
return createOpenSnapshot(id);
abstract FrontendTransaction createOpenTransaction(TransactionIdentifier id);
- abstract FrontendTransaction createReadyTransaction(TransactionIdentifier id, DataTreeModification mod)
- ;
+ abstract FrontendTransaction createReadyTransaction(TransactionIdentifier id, DataTreeModification mod);
abstract ShardDataTreeCohort createFailedCohort(TransactionIdentifier id, DataTreeModification mod,
Exception failure);
Optional<SortedSet<String>> participatingShardNames);
@Override
- public String toString() {
- return MoreObjects.toStringHelper(this).omitNullValues().add("identifier", getIdentifier())
- .add("persistenceId", persistenceId).add("transactions", transactions).toString();
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).omitNullValues()
+ .add("identifier", getIdentifier())
+ .add("persistenceId", persistenceId)
+ .add("transactions", transactions)
+ .toString();
}
}
import akka.actor.Props;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
/**
* Abstract base for transactions running on SharrdDataTree. This class is NOT thread-safe.
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.List;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import scala.concurrent.Future;
-
-/**
- * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
- * implementation. In addition to the usual set of methods it also contains the list of actor
- * futures.
- */
-public abstract class AbstractThreePhaseCommitCohort<T> implements DOMStoreThreePhaseCommitCohort {
- protected static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
- protected static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
-
- abstract List<Future<T>> getCohortFutures();
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-abstract class AbstractTransactionContext implements TransactionContext {
- private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContext.class);
- private final TransactionIdentifier transactionIdentifier;
- private long modificationCount = 0;
- private boolean handOffComplete;
- private final short transactionVersion;
-
- protected AbstractTransactionContext(TransactionIdentifier transactionIdentifier) {
- this(transactionIdentifier, DataStoreVersions.CURRENT_VERSION);
- }
-
- protected AbstractTransactionContext(TransactionIdentifier transactionIdentifier, short transactionVersion) {
- // FIXME: requireNonNull()?
- this.transactionIdentifier = transactionIdentifier;
- this.transactionVersion = transactionVersion;
- }
-
- /**
- * Get the transaction identifier associated with this context.
- *
- * @return Transaction identifier.
- */
- // FIXME: does this imply Identifiable?
- protected final @NonNull TransactionIdentifier getIdentifier() {
- return transactionIdentifier;
- }
-
- protected final void incrementModificationCount() {
- modificationCount++;
- }
-
- protected final void logModificationCount() {
- LOG.debug("Total modifications on Tx {} = [ {} ]", getIdentifier(), modificationCount);
- }
-
- @Override
- public final void operationHandOffComplete() {
- handOffComplete = true;
- }
-
- protected boolean isOperationHandOffComplete() {
- return handOffComplete;
- }
-
- @Override
- public boolean usesOperationLimiting() {
- return false;
- }
-
- @Override
- public short getTransactionVersion() {
- return transactionVersion;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicLongFieldUpdater;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.util.Try;
-
-/**
- * Factory for creating local and remote TransactionContext instances. Maintains a cache of known local
- * transaction factories.
- */
-abstract class AbstractTransactionContextFactory<F extends LocalTransactionFactory> implements AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContextFactory.class);
- @SuppressWarnings("rawtypes")
- private static final AtomicLongFieldUpdater<AbstractTransactionContextFactory> TX_COUNTER_UPDATER =
- AtomicLongFieldUpdater.newUpdater(AbstractTransactionContextFactory.class, "nextTx");
-
- private final ConcurrentMap<String, F> knownLocal = new ConcurrentHashMap<>();
- private final LocalHistoryIdentifier historyId;
- private final ActorUtils actorUtils;
-
- // Used via TX_COUNTER_UPDATER
- @SuppressWarnings("unused")
- private volatile long nextTx;
-
- protected AbstractTransactionContextFactory(final ActorUtils actorUtils, final LocalHistoryIdentifier historyId) {
- this.actorUtils = requireNonNull(actorUtils);
- this.historyId = requireNonNull(historyId);
- }
-
- final ActorUtils getActorUtils() {
- return actorUtils;
- }
-
- final LocalHistoryIdentifier getHistoryId() {
- return historyId;
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private TransactionContext maybeCreateLocalTransactionContext(final TransactionProxy parent,
- final String shardName) {
- final LocalTransactionFactory local = knownLocal.get(shardName);
- if (local != null) {
- LOG.debug("Tx {} - Creating local component for shard {} using factory {}", parent.getIdentifier(),
- shardName, local);
-
- try {
- return createLocalTransactionContext(local, parent);
- } catch (Exception e) {
- return new NoOpTransactionContext(e, parent.getIdentifier());
- }
- }
-
- return null;
- }
-
- private void onFindPrimaryShardSuccess(PrimaryShardInfo primaryShardInfo, TransactionProxy parent,
- String shardName, TransactionContextWrapper transactionContextWrapper) {
- LOG.debug("Tx {}: Found primary {} for shard {}", parent.getIdentifier(),
- primaryShardInfo.getPrimaryShardActor(), shardName);
-
- updateShardInfo(shardName, primaryShardInfo);
-
- try {
- TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName);
- if (localContext != null) {
- transactionContextWrapper.executePriorTransactionOperations(localContext);
- } else {
- RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport(transactionContextWrapper,
- parent, shardName);
- remote.setPrimaryShard(primaryShardInfo);
- }
- } finally {
- onTransactionContextCreated(parent.getIdentifier());
- }
- }
-
- private void onFindPrimaryShardFailure(Throwable failure, TransactionProxy parent,
- String shardName, TransactionContextWrapper transactionContextWrapper) {
- LOG.debug("Tx {}: Find primary for shard {} failed", parent.getIdentifier(), shardName, failure);
-
- try {
- transactionContextWrapper.executePriorTransactionOperations(new NoOpTransactionContext(failure,
- parent.getIdentifier()));
- } finally {
- onTransactionContextCreated(parent.getIdentifier());
- }
- }
-
- final TransactionContextWrapper newTransactionContextWrapper(final TransactionProxy parent,
- final String shardName) {
- final TransactionContextWrapper transactionContextWrapper =
- new TransactionContextWrapper(parent.getIdentifier(), actorUtils, shardName);
-
- Future<PrimaryShardInfo> findPrimaryFuture = findPrimaryShard(shardName, parent.getIdentifier());
- if (findPrimaryFuture.isCompleted()) {
- Try<PrimaryShardInfo> maybe = findPrimaryFuture.value().get();
- if (maybe.isSuccess()) {
- onFindPrimaryShardSuccess(maybe.get(), parent, shardName, transactionContextWrapper);
- } else {
- onFindPrimaryShardFailure(maybe.failed().get(), parent, shardName, transactionContextWrapper);
- }
- } else {
- findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
- @Override
- public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
- if (failure == null) {
- onFindPrimaryShardSuccess(primaryShardInfo, parent, shardName, transactionContextWrapper);
- } else {
- onFindPrimaryShardFailure(failure, parent, shardName, transactionContextWrapper);
- }
- }
- }, actorUtils.getClientDispatcher());
- }
-
- return transactionContextWrapper;
- }
-
- private void updateShardInfo(final String shardName, final PrimaryShardInfo primaryShardInfo) {
- final Optional<ReadOnlyDataTree> maybeDataTree = primaryShardInfo.getLocalShardDataTree();
- if (maybeDataTree.isPresent()) {
- if (!knownLocal.containsKey(shardName)) {
- LOG.debug("Shard {} resolved to local data tree - adding local factory", shardName);
-
- F factory = factoryForShard(shardName, primaryShardInfo.getPrimaryShardActor(), maybeDataTree.get());
- knownLocal.putIfAbsent(shardName, factory);
- }
- } else if (knownLocal.containsKey(shardName)) {
- LOG.debug("Shard {} invalidating local data tree", shardName);
-
- knownLocal.remove(shardName);
- }
- }
-
- protected final MemberName getMemberName() {
- return historyId.getClientId().getFrontendId().getMemberName();
- }
-
- /**
- * Create an identifier for the next TransactionProxy attached to this component
- * factory.
- * @return Transaction identifier, may not be null.
- */
- protected final TransactionIdentifier nextIdentifier() {
- return new TransactionIdentifier(historyId, TX_COUNTER_UPDATER.getAndIncrement(this));
- }
-
- /**
- * Find the primary shard actor.
- *
- * @param shardName Shard name
- * @return Future containing shard information.
- */
- protected abstract Future<PrimaryShardInfo> findPrimaryShard(@NonNull String shardName,
- @NonNull TransactionIdentifier txId);
-
- /**
- * Create local transaction factory for specified shard, backed by specified shard leader
- * and data tree instance.
- *
- * @param shardName the shard name
- * @param shardLeader the shard leader
- * @param dataTree Backing data tree instance. The data tree may only be accessed in
- * read-only manner.
- * @return Transaction factory for local use.
- */
- protected abstract F factoryForShard(String shardName, ActorSelection shardLeader, ReadOnlyDataTree dataTree);
-
- /**
- * Callback invoked from child transactions to push any futures, which need to
- * be waited for before the next transaction is allocated.
- * @param cohortFutures Collection of futures
- */
- protected abstract <T> void onTransactionReady(@NonNull TransactionIdentifier transaction,
- @NonNull Collection<Future<T>> cohortFutures);
-
- /**
- * Callback invoked when the internal TransactionContext has been created for a transaction.
- *
- * @param transactionId the ID of the transaction.
- */
- protected abstract void onTransactionContextCreated(@NonNull TransactionIdentifier transactionId);
-
- private static TransactionContext createLocalTransactionContext(final LocalTransactionFactory factory,
- final TransactionProxy parent) {
-
- switch (parent.getType()) {
- case READ_ONLY:
- final DOMStoreReadTransaction readOnly = factory.newReadOnlyTransaction(parent.getIdentifier());
- return new LocalTransactionContext(readOnly, parent.getIdentifier(), factory) {
- @Override
- protected DOMStoreWriteTransaction getWriteDelegate() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- protected DOMStoreReadTransaction getReadDelegate() {
- return readOnly;
- }
- };
- case READ_WRITE:
- final DOMStoreReadWriteTransaction readWrite = factory.newReadWriteTransaction(parent.getIdentifier());
- return new LocalTransactionContext(readWrite, parent.getIdentifier(), factory) {
- @Override
- protected DOMStoreWriteTransaction getWriteDelegate() {
- return readWrite;
- }
-
- @Override
- protected DOMStoreReadTransaction getReadDelegate() {
- return readWrite;
- }
- };
- case WRITE_ONLY:
- final DOMStoreWriteTransaction writeOnly = factory.newWriteOnlyTransaction(parent.getIdentifier());
- return new LocalTransactionContext(writeOnly, parent.getIdentifier(), factory) {
- @Override
- protected DOMStoreWriteTransaction getWriteDelegate() {
- return writeOnly;
- }
-
- @Override
- protected DOMStoreReadTransaction getReadDelegate() {
- throw new UnsupportedOperationException();
- }
- };
- default:
- throw new IllegalArgumentException("Invalid transaction type: " + parent.getType());
- }
- }
-}
import java.util.Optional;
import java.util.SortedSet;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
@Override
- public TransactionIdentifier getIdentifier() {
- return delegate.getIdentifier();
+ TransactionIdentifier transactionId() {
+ return delegate.transactionId();
}
@Override
- public void canCommit(final FutureCallback<Void> callback) {
+ public void canCommit(final FutureCallback<Empty> callback) {
delegate.canCommit(callback);
}
}
@Override
- public void abort(final FutureCallback<Void> callback) {
+ public void abort(final FutureCallback<Empty> callback) {
delegate.abort(callback);
}
import akka.actor.ActorRef;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.List;
import java.util.Optional;
import java.util.SortedSet;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortDecorator;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+@Deprecated(since = "9.0.0", forRemoval = true)
final class CohortEntry {
private final ReadWriteShardDataTreeTransaction transaction;
private final TransactionIdentifier transactionId;
private Shard shard;
private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) {
- this.cohort = null;
+ cohort = null;
this.transaction = requireNonNull(transaction);
- this.transactionId = transaction.getIdentifier();
+ transactionId = transaction.getIdentifier();
this.clientVersion = clientVersion;
}
private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) {
this.cohort = requireNonNull(cohort);
- this.transactionId = cohort.getIdentifier();
- this.transaction = null;
+ transactionId = cohort.transactionId();
+ transaction = null;
this.clientVersion = clientVersion;
}
}
@SuppressWarnings("checkstyle:IllegalCatch")
+ @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Re-thrown")
void applyModifications(final List<Modification> modifications) {
totalBatchedModificationsReceived++;
if (lastBatchedModificationsException == null) {
}
}
- void canCommit(final FutureCallback<Void> callback) {
+ void canCommit(final FutureCallback<Empty> callback) {
cohort.canCommit(callback);
}
cohort.commit(callback);
}
- void abort(final FutureCallback<Void> callback) {
+ void abort(final FutureCallback<Empty> callback) {
cohort.abort(callback);
}
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.collect.Lists;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.CanCommit;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.Success;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.compat.java8.FutureConverters;
* <p/>
* It tracks current operation and list of cohorts which successfuly finished previous phase in
* case, if abort is necessary to invoke it only on cohort steps which are still active.
- *
*/
class CompositeDataTreeCohort {
private static final Logger LOG = LoggerFactory.getLogger(CompositeDataTreeCohort.class);
ABORTED
}
- static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<Object>() {
+ static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<>() {
@Override
public Failure recover(final Throwable error) {
return new Failure(error);
private final DataTreeCohortActorRegistry registry;
private final TransactionIdentifier txId;
- private final SchemaContext schema;
+ private final EffectiveModelContext schema;
private final Executor callbackExecutor;
private final Timeout timeout;
- private @NonNull List<Success> successfulFromPrevious = Collections.emptyList();
+ private @NonNull List<Success> successfulFromPrevious = List.of();
private State state = State.IDLE;
CompositeDataTreeCohort(final DataTreeCohortActorRegistry registry, final TransactionIdentifier transactionID,
- final SchemaContext schema, final Executor callbackExecutor, final Timeout timeout) {
+ final EffectiveModelContext schema, final Executor callbackExecutor, final Timeout timeout) {
this.registry = requireNonNull(registry);
- this.txId = requireNonNull(transactionID);
+ txId = requireNonNull(transactionID);
this.schema = requireNonNull(schema);
this.callbackExecutor = requireNonNull(callbackExecutor);
this.timeout = requireNonNull(timeout);
throw new IllegalStateException("Unhandled state " + state);
}
- successfulFromPrevious = Collections.emptyList();
+ successfulFromPrevious = List.of();
state = State.IDLE;
}
- Optional<CompletionStage<Void>> canCommit(final DataTreeCandidate tip) {
+ Optional<CompletionStage<Empty>> canCommit(final DataTreeCandidate tip) {
if (LOG.isTraceEnabled()) {
LOG.trace("{}: canCommit - candidate: {}", txId, tip);
} else {
final List<CanCommit> messages = registry.createCanCommitMessages(txId, tip, schema);
LOG.debug("{}: canCommit - messages: {}", txId, messages);
if (messages.isEmpty()) {
- successfulFromPrevious = Collections.emptyList();
+ successfulFromPrevious = List.of();
changeStateFrom(State.IDLE, State.CAN_COMMIT_SUCCESSFUL);
return Optional.empty();
}
return Optional.of(processResponses(futures, State.CAN_COMMIT_SENT, State.CAN_COMMIT_SUCCESSFUL));
}
- Optional<CompletionStage<Void>> preCommit() {
+ Optional<CompletionStage<Empty>> preCommit() {
LOG.debug("{}: preCommit - successfulFromPrevious: {}", txId, successfulFromPrevious);
if (successfulFromPrevious.isEmpty()) {
return Optional.of(processResponses(futures, State.PRE_COMMIT_SENT, State.PRE_COMMIT_SUCCESSFUL));
}
- Optional<CompletionStage<Void>> commit() {
+ Optional<CompletionStage<Empty>> commit() {
LOG.debug("{}: commit - successfulFromPrevious: {}", txId, successfulFromPrevious);
if (successfulFromPrevious.isEmpty()) {
changeStateFrom(State.PRE_COMMIT_SUCCESSFUL, State.COMMITED);
return ret;
}
- private @NonNull CompletionStage<Void> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
+ private @NonNull CompletionStage<Empty> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
final State currentState, final State afterState) {
LOG.debug("{}: processResponses - currentState: {}, afterState: {}", txId, currentState, afterState);
- final CompletableFuture<Void> returnFuture = new CompletableFuture<>();
+ final CompletableFuture<Empty> returnFuture = new CompletableFuture<>();
Future<Iterable<Object>> aggregateFuture = Futures.sequence(Lists.transform(futures, Entry::getValue),
ExecutionContexts.global());
return returnFuture;
}
- // FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the
- // generic type is Void.
- @SuppressFBWarnings(value = { "NP_NONNULL_PARAM_VIOLATION", "UPM_UNCALLED_PRIVATE_METHOD" },
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void processResponses(final Throwable failure, final Iterable<Object> results,
- final State currentState, final State afterState, final CompletableFuture<Void> resultFuture) {
+ final State currentState, final State afterState, final CompletableFuture<Empty> resultFuture) {
if (failure != null) {
- successfulFromPrevious = Collections.emptyList();
+ successfulFromPrevious = List.of();
resultFuture.completeExceptionally(failure);
return;
}
firstEx.addSuppressed(it.next().cause());
}
- successfulFromPrevious = Collections.emptyList();
+ successfulFromPrevious = List.of();
resultFuture.completeExceptionally(firstEx);
} else {
successfulFromPrevious = successful;
changeStateFrom(currentState, afterState);
- resultFuture.complete(null);
+ resultFuture.complete(Empty.value());
}
}
import com.google.common.base.MoreObjects;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
final class DOMDataTreeCandidateTO implements DOMDataTreeCandidate {
*/
public final class DataStoreVersions {
@Deprecated
- public static final short BASE_HELIUM_VERSION = 0;
+ public static final short BASE_HELIUM_VERSION = 0;
@Deprecated
- public static final short HELIUM_1_VERSION = 1;
+ public static final short HELIUM_1_VERSION = 1;
@Deprecated
- public static final short HELIUM_2_VERSION = 2;
+ public static final short HELIUM_2_VERSION = 2;
@Deprecated
- public static final short LITHIUM_VERSION = 3;
- public static final short BORON_VERSION = 5;
- public static final short FLUORINE_VERSION = 9;
- public static final short NEON_SR2_VERSION = 10;
- public static final short SODIUM_SR1_VERSION = 11;
- public static final short MAGNESIUM_VERSION = 12;
- public static final short CURRENT_VERSION = SODIUM_SR1_VERSION;
+ public static final short LITHIUM_VERSION = 3;
+ @Deprecated
+ public static final short BORON_VERSION = 5;
+ @Deprecated
+ public static final short FLUORINE_VERSION = 9;
+ @Deprecated
+ public static final short NEON_SR2_VERSION = 10;
+ @Deprecated
+ public static final short SODIUM_SR1_VERSION = 11;
+ @Deprecated
+ public static final short PHOSPHORUS_VERSION = 12;
+ public static final short POTASSIUM_VERSION = 13;
+ public static final short CURRENT_VERSION = POTASSIUM_VERSION;
private DataStoreVersions() {
import static java.util.Objects.requireNonNull;
+import akka.actor.ActorRef;
import akka.actor.Props;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.GetInfo;
import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
LOG.debug("{}: Notifying onInitialData to listener {}", logContext, listener);
try {
- this.listener.onInitialData();
+ listener.onInitialData();
} catch (Exception e) {
- LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+ LOG.error("{}: Error notifying listener {}", logContext, listener, e);
}
}
return;
}
- LOG.debug("{}: Sending {} change notification(s) {} to listener {}", logContext, message.getChanges().size(),
- message.getChanges(), listener);
+ final var changes = message.getChanges();
+ LOG.debug("{}: Sending {} change notification(s) to listener {}", logContext, changes.size(), listener);
+ if (LOG.isTraceEnabled() && !changes.isEmpty()) {
+ LOG.trace("{}: detailed change follow", logContext);
+ for (int i = 0, size = changes.size(); i < size; ++i) {
+ LOG.trace("{}: change {}: {}", logContext, i, changes.get(i));
+ }
+ }
notificationCount++;
try {
- this.listener.onDataTreeChanged(message.getChanges());
+ listener.onDataTreeChanged(changes);
} catch (Exception e) {
- LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+ LOG.error("{}: Error notifying listener {}", logContext, listener, e);
}
// TODO: do we really need this?
// It seems the sender is never null but it doesn't hurt to check. If the caller passes in
// a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
- if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
- getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
+ final ActorRef sender = getSender();
+ if (sender != null && !sender.equals(getContext().system().deadLetters())) {
+ sender.tell(DataTreeChangedReply.getInstance(), getSelf());
}
}
import akka.actor.PoisonPill;
import akka.dispatch.OnComplete;
import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.Executor;
import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
/**
* Proxy class for holding required state to lazily instantiate a listener registration with an
*
* @param <T> listener type
*/
-final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+final class DataTreeChangeListenerProxy extends AbstractObjectRegistration<DOMDataTreeChangeListener> {
private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
private final ActorRef dataChangeListenerActor;
private final ActorUtils actorUtils;
private final YangInstanceIdentifier registeredPath;
+ private final boolean clustered;
@GuardedBy("this")
private ActorSelection listenerRegistrationActor;
- DataTreeChangeListenerProxy(final ActorUtils actorUtils, final T listener,
- final YangInstanceIdentifier registeredPath) {
+ @VisibleForTesting
+ private DataTreeChangeListenerProxy(final ActorUtils actorUtils, final DOMDataTreeChangeListener listener,
+ final YangInstanceIdentifier registeredPath, final boolean clustered, final String shardName) {
super(listener);
this.actorUtils = requireNonNull(actorUtils);
this.registeredPath = requireNonNull(registeredPath);
- this.dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
+ this.clustered = clustered;
+ dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
DataTreeChangeListenerActor.props(getInstance(), registeredPath)
.withDispatcher(actorUtils.getNotificationDispatcherPath()));
-
LOG.debug("{}: Created actor {} for DTCL {}", actorUtils.getDatastoreContext().getLogicalStoreType(),
dataChangeListenerActor, listener);
}
+ static @NonNull DataTreeChangeListenerProxy of(final ActorUtils actorUtils,
+ final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+ final boolean clustered, final String shardName) {
+ return ofTesting(actorUtils, listener, registeredPath, clustered, shardName, MoreExecutors.directExecutor());
+ }
+
+ @VisibleForTesting
+ static @NonNull DataTreeChangeListenerProxy ofTesting(final ActorUtils actorUtils,
+ final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+ final boolean clustered, final String shardName, final Executor executor) {
+ final var ret = new DataTreeChangeListenerProxy(actorUtils, listener, registeredPath, clustered, shardName);
+ executor.execute(() -> {
+ LOG.debug("{}: Starting discovery of shard {}", ret.logContext(), shardName);
+ actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<>() {
+ @Override
+ public void onComplete(final Throwable failure, final ActorRef shard) {
+ if (failure instanceof LocalShardNotFoundException) {
+ LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} cannot be "
+ + "registered", ret.logContext(), shardName, listener, registeredPath);
+ } else if (failure != null) {
+ LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} cannot be "
+ + "registered", ret.logContext(), shardName, listener, registeredPath, failure);
+ } else {
+ ret.doRegistration(shard);
+ }
+ }
+ }, actorUtils.getClientDispatcher());
+ });
+ return ret;
+ }
+
@Override
protected synchronized void removeRegistration() {
if (listenerRegistrationActor != null) {
dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
- void init(final String shardName) {
- Future<ActorRef> findFuture = actorUtils.findLocalShardAsync(shardName);
- findFuture.onComplete(new OnComplete<ActorRef>() {
- @Override
- public void onComplete(final Throwable failure, final ActorRef shard) {
- if (failure instanceof LocalShardNotFoundException) {
- LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} "
- + "cannot be registered", logContext(), shardName, getInstance(), registeredPath);
- } else if (failure != null) {
- LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} "
- + "cannot be registered", logContext(), shardName, getInstance(), registeredPath,
- failure);
- } else {
- doRegistration(shard);
- }
- }
- }, actorUtils.getClientDispatcher());
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void setListenerRegistrationActor(final ActorSelection actor) {
if (actor == null) {
LOG.debug("{}: Ignoring null actor on {}", logContext(), this);
synchronized (this) {
if (!isClosed()) {
- this.listenerRegistrationActor = actor;
+ listenerRegistrationActor = actor;
return;
}
}
actor.tell(CloseDataTreeNotificationListenerRegistration.getInstance(), null);
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void doRegistration(final ActorRef shard) {
-
- Future<Object> future = actorUtils.executeOperationAsync(shard,
- new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor,
- getInstance() instanceof ClusteredDOMDataTreeChangeListener),
- actorUtils.getDatastoreContext().getShardInitializationTimeout());
-
- future.onComplete(new OnComplete<Object>() {
- @Override
- public void onComplete(final Throwable failure, final Object result) {
- if (failure != null) {
- LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
+ actorUtils.executeOperationAsync(shard,
+ new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor, clustered),
+ actorUtils.getDatastoreContext().getShardInitializationTimeout()).onComplete(new OnComplete<>() {
+ @Override
+ public void onComplete(final Throwable failure, final Object result) {
+ if (failure != null) {
+ LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
getInstance(), registeredPath, failure);
- } else {
- RegisterDataTreeNotificationListenerReply reply = (RegisterDataTreeNotificationListenerReply)result;
- setListenerRegistrationActor(actorUtils.actorSelection(
- reply.getListenerRegistrationPath()));
+ } else {
+ setListenerRegistrationActor(actorUtils.actorSelection(
+ ((RegisterDataTreeNotificationListenerReply) result).getListenerRegistrationPath()));
+ }
}
- }
- }, actorUtils.getClientDispatcher());
+ }, actorUtils.getClientDispatcher());
}
@VisibleForTesting
final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener> {
private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
- private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+ private final Collection<DelayedDataTreeChangeListenerRegistration>
delayedDataTreeChangeListenerRegistrations = ConcurrentHashMap.newKeySet();
- private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+ private final Collection<DelayedDataTreeChangeListenerRegistration>
delayedListenerOnAllRegistrations = ConcurrentHashMap.newKeySet();
private final Collection<ActorSelection> leaderOnlyListenerActors = ConcurrentHashMap.newKeySet();
private final Collection<ActorSelection> allListenerActors = ConcurrentHashMap.newKeySet();
}
if (hasLeader) {
- for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
- delayedListenerOnAllRegistrations) {
+ for (var reg : delayedListenerOnAllRegistrations) {
reg.doRegistration(this);
}
}
if (isLeader) {
- for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
- delayedDataTreeChangeListenerRegistrations) {
+ for (var reg : delayedDataTreeChangeListenerRegistrations) {
reg.doRegistration(this);
}
} else {
LOG.debug("{}: Shard does not have a leader - delaying registration", persistenceId());
- final DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> delayedReg =
- new DelayedDataTreeChangeListenerRegistration<>(message, registrationActor);
- final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>> delayedRegList;
+ final var delayedReg = new DelayedDataTreeChangeListenerRegistration(message, registrationActor);
+ final Collection<DelayedDataTreeChangeListenerRegistration> delayedRegList;
if (message.isRegisterOnAllInstances()) {
delayedRegList = delayedListenerOnAllRegistrations;
} else {
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
/**
* Proxy actor which acts as a facade to the user-provided commit cohort. Responsible for
private final Collection<DOMDataTreeCandidate> candidates;
private final ActorRef cohort;
- private final SchemaContext schema;
+ private final EffectiveModelContext schema;
CanCommit(final TransactionIdentifier txId, final Collection<DOMDataTreeCandidate> candidates,
- final SchemaContext schema, final ActorRef cohort) {
+ final EffectiveModelContext schema, final ActorRef cohort) {
super(txId);
this.cohort = Objects.requireNonNull(cohort);
this.candidates = Objects.requireNonNull(candidates);
return candidates;
}
- SchemaContext getSchema() {
+ EffectiveModelContext getSchema() {
return schema;
}
}, callbackExecutor);
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void failed(final TransactionIdentifier txId, final ActorRef sender, final Throwable failure) {
currentStateMap.remove(txId);
sender.tell(new Status.Failure(failure), getSelf());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void success(final TransactionIdentifier txId, final ActorRef sender, final S nextStep) {
currentStateMap.computeIfPresent(txId, (key, behaviour) -> nextBehaviour(txId, nextStep));
sender.tell(new Success(getSelf(), txId), getSelf());
import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeSnapshot;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Registry of user commit cohorts, which is responsible for handling registration and calculation
* of affected cohorts based on {@link DataTreeCandidate}. This class is NOT thread-safe.
- *
*/
class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortActorRegistry.class);
- private final Map<ActorRef, RegistrationTreeNode<ActorRef>> cohortToNode = new HashMap<>();
+ private final Map<ActorRef, Node<ActorRef>> cohortToNode = new HashMap<>();
Collection<ActorRef> getCohortActors() {
return new ArrayList<>(cohortToNode.keySet());
takeLock();
try {
final ActorRef cohortRef = cohort.getCohort();
- final RegistrationTreeNode<ActorRef> node =
- findNodeFor(cohort.getPath().getRootIdentifier().getPathArguments());
+ final Node<ActorRef> node = findNodeFor(cohort.getPath().path().getPathArguments());
addRegistration(node, cohort.getCohort());
cohortToNode.put(cohortRef, node);
} catch (final Exception e) {
void removeCommitCohort(final ActorRef sender, final RemoveCohort message) {
final ActorRef cohort = message.getCohort();
- final RegistrationTreeNode<ActorRef> node = cohortToNode.get(cohort);
+ final Node<ActorRef> node = cohortToNode.get(cohort);
if (node != null) {
removeRegistration(node, cohort);
cohortToNode.remove(cohort);
}
List<DataTreeCohortActor.CanCommit> createCanCommitMessages(final TransactionIdentifier txId,
- final DataTreeCandidate candidate, final SchemaContext schema) {
- try (RegistrationTreeSnapshot<ActorRef> cohorts = takeSnapshot()) {
+ final DataTreeCandidate candidate, final EffectiveModelContext schema) {
+ try (var cohorts = takeSnapshot()) {
return new CanCommitMessageBuilder(txId, candidate, schema).perform(cohorts.getRootNode());
}
}
private final Multimap<ActorRef, DOMDataTreeCandidate> actorToCandidates = ArrayListMultimap.create();
private final TransactionIdentifier txId;
private final DataTreeCandidate candidate;
- private final SchemaContext schema;
+ private final EffectiveModelContext schema;
CanCommitMessageBuilder(final TransactionIdentifier txId, final DataTreeCandidate candidate,
- final SchemaContext schema) {
+ final EffectiveModelContext schema) {
this.txId = requireNonNull(txId);
this.candidate = requireNonNull(candidate);
this.schema = schema;
}
private void lookupAndCreateCanCommits(final List<PathArgument> args, final int offset,
- final RegistrationTreeNode<ActorRef> node) {
+ final Node<ActorRef> node) {
if (args.size() != offset) {
final PathArgument arg = args.get(offset);
- final RegistrationTreeNode<ActorRef> exactChild = node.getExactChild(arg);
+ final var exactChild = node.getExactChild(arg);
if (exactChild != null) {
lookupAndCreateCanCommits(args, offset + 1, exactChild);
}
- for (final RegistrationTreeNode<ActorRef> c : node.getInexactChildren(arg)) {
- lookupAndCreateCanCommits(args, offset + 1, c);
+ for (var inexact : node.getInexactChildren(arg)) {
+ lookupAndCreateCanCommits(args, offset + 1, inexact);
}
} else {
lookupAndCreateCanCommits(candidate.getRootPath(), node, candidate.getRootNode());
}
}
- private void lookupAndCreateCanCommits(final YangInstanceIdentifier path,
- final RegistrationTreeNode<ActorRef> regNode, final DataTreeCandidateNode candNode) {
- if (candNode.getModificationType() == ModificationType.UNMODIFIED) {
+ private void lookupAndCreateCanCommits(final YangInstanceIdentifier path, final Node<ActorRef> regNode,
+ final DataTreeCandidateNode candNode) {
+ if (candNode.modificationType() == ModificationType.UNMODIFIED) {
LOG.debug("Skipping unmodified candidate {}", path);
return;
}
- final Collection<ActorRef> regs = regNode.getRegistrations();
+ final var regs = regNode.getRegistrations();
if (!regs.isEmpty()) {
createCanCommits(regs, path, candNode);
}
- for (final DataTreeCandidateNode candChild : candNode.getChildNodes()) {
- if (candChild.getModificationType() != ModificationType.UNMODIFIED) {
- final RegistrationTreeNode<ActorRef> regChild =
- regNode.getExactChild(candChild.getIdentifier());
+ for (var candChild : candNode.childNodes()) {
+ if (candChild.modificationType() != ModificationType.UNMODIFIED) {
+ final var regChild = regNode.getExactChild(candChild.name());
if (regChild != null) {
- lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), regChild, candChild);
+ lookupAndCreateCanCommits(path.node(candChild.name()), regChild, candChild);
}
- for (final RegistrationTreeNode<ActorRef> rc : regNode
- .getInexactChildren(candChild.getIdentifier())) {
- lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), rc, candChild);
+ for (var rc : regNode.getInexactChildren(candChild.name())) {
+ lookupAndCreateCanCommits(path.node(candChild.name()), rc, candChild);
}
}
}
}
private static DOMDataTreeIdentifier treeIdentifier(final YangInstanceIdentifier path) {
- return new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, path);
+ return DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path);
}
- List<DataTreeCohortActor.CanCommit> perform(final RegistrationTreeNode<ActorRef> rootNode) {
- final List<PathArgument> toLookup = candidate.getRootPath().getPathArguments();
+ List<DataTreeCohortActor.CanCommit> perform(final Node<ActorRef> rootNode) {
+ final var toLookup = candidate.getRootPath().getPathArguments();
lookupAndCreateCanCommits(toLookup, 0, rootNode);
final Map<ActorRef, Collection<DOMDataTreeCandidate>> mapView = actorToCandidates.asMap();
}
}
- CompositeDataTreeCohort createCohort(final SchemaContext schemaContext, final TransactionIdentifier txId,
+ CompositeDataTreeCohort createCohort(final EffectiveModelContext schemaContext, final TransactionIdentifier txId,
final Executor callbackExecutor, final Timeout commitStepTimeout) {
return new CompositeDataTreeCohort(this, txId, schemaContext, callbackExecutor, commitStepTimeout);
}
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import akka.util.Timeout;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.TimeUnit;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.slf4j.Logger;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
-public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C>
- implements DOMDataTreeCommitCohortRegistration<C> {
-
+public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C> {
private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortRegistrationProxy.class);
private static final Timeout TIMEOUT = new Timeout(new FiniteDuration(5, TimeUnit.SECONDS));
+
private final DOMDataTreeIdentifier subtree;
private final ActorRef actor;
private final ActorUtils actorUtils;
super(cohort);
this.subtree = requireNonNull(subtree);
this.actorUtils = requireNonNull(actorUtils);
- this.actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
- subtree.getRootIdentifier()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
+ actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
+ subtree.path()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
}
public void init(final String shardName) {
}, actorUtils.getClientDispatcher());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private synchronized void performRegistration(final ActorRef shard) {
if (isClosed()) {
return;
cohortRegistry = shard;
Future<Object> future =
Patterns.ask(shard, new DataTreeCohortActorRegistry.RegisterCohort(subtree, actor), TIMEOUT);
- future.onComplete(new OnComplete<Object>() {
+ future.onComplete(new OnComplete<>() {
@Override
public void onComplete(final Throwable failure, final Object val) {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.datastore;
import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
/**
*
* @author Thomas Pantelis
*/
-public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
+final class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
public static final String JMX_CATEGORY_CONFIGURATION = "Configuration";
private DatastoreContext context;
- public DatastoreConfigurationMXBeanImpl(final String mxBeanType) {
+ DatastoreConfigurationMXBeanImpl(final String mxBeanType) {
super("Datastore", mxBeanType, JMX_CATEGORY_CONFIGURATION);
}
return context.getShardRaftConfig().getSnapshotDataThresholdPercentage();
}
+ @Override
+ public int getShardSnapshotDataThreshold() {
+ return context.getShardRaftConfig().getSnapshotDataThreshold();
+ }
+
@Override
public long getShardSnapshotBatchCount() {
return context.getShardRaftConfig().getSnapshotBatchCount();
return context.isTransactionDebugContextEnabled();
}
- @Override
- @Deprecated(forRemoval = true)
- public int getMaxShardDataChangeExecutorPoolSize() {
- return 0;
- }
-
- @Override
- @Deprecated(forRemoval = true)
- public int getMaxShardDataChangeExecutorQueueSize() {
- return 0;
- }
-
- @Override
- @Deprecated(forRemoval = true)
- public int getMaxShardDataChangeListenerQueueSize() {
- return 0;
- }
-
- @Override
- @Deprecated(forRemoval = true)
- public int getMaxShardDataStoreExecutorQueueSize() {
- return 0;
- }
-
@Override
public int getMaximumMessageSliceSize() {
return context.getMaximumMessageSliceSize();
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.PeerAddressResolver;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public static final boolean DEFAULT_SNAPSHOT_ON_ROOT_OVERWRITE = false;
public static final FileAkkaConfigurationReader DEFAULT_CONFIGURATION_READER = new FileAkkaConfigurationReader();
public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE = 12;
+ public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD = 0;
public static final int DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR = 2;
public static final int DEFAULT_SHARD_CANDIDATE_ELECTION_TIMEOUT_DIVISOR = 1;
public static final int DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT = 100;
public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 1000;
public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS =
TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES);
- public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 2048 * 1000; // 2MB
+ public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
public static final int DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY = 512;
+ public static final ExportOnRecovery DEFAULT_EXPORT_ON_RECOVERY = ExportOnRecovery.Off;
+ public static final String DEFAULT_RECOVERY_EXPORT_BASE_DIR = "persistence-export";
public static final long DEFAULT_SYNC_INDEX_THRESHOLD = 10;
private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
private String dataStoreName = UNKNOWN_DATA_STORE_TYPE;
private LogicalDatastoreType logicalStoreType = LogicalDatastoreType.OPERATIONAL;
- private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.empty();
+ private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.of();
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
private boolean writeOnlyTransactionOptimizationsEnabled = true;
private long shardCommitQueueExpiryTimeoutInMillis = DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS;
- private boolean useTellBasedProtocol = false;
private boolean transactionDebugContextEnabled = false;
private String shardManagerPersistenceId;
private int maximumMessageSliceSize = DEFAULT_MAX_MESSAGE_SLICE_SIZE;
private long noProgressTimeout = AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS;
private int initialPayloadSerializedBufferCapacity = DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY;
private boolean useLz4Compression = false;
+ private ExportOnRecovery exportOnRecovery = DEFAULT_EXPORT_ON_RECOVERY;
+ private String recoveryExportBaseDir = DEFAULT_RECOVERY_EXPORT_BASE_DIR;
public static Set<String> getGlobalDatastoreNames() {
return GLOBAL_DATASTORE_NAMES;
setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
+ setSnapshotDataThreshold(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD);
setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
setCandidateElectionTimeoutDivisor(DEFAULT_SHARD_CANDIDATE_ELECTION_TIMEOUT_DIVISOR);
setSyncIndexThreshold(DEFAULT_SYNC_INDEX_THRESHOLD);
}
private DatastoreContext(final DatastoreContext other) {
- this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
- this.operationTimeoutInMillis = other.operationTimeoutInMillis;
- this.dataStoreMXBeanType = other.dataStoreMXBeanType;
- this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
- this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
- this.shardInitializationTimeout = other.shardInitializationTimeout;
- this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
- this.initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier;
- this.persistent = other.persistent;
- this.snapshotOnRootOverwrite = other.snapshotOnRootOverwrite;
- this.configurationReader = other.configurationReader;
- this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
- this.dataStoreName = other.dataStoreName;
- this.logicalStoreType = other.logicalStoreType;
- this.storeRoot = other.storeRoot;
- this.shardBatchedModificationCount = other.shardBatchedModificationCount;
- this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
- this.shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
- this.transactionDebugContextEnabled = other.transactionDebugContextEnabled;
- this.shardManagerPersistenceId = other.shardManagerPersistenceId;
- this.useTellBasedProtocol = other.useTellBasedProtocol;
- this.backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
- this.requestTimeout = other.requestTimeout;
- this.noProgressTimeout = other.noProgressTimeout;
- this.initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
- this.useLz4Compression = other.useLz4Compression;
+ shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+ operationTimeoutInMillis = other.operationTimeoutInMillis;
+ dataStoreMXBeanType = other.dataStoreMXBeanType;
+ shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+ shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+ shardInitializationTimeout = other.shardInitializationTimeout;
+ shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+ initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier;
+ persistent = other.persistent;
+ snapshotOnRootOverwrite = other.snapshotOnRootOverwrite;
+ configurationReader = other.configurationReader;
+ transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+ dataStoreName = other.dataStoreName;
+ logicalStoreType = other.logicalStoreType;
+ storeRoot = other.storeRoot;
+ shardBatchedModificationCount = other.shardBatchedModificationCount;
+ writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
+ shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
+ transactionDebugContextEnabled = other.transactionDebugContextEnabled;
+ shardManagerPersistenceId = other.shardManagerPersistenceId;
+ backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
+ requestTimeout = other.requestTimeout;
+ noProgressTimeout = other.noProgressTimeout;
+ initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
+ useLz4Compression = other.useLz4Compression;
+ exportOnRecovery = other.exportOnRecovery;
+ recoveryExportBaseDir = other.recoveryExportBaseDir;
setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
setHeartbeatInterval(other.raftConfig.getHeartBeatInterval().toMillis());
setIsolatedLeaderCheckInterval(other.raftConfig.getIsolatedCheckIntervalInMillis());
setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
+ setSnapshotDataThreshold(other.raftConfig.getSnapshotDataThreshold());
setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
setCandidateElectionTimeoutDivisor(other.raftConfig.getCandidateElectionTimeoutDivisor());
setCustomRaftPolicyImplementation(other.raftConfig.getCustomRaftPolicyImplementationClass());
setMaximumMessageSliceSize(other.getMaximumMessageSliceSize());
- setShardSnapshotChunkSize(other.raftConfig.getSnapshotChunkSize());
setPeerAddressResolver(other.raftConfig.getPeerAddressResolver());
setTempFileDirectory(other.getTempFileDirectory());
setFileBackedStreamingThreshold(other.getFileBackedStreamingThreshold());
}
public boolean isSnapshotOnRootOverwrite() {
- return this.snapshotOnRootOverwrite;
+ return snapshotOnRootOverwrite;
}
public AkkaConfigurationReader getConfigurationReader() {
raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
}
+ private void setSnapshotDataThreshold(final int shardSnapshotDataThreshold) {
+ checkArgument(shardSnapshotDataThreshold >= 0);
+ raftConfig.setSnapshotDataThreshold(shardSnapshotDataThreshold);
+ }
+
private void setSnapshotBatchCount(final long shardSnapshotBatchCount) {
raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
}
raftConfig.setRecoverySnapshotIntervalSeconds(recoverySnapshotInterval);
}
- @Deprecated
- private void setShardSnapshotChunkSize(final int shardSnapshotChunkSize) {
- // We'll honor the shardSnapshotChunkSize setting for backwards compatibility but only if it doesn't exceed
- // maximumMessageSliceSize.
- if (shardSnapshotChunkSize < maximumMessageSliceSize) {
- raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize);
- }
- }
-
private void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
- raftConfig.setSnapshotChunkSize(maximumMessageSliceSize);
+ raftConfig.setMaximumMessageSliceSize(maximumMessageSliceSize);
this.maximumMessageSliceSize = maximumMessageSliceSize;
}
return transactionDebugContextEnabled;
}
- public boolean isUseTellBasedProtocol() {
- return useTellBasedProtocol;
- }
-
public boolean isUseLz4Compression() {
return useLz4Compression;
}
+ public ExportOnRecovery getExportOnRecovery() {
+ return exportOnRecovery;
+ }
+
+ public String getRecoveryExportBaseDir() {
+ return recoveryExportBaseDir;
+ }
+
@Override
public int getMaximumMessageSliceSize() {
return maximumMessageSliceSize;
return initialPayloadSerializedBufferCapacity;
}
- public static class Builder implements org.opendaylight.yangtools.concepts.Builder<DatastoreContext> {
+ public static class Builder {
private final DatastoreContext datastoreContext;
Builder(final DatastoreContext datastoreContext) {
return this;
}
+ public Builder shardSnapshotDataThreshold(final int shardSnapshotDataThreshold) {
+ datastoreContext.setSnapshotDataThreshold(shardSnapshotDataThreshold);
+ return this;
+ }
+
public Builder shardHeartbeatIntervalInMillis(final int shardHeartbeatIntervalInMillis) {
datastoreContext.setHeartbeatInterval(shardHeartbeatIntervalInMillis);
return this;
return this;
}
- @Deprecated(forRemoval = true)
- public Builder maxShardDataChangeExecutorPoolSize(final int newMaxShardDataChangeExecutorPoolSize) {
- return this;
- }
-
- @Deprecated(forRemoval = true)
- public Builder maxShardDataChangeExecutorQueueSize(final int newMaxShardDataChangeExecutorQueueSize) {
- return this;
- }
-
- @Deprecated(forRemoval = true)
- public Builder maxShardDataChangeListenerQueueSize(final int newMaxShardDataChangeListenerQueueSize) {
- return this;
- }
-
- @Deprecated(forRemoval = true)
- public Builder maxShardDataStoreExecutorQueueSize(final int newMaxShardDataStoreExecutorQueueSize) {
+ public Builder useLz4Compression(final boolean value) {
+ datastoreContext.useLz4Compression = value;
return this;
}
- public Builder useTellBasedProtocol(final boolean value) {
- datastoreContext.useTellBasedProtocol = value;
+ public Builder exportOnRecovery(final ExportOnRecovery value) {
+ datastoreContext.exportOnRecovery = value;
return this;
}
- public Builder useLz4Compression(final boolean value) {
- datastoreContext.useLz4Compression = value;
+ public Builder recoveryExportBaseDir(final String value) {
+ datastoreContext.recoveryExportBaseDir = value;
return this;
}
return this;
}
- @Deprecated
- public Builder shardSnapshotChunkSize(final int shardSnapshotChunkSize) {
- LOG.warn("The shard-snapshot-chunk-size configuration parameter is deprecated - "
- + "use maximum-message-slice-size instead");
- datastoreContext.setShardSnapshotChunkSize(shardSnapshotChunkSize);
- return this;
- }
-
public Builder maximumMessageSliceSize(final int maximumMessageSliceSize) {
datastoreContext.setMaximumMessageSliceSize(maximumMessageSliceSize);
return this;
return this;
}
- @Override
public DatastoreContext build() {
if (datastoreContext.dataStoreName != null) {
GLOBAL_DATASTORE_NAMES.add(datastoreContext.dataStoreName);
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.primitives.Primitives;
-import java.beans.BeanInfo;
-import java.beans.ConstructorProperties;
-import java.beans.IntrospectionException;
-import java.beans.Introspector;
-import java.beans.MethodDescriptor;
-import java.beans.PropertyDescriptor;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.function.Function;
+import javax.management.ConstructorParameters;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.text.WordUtils;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
import org.opendaylight.yangtools.yang.common.Uint16;
import org.opendaylight.yangtools.yang.common.Uint32;
import org.opendaylight.yangtools.yang.common.Uint64;
introspectDatastoreContextBuilder();
introspectDataStoreProperties();
introspectPrimitiveTypes();
- } catch (final IntrospectionException e) {
+ } catch (final IllegalArgumentException e) {
LOG.error("Error initializing DatastoreContextIntrospector", e);
}
}
private static void introspectPrimitiveTypes() {
final Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
Primitives.allWrapperTypes()).add(String.class).build();
- for (final Class<?> primitive: primitives) {
+ for (final Class<?> primitive : primitives) {
try {
processPropertyType(primitive);
} catch (final NoSuchMethodException e) {
// Ignore primitives that can't be constructed from a String, eg Character and Void.
- } catch (SecurityException | IntrospectionException e) {
+ } catch (SecurityException | IllegalArgumentException e) {
LOG.error("Error introspect primitive type {}", primitive, e);
}
}
* yang grouping. We use the bean Introspector to find the types of all the properties defined
* in the interface (this is the type returned from the getter method). For each type, we find
* the appropriate constructor that we will use.
+ *
+ * @throws IllegalArgumentException if failed to process yang-defined property
*/
- private static void introspectDataStoreProperties() throws IntrospectionException {
- final BeanInfo beanInfo = Introspector.getBeanInfo(DataStoreProperties.class);
- for (final PropertyDescriptor desc: beanInfo.getPropertyDescriptors()) {
- processDataStoreProperty(desc.getName(), desc.getPropertyType(), desc.getReadMethod());
+ private static void introspectDataStoreProperties() {
+ for (final Method method : DataStoreProperties.class.getDeclaredMethods()) {
+ final String propertyName = getPropertyName(method);
+ if (propertyName != null) {
+ processDataStoreProperty(propertyName, method.getReturnType(), method);
+ }
}
+ }
- // Getter methods that return Boolean and start with "is" instead of "get" aren't recognized as
- // properties and thus aren't returned from getPropertyDescriptors. A getter starting with
- // "is" is only supported if it returns primitive boolean. So we'll check for these via
- // getMethodDescriptors.
- for (final MethodDescriptor desc: beanInfo.getMethodDescriptors()) {
- final String methodName = desc.getName();
- if (Boolean.class.equals(desc.getMethod().getReturnType()) && methodName.startsWith("is")) {
- final String propertyName = WordUtils.uncapitalize(methodName.substring(2));
- processDataStoreProperty(propertyName, Boolean.class, desc.getMethod());
- }
+ private static String getPropertyName(final Method method) {
+ final String methodName = method.getName();
+ if (Boolean.class.equals(method.getReturnType()) && methodName.startsWith("is")) {
+ return WordUtils.uncapitalize(methodName.substring(2));
+ } else if (methodName.startsWith("get")) {
+ return WordUtils.uncapitalize(methodName.substring(3));
}
+ return null;
}
/**
/**
* Finds the appropriate constructor for the specified type that we will use to construct
* instances.
+ *
+ * @throws IllegalArgumentException if yang-defined type has no property, annotated by ConstructorParameters
*/
private static void processPropertyType(final Class<?> propertyType)
- throws NoSuchMethodException, SecurityException, IntrospectionException {
+ throws NoSuchMethodException, SecurityException {
final Class<?> wrappedType = Primitives.wrap(propertyType);
if (CONSTRUCTORS.containsKey(wrappedType)) {
return;
// This must be a yang-defined type. We need to find the constructor that takes a
// primitive as the only argument. This will be used to construct instances to perform
// validation (eg range checking). The yang-generated types have a couple single-argument
- // constructors but the one we want has the bean ConstructorProperties annotation.
+ // constructors but the one we want has the ConstructorParameters annotation.
for (final Constructor<?> ctor: propertyType.getConstructors()) {
- final ConstructorProperties ctorPropsAnnotation = ctor.getAnnotation(ConstructorProperties.class);
- if (ctor.getParameterCount() == 1 && ctorPropsAnnotation != null) {
- findYangTypeGetter(propertyType, ctorPropsAnnotation.value()[0]);
+ final ConstructorParameters ctorParAnnotation = ctor.getAnnotation(ConstructorParameters.class);
+ if (ctor.getParameterCount() == 1 && ctorParAnnotation != null) {
+ findYangTypeGetter(propertyType, ctorParAnnotation.value()[0]);
CONSTRUCTORS.put(propertyType, ctor);
break;
}
/**
* Finds the getter method on a yang-generated type for the specified property name.
+ *
+ * @throws IllegalArgumentException if passed type has no passed property
*/
- private static void findYangTypeGetter(final Class<?> type, final String propertyName)
- throws IntrospectionException {
- for (final PropertyDescriptor desc: Introspector.getBeanInfo(type).getPropertyDescriptors()) {
- if (desc.getName().equals(propertyName)) {
- YANG_TYPE_GETTERS.put(type, desc.getReadMethod());
+ private static void findYangTypeGetter(final Class<?> type, final String propertyName) {
+ for (Method method : type.getDeclaredMethods()) {
+ final String property = getPropertyName(method);
+ if (property != null && property.equals(propertyName)) {
+ YANG_TYPE_GETTERS.put(type, method);
return;
}
}
- throw new IntrospectionException(String.format(
+ throw new IllegalArgumentException(String.format(
"Getter method for constructor property %s not found for YANG type %s",
propertyName, type));
}
// Call the setter method on the Builder instance.
final Method setter = BUILDER_SETTERS.get(key);
- setter.invoke(builder, constructorValueRecursively(
- Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+ if (value.getClass().isEnum()) {
+ setter.invoke(builder, value);
+ } else {
+ setter.invoke(builder, constructorValueRecursively(
+ Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+ }
return true;
} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException
LOG.debug("Type for property {}: {}, converting value {} ({})",
name, propertyType.getSimpleName(), from, from.getClass().getSimpleName());
+ if (propertyType.isEnum()) {
+ try {
+ final Method enumConstructor = propertyType.getDeclaredMethod("forName", String.class);
+ if (enumConstructor.getReturnType().equals(propertyType)) {
+ return enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT));
+ }
+ } catch (NoSuchMethodException e) {
+ LOG.error("Error constructing value ({}) for enum {}", from, propertyType);
+ }
+ }
+
// Recurse the chain of constructors depth-first to get the resulting value. Eg, if the
// property type is the yang-generated NonZeroUint32Type, it's constructor takes a Long so
// we have to first construct a Long instance from the input value.
*/
package org.opendaylight.controller.cluster.datastore;
-import org.eclipse.jdt.annotation.NonNull;
+import java.util.Map;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
/**
- * Factory for creating DatastoreContextIntrospector instances.
+ * Factory for creating {@link DatastoreContextIntrospector} instances.
*
* @author Thomas Pantelis
*/
+@NonNullByDefault
public interface DatastoreContextIntrospectorFactory {
- @NonNull DatastoreContextIntrospector newInstance(LogicalDatastoreType datastoreType);
+ /**
+ * Create a new {@link DatastoreContextIntrospector} initialized with specified properties.
+ *
+ * @param datastoreType Datastore type
+ * @param properties optional initial properties
+ * @return A new DatastoreContextIntrospector
+ */
+ DatastoreContextIntrospector newInstance(LogicalDatastoreType datastoreType,
+ @Nullable Map<String, Object> properties);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.datastore;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBean;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
*
* @author Thomas Pantelis
*/
-public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
-
+final class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
private final ActorUtils actorUtils;
- public DatastoreInfoMXBeanImpl(String mxBeanType, ActorUtils actorUtils) {
+ DatastoreInfoMXBeanImpl(final String mxBeanType, final ActorUtils actorUtils) {
super("GeneralRuntimeInfo", mxBeanType, null);
this.actorUtils = actorUtils;
}
-
@Override
public double getTransactionCreationRateLimit() {
return actorUtils.getTxCreationLimit();
}
+
+ @Override
+ public long getAskTimeoutExceptionCount() {
+ return actorUtils.getAskTimeoutExceptionCount();
+ }
+
+ @Override
+ public void resetAskTimeoutExceptionCount() {
+ actorUtils.resetAskTimeoutExceptionCount();
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * An AbstractThreePhaseCommitCohort implementation used for debugging. If a failure occurs, the transaction
- * call site is printed.
- *
- * @author Thomas Pantelis
- */
-class DebugThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
- private static final Logger LOG = LoggerFactory.getLogger(DebugThreePhaseCommitCohort.class);
-
- private final AbstractThreePhaseCommitCohort<?> delegate;
- private final Throwable debugContext;
- private final TransactionIdentifier transactionId;
-
- @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL")
- private Logger log = LOG;
-
- DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId,
- final AbstractThreePhaseCommitCohort<?> delegate, final Throwable debugContext) {
- this.delegate = requireNonNull(delegate);
- this.debugContext = requireNonNull(debugContext);
- this.transactionId = requireNonNull(transactionId);
- }
-
- private <V> ListenableFuture<V> addFutureCallback(final ListenableFuture<V> future) {
- Futures.addCallback(future, new FutureCallback<V>() {
- @Override
- public void onSuccess(final V result) {
- // no-op
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- log.warn("Transaction {} failed with error \"{}\" - was allocated in the following context",
- transactionId, failure, debugContext);
- }
- }, MoreExecutors.directExecutor());
-
- return future;
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return addFutureCallback(delegate.canCommit());
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return addFutureCallback(delegate.preCommit());
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- return addFutureCallback(delegate.commit());
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return delegate.abort();
- }
-
- @SuppressWarnings({ "rawtypes", "unchecked" })
- @Override
- List<Future<Object>> getCohortFutures() {
- return ((AbstractThreePhaseCommitCohort)delegate).getCohortFutures();
- }
-
- @VisibleForTesting
- void setLogger(final Logger logger) {
- this.log = logger;
- }
-}
*/
package org.opendaylight.controller.cluster.datastore;
-import java.util.Collection;
+import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
@Override
- protected void notifyListener(final AbstractDOMDataTreeChangeListenerRegistration<?> registration,
- final Collection<DataTreeCandidate> changes) {
- LOG.debug("{}: notifyListener: listener: {}", logContext, registration.getInstance());
- registration.getInstance().onDataTreeChanged(changes);
+ protected void notifyListener(final Reg registration, final List<DataTreeCandidate> changes) {
+ final var listener = registration.listener();
+ LOG.debug("{}: notifyListener: listener: {}", logContext, listener);
+ listener.onDataTreeChanged(changes);
}
@Override
- protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
+ protected void registrationRemoved(final Reg registration) {
LOG.debug("Registration {} removed", registration);
}
@Override
public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> initialState,
- final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ final Consumer<Registration> onRegistration) {
registerTreeChangeListener(treeId, listener, onRegistration);
if (initialState.isPresent()) {
- notifySingleListener(treeId, listener, initialState.get(), logContext);
+ notifySingleListener(treeId, listener, initialState.orElseThrow(), logContext);
} else {
listener.onInitialData();
}
}
void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
- final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ final Consumer<Registration> onRegistration) {
LOG.debug("{}: registerTreeChangeListener: path: {}, listener: {}", logContext, treeId, listener);
-
- AbstractDOMDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> registration =
- super.registerTreeChangeListener(treeId, listener);
-
- onRegistration.accept(registration);
+ onRegistration.accept(super.registerTreeChangeListener(treeId, listener));
}
static void notifySingleListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-import java.util.EventListener;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
-class DelayedDataTreeChangeListenerRegistration<L extends EventListener> implements ListenerRegistration<L> {
+class DelayedDataTreeChangeListenerRegistration implements Registration {
private final RegisterDataTreeChangeListener registrationMessage;
private final ActorRef registrationActor;
}
}
- @Override
- public L getInstance() {
- // ObjectRegistration annotates this method as @Nonnull but we could return null if the delegate is not set yet.
- // In reality, we do not and should not ever call this method on DelayedDataTreeChangeListenerRegistration
- // instances anyway but, since we have to provide an implementation to satisfy the interface, we throw
- // UnsupportedOperationException to honor the API contract of not returning null and to avoid a FindBugs error
- // for possibly returning null.
- throw new UnsupportedOperationException(
- "getInstance should not be called on this instance since it could be null");
- }
-
@Override
public synchronized void close() {
closed = true;
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import com.google.common.annotations.VisibleForTesting;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Implements a distributed DOMStore using Akka Patterns.ask().
- */
-public class DistributedDataStore extends AbstractDataStore {
-
- private final TransactionContextFactory txContextFactory;
-
- public DistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
- final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
- final DatastoreSnapshot restoreFromSnapshot) {
- super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
- this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
- }
-
- @VisibleForTesting
- DistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
- super(actorUtils, identifier);
- this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
- }
-
-
- @Override
- public DOMStoreTransactionChain createTransactionChain() {
- return txContextFactory.createTransactionChain();
- }
-
- @Override
- public DOMStoreReadTransaction newReadOnlyTransaction() {
- return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- getActorUtils().acquireTxCreationPermit();
- return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
- }
-
- @Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- getActorUtils().acquireTxCreationPermit();
- return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
- }
-
- @Override
- public void close() {
- txContextFactory.close();
- super.close();
- }
-}
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSystem;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.mdsal.dom.api.DOMSchemaService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
updater.setListener(dataStore);
- schemaService.registerSchemaContextListener(dataStore);
+ schemaService.registerSchemaContextListener(dataStore::onModelContextUpdated);
dataStore.setCloseable(updater);
dataStore.waitTillReady();
final String datastoreName = initialDatastoreContext.getDataStoreName();
LOG.info("Create data store instance of type : {}", datastoreName);
- final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
- final DatastoreSnapshot restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null);
+ final var actorSystem = actorSystemProvider.getActorSystem();
+ final var restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null);
final Configuration config;
if (orgConfig == null) {
} else {
config = orgConfig;
}
- final ClusterWrapper clusterWrapper = new ClusterWrapperImpl(actorSystem);
- final DatastoreContextFactory contextFactory = introspector.newContextFactory();
+ final var clusterWrapper = new ClusterWrapperImpl(actorSystem);
+ final var contextFactory = introspector.newContextFactory();
- // This is the potentially-updated datastore context, distinct from the initial one
- final DatastoreContext datastoreContext = contextFactory.getBaseDatastoreContext();
-
- final AbstractDataStore dataStore;
- if (datastoreContext.isUseTellBasedProtocol()) {
- dataStore = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
- restoreFromSnapshot);
- LOG.info("Data store {} is using tell-based protocol", datastoreName);
- } else {
- dataStore = new DistributedDataStore(actorSystem, clusterWrapper, config, contextFactory,
- restoreFromSnapshot);
- LOG.info("Data store {} is using ask-based protocol", datastoreName);
- }
-
- return dataStore;
+ final var ret = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
+ restoreFromSnapshot);
+ LOG.info("Data store {} is using tell-based protocol", datastoreName);
+ return ret;
}
}
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
/**
ActorUtils getActorUtils();
@Beta
- <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
- YangInstanceIdentifier internalPath, DOMDataTreeChangeListener delegate);
-
- @Beta
- <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
- YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard,
+ Registration registerProxyListener(YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard,
DOMDataTreeChangeListener delegate);
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import java.util.Collection;
+import java.util.List;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
@Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
LOG.debug("Sending DataTreeChanged to {}", actor);
actor.tell(new DataTreeChanged(changes), sendingActor);
}
*/
package org.opendaylight.controller.cluster.datastore;
-import static com.google.common.base.Verify.verify;
import static java.util.Objects.requireNonNull;
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.base.VerifyException;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.RangeSet;
-import com.google.common.primitives.UnsignedLong;
-import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
-import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongRangeSet;
-import org.opendaylight.yangtools.concepts.Builder;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is NOT thread-safe.
*/
-abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMetadata>,
- Identifiable<ClientIdentifier> {
+abstract sealed class FrontendClientMetadataBuilder {
static final class Disabled extends FrontendClientMetadataBuilder {
- Disabled(final String shardName, final ClientIdentifier identifier) {
- super(shardName, identifier);
+ Disabled(final String shardName, final ClientIdentifier clientId) {
+ super(shardName, clientId);
}
@Override
- public FrontendClientMetadata build() {
- return new FrontendClientMetadata(getIdentifier(), ImmutableRangeSet.of(), ImmutableList.of());
+ FrontendClientMetadata build() {
+ return new FrontendClientMetadata(clientId(), ImmutableUnsignedLongSet.of(), ImmutableList.of());
}
@Override
// No-op
}
+ @Override
+ void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+ // No-op
+ }
+
@Override
LeaderFrontendState toLeaderState(final Shard shard) {
- return new LeaderFrontendState.Disabled(shard.persistenceId(), getIdentifier(), shard.getDataStore());
+ return new LeaderFrontendState.Disabled(shard.persistenceId(), clientId(), shard.getDataStore());
}
}
static final class Enabled extends FrontendClientMetadataBuilder {
-
private final Map<LocalHistoryIdentifier, FrontendHistoryMetadataBuilder> currentHistories = new HashMap<>();
- private final UnsignedLongRangeSet purgedHistories;
+ private final MutableUnsignedLongSet purgedHistories;
private final LocalHistoryIdentifier standaloneId;
- Enabled(final String shardName, final ClientIdentifier identifier) {
- super(shardName, identifier);
+ Enabled(final String shardName, final ClientIdentifier clientId) {
+ super(shardName, clientId);
- purgedHistories = UnsignedLongRangeSet.create();
+ purgedHistories = MutableUnsignedLongSet.of();
// History for stand-alone transactions is always present
standaloneId = standaloneHistoryId();
}
Enabled(final String shardName, final FrontendClientMetadata meta) {
- super(shardName, meta.getIdentifier());
+ super(shardName, meta.clientId());
- purgedHistories = UnsignedLongRangeSet.create(meta.getPurgedHistories());
- for (FrontendHistoryMetadata h : meta.getCurrentHistories()) {
- final FrontendHistoryMetadataBuilder b = new FrontendHistoryMetadataBuilder(getIdentifier(), h);
- currentHistories.put(b.getIdentifier(), b);
+ purgedHistories = meta.getPurgedHistories().mutableCopy();
+ for (var historyMeta : meta.getCurrentHistories()) {
+ final var builder = new FrontendHistoryMetadataBuilder(clientId(), historyMeta);
+ currentHistories.put(builder.getIdentifier(), builder);
}
// Sanity check and recovery
standaloneId = standaloneHistoryId();
if (!currentHistories.containsKey(standaloneId)) {
LOG.warn("{}: Client {} recovered histories {} do not contain stand-alone history, attempting recovery",
- shardName, getIdentifier(), currentHistories);
+ shardName, clientId(), currentHistories);
currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
}
}
@Override
- public FrontendClientMetadata build() {
- return new FrontendClientMetadata(getIdentifier(), purgedHistories.toImmutable(),
+ FrontendClientMetadata build() {
+ return new FrontendClientMetadata(clientId(), purgedHistories.immutableCopy(),
Collections2.transform(currentHistories.values(), FrontendHistoryMetadataBuilder::build));
}
@Override
void onHistoryCreated(final LocalHistoryIdentifier historyId) {
- final FrontendHistoryMetadataBuilder newMeta = new FrontendHistoryMetadataBuilder(historyId);
- final FrontendHistoryMetadataBuilder oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
+ final var newMeta = new FrontendHistoryMetadataBuilder(historyId);
+ final var oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
if (oldMeta != null) {
// This should not be happening, warn about it
LOG.warn("{}: Reused local history {}", shardName(), historyId);
@Override
void onHistoryClosed(final LocalHistoryIdentifier historyId) {
- final FrontendHistoryMetadataBuilder builder = currentHistories.get(historyId);
+ final var builder = currentHistories.get(historyId);
if (builder != null) {
builder.onHistoryClosed();
LOG.debug("{}: Closed history {}", shardName(), historyId);
@Override
void onHistoryPurged(final LocalHistoryIdentifier historyId) {
- final FrontendHistoryMetadataBuilder history = currentHistories.remove(historyId);
+ final var history = currentHistories.remove(historyId);
final long historyBits = historyId.getHistoryId();
if (history == null) {
if (!purgedHistories.contains(historyBits)) {
@Override
void onTransactionAborted(final TransactionIdentifier txId) {
- final FrontendHistoryMetadataBuilder history = getHistory(txId);
+ final var history = getHistory(txId);
if (history != null) {
history.onTransactionAborted(txId);
LOG.debug("{}: Aborted transaction {}", shardName(), txId);
@Override
void onTransactionCommitted(final TransactionIdentifier txId) {
- final FrontendHistoryMetadataBuilder history = getHistory(txId);
+ final var history = getHistory(txId);
if (history != null) {
history.onTransactionCommitted(txId);
LOG.debug("{}: Committed transaction {}", shardName(), txId);
@Override
void onTransactionPurged(final TransactionIdentifier txId) {
- final FrontendHistoryMetadataBuilder history = getHistory(txId);
+ final var history = getHistory(txId);
if (history != null) {
history.onTransactionPurged(txId);
LOG.debug("{}: Purged transaction {}", shardName(), txId);
}
}
+ @Override
+ void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+ final FrontendHistoryMetadataBuilder history = getHistory(historyId);
+ if (history != null) {
+ history.onTransactionsSkipped(txIds);
+ LOG.debug("{}: History {} skipped transactions {}", shardName(), historyId, txIds);
+ } else {
+ LOG.warn("{}: Unknown history {} for skipped transactions, ignoring", shardName(), historyId);
+ }
+ }
+
@Override
LeaderFrontendState toLeaderState(final Shard shard) {
// Note: we have to make sure to *copy* all current state and not leak any views, otherwise leader/follower
// interactions would get intertwined leading to inconsistencies.
- final Map<LocalHistoryIdentifier, LocalFrontendHistory> histories = new HashMap<>();
- for (FrontendHistoryMetadataBuilder e : currentHistories.values()) {
- if (e.getIdentifier().getHistoryId() != 0) {
- final AbstractFrontendHistory state = e.toLeaderState(shard);
- verify(state instanceof LocalFrontendHistory, "Unexpected state %s", state);
- histories.put(e.getIdentifier(), (LocalFrontendHistory) state);
+ final var histories = new HashMap<LocalHistoryIdentifier, LocalFrontendHistory>();
+ for (var historyMetaBuilder : currentHistories.values()) {
+ final var historyId = historyMetaBuilder.getIdentifier();
+ if (historyId.getHistoryId() != 0) {
+ final var state = historyMetaBuilder.toLeaderState(shard);
+ if (state instanceof LocalFrontendHistory localState) {
+ histories.put(historyId, localState);
+ } else {
+ throw new VerifyException("Unexpected state " + state);
+ }
}
}
final AbstractFrontendHistory singleHistory;
- final FrontendHistoryMetadataBuilder singleHistoryMeta = currentHistories.get(
- new LocalHistoryIdentifier(getIdentifier(), 0));
+ final var singleHistoryMeta = currentHistories.get(new LocalHistoryIdentifier(clientId(), 0));
if (singleHistoryMeta == null) {
- final ShardDataTree tree = shard.getDataStore();
- singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), getIdentifier(), tree);
+ final var tree = shard.getDataStore();
+ singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), clientId(), tree);
} else {
singleHistory = singleHistoryMeta.toLeaderState(shard);
}
- return new LeaderFrontendState.Enabled(shard.persistenceId(), getIdentifier(), shard.getDataStore(),
- purgedHistories.copy(), singleHistory, histories);
+ return new LeaderFrontendState.Enabled(shard.persistenceId(), clientId(), shard.getDataStore(),
+ purgedHistories.mutableCopy(), singleHistory, histories);
}
@Override
}
private FrontendHistoryMetadataBuilder getHistory(final TransactionIdentifier txId) {
- LocalHistoryIdentifier historyId = txId.getHistoryId();
+ return getHistory(txId.getHistoryId());
+ }
+
+ private FrontendHistoryMetadataBuilder getHistory(final LocalHistoryIdentifier historyId) {
+ final LocalHistoryIdentifier local;
if (historyId.getHistoryId() == 0 && historyId.getCookie() != 0) {
// We are pre-creating the history for free-standing transactions with a zero cookie, hence our lookup
// needs to account for that.
LOG.debug("{}: looking up {} instead of {}", shardName(), standaloneId, historyId);
- historyId = standaloneId;
+ local = standaloneId;
+ } else {
+ local = historyId;
}
- return currentHistories.get(historyId);
+ return currentHistories.get(local);
}
private LocalHistoryIdentifier standaloneHistoryId() {
- return new LocalHistoryIdentifier(getIdentifier(), 0);
+ return new LocalHistoryIdentifier(clientId(), 0);
}
}
private static final Logger LOG = LoggerFactory.getLogger(FrontendClientMetadataBuilder.class);
- private final ClientIdentifier identifier;
- private final String shardName;
+ private final @NonNull ClientIdentifier clientId;
+ private final @NonNull String shardName;
- FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier identifier) {
+ FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier clientId) {
this.shardName = requireNonNull(shardName);
- this.identifier = requireNonNull(identifier);
+ this.clientId = requireNonNull(clientId);
}
static FrontendClientMetadataBuilder of(final String shardName, final FrontendClientMetadata meta) {
- final Collection<FrontendHistoryMetadata> current = meta.getCurrentHistories();
- final RangeSet<UnsignedLong> purged = meta.getPurgedHistories();
-
// Completely empty histories imply disabled state, as otherwise we'd have a record of the single history --
// either purged or active
- return current.isEmpty() && purged.isEmpty() ? new Disabled(shardName, meta.getIdentifier())
- : new Enabled(shardName, meta);
+ return meta.getCurrentHistories().isEmpty() && meta.getPurgedHistories().isEmpty()
+ ? new Disabled(shardName, meta.clientId()) : new Enabled(shardName, meta);
}
- @Override
- public final ClientIdentifier getIdentifier() {
- return identifier;
+ final ClientIdentifier clientId() {
+ return clientId;
}
final String shardName() {
return shardName;
}
+ abstract FrontendClientMetadata build();
+
abstract void onHistoryCreated(LocalHistoryIdentifier historyId);
abstract void onHistoryClosed(LocalHistoryIdentifier historyId);
abstract void onTransactionPurged(TransactionIdentifier txId);
+ abstract void onTransactionsSkipped(LocalHistoryIdentifier historyId, ImmutableUnsignedLongSet txIds);
+
/**
* Transform frontend metadata for a particular client into its {@link LeaderFrontendState} counterpart.
*
}
ToStringHelper addToStringAttributes(final ToStringHelper helper) {
- return helper.add("identifier", identifier);
+ return helper.add("clientId", clientId);
}
}
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
import com.google.common.primitives.UnsignedLong;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
-import org.opendaylight.yangtools.concepts.Builder;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
import org.opendaylight.yangtools.concepts.Identifiable;
-final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMetadata>,
- Identifiable<LocalHistoryIdentifier> {
-
- private final Map<UnsignedLong, Boolean> closedTransactions;
- private final RangeSet<UnsignedLong> purgedTransactions;
- private final LocalHistoryIdentifier identifier;
+final class FrontendHistoryMetadataBuilder implements Identifiable<LocalHistoryIdentifier> {
+ private final @NonNull Map<UnsignedLong, Boolean> closedTransactions;
+ private final @NonNull MutableUnsignedLongSet purgedTransactions;
+ private final @NonNull LocalHistoryIdentifier identifier;
private boolean closed;
FrontendHistoryMetadataBuilder(final LocalHistoryIdentifier identifier) {
this.identifier = requireNonNull(identifier);
- this.purgedTransactions = TreeRangeSet.create();
- this.closedTransactions = new HashMap<>(2);
+ purgedTransactions = MutableUnsignedLongSet.of();
+ closedTransactions = new HashMap<>(2);
}
FrontendHistoryMetadataBuilder(final ClientIdentifier clientId, final FrontendHistoryMetadata meta) {
identifier = new LocalHistoryIdentifier(clientId, meta.getHistoryId(), meta.getCookie());
- closedTransactions = new HashMap<>(meta.getClosedTransactions());
- purgedTransactions = TreeRangeSet.create(meta.getPurgedTransactions());
+ closedTransactions = meta.getClosedTransactions().mutableCopy();
+ purgedTransactions = meta.getPurgedTransactions().mutableCopy();
closed = meta.isClosed();
}
return identifier;
}
- @Override
public FrontendHistoryMetadata build() {
return new FrontendHistoryMetadata(identifier.getHistoryId(), identifier.getCookie(), closed,
- closedTransactions, purgedTransactions);
+ UnsignedLongBitmap.copyOf(closedTransactions), purgedTransactions.immutableCopy());
}
void onHistoryClosed() {
}
void onTransactionPurged(final TransactionIdentifier txId) {
- final UnsignedLong id = UnsignedLong.fromLongBits(txId.getTransactionId());
- closedTransactions.remove(id);
- purgedTransactions.add(Range.closedOpen(id, UnsignedLong.ONE.plus(id)));
+ final long txidBits = txId.getTransactionId();
+ closedTransactions.remove(UnsignedLong.fromLongBits(txidBits));
+ purgedTransactions.add(txidBits);
+ }
+
+ void onTransactionsSkipped(final ImmutableUnsignedLongSet txIds) {
+ purgedTransactions.addAll(txIds);
}
/**
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
LOG.debug("{}: applying snapshot {} over clients {}", shardName, snapshot, clients);
clients.clear();
- for (FrontendClientMetadata m : snapshot.getClients()) {
- LOG.debug("{}: applying metadata {}", shardName, m);
- final FrontendClientMetadataBuilder b = FrontendClientMetadataBuilder.of(shardName, m);
- final FrontendIdentifier client = m.getIdentifier().getFrontendId();
+ for (var clientMeta : snapshot.getClients()) {
+ LOG.debug("{}: applying metadata {}", shardName, clientMeta);
+ final var builder = FrontendClientMetadataBuilder.of(shardName, clientMeta);
+ final var frontendId = clientMeta.clientId().getFrontendId();
- LOG.debug("{}: client {} updated to {}", shardName, client, b);
- clients.put(client, b);
+ LOG.debug("{}: client {} updated to {}", shardName, frontendId, builder);
+ clients.put(frontendId, builder);
}
}
}
private FrontendClientMetadataBuilder ensureClient(final ClientIdentifier id) {
- final FrontendClientMetadataBuilder existing = clients.get(id.getFrontendId());
- if (existing != null && id.equals(existing.getIdentifier())) {
+ final var existing = clients.get(id.getFrontendId());
+ if (existing != null && id.equals(existing.clientId())) {
return existing;
}
- final FrontendClientMetadataBuilder client = new FrontendClientMetadataBuilder.Enabled(shardName, id);
- final FrontendClientMetadataBuilder previous = clients.put(id.getFrontendId(), client);
+ final var client = new FrontendClientMetadataBuilder.Enabled(shardName, id);
+ final var previous = clients.put(id.getFrontendId(), client);
if (previous != null) {
LOG.debug("{}: Replaced client {} with {}", shardName, previous, client);
} else {
ensureClient(txId.getHistoryId().getClientId()).onTransactionPurged(txId);
}
+ @Override
+ void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+ ensureClient(historyId.getClientId()).onTransactionsSkipped(historyId, txIds);
+ }
+
/**
* Transform frontend metadata into an active leader state map.
*
}
void disableTracking(final ClientIdentifier clientId) {
- final FrontendIdentifier frontendId = clientId.getFrontendId();
- final FrontendClientMetadataBuilder client = clients.get(frontendId);
+ final var frontendId = clientId.getFrontendId();
+ final var client = clients.get(frontendId);
if (client == null) {
- // When we havent seen the client before, we still need to disable tracking for him since this only gets
+ // When we have not seen the client before, we still need to disable tracking for him since this only gets
// triggered once.
LOG.debug("{}: disableTracking {} does not match any client, pre-disabling client.", shardName, clientId);
clients.put(frontendId, new FrontendClientMetadataBuilder.Disabled(shardName, clientId));
return;
}
- if (!clientId.equals(client.getIdentifier())) {
+ if (!clientId.equals(client.clientId())) {
LOG.debug("{}: disableTracking {} does not match client {}, ignoring", shardName, clientId, client);
return;
}
ImmutableSet<ClientIdentifier> getClients() {
return clients.values().stream()
- .map(FrontendClientMetadataBuilder::getIdentifier)
- .collect(ImmutableSet.toImmutableSet());
+ .map(FrontendClientMetadataBuilder::clientId)
+ .collect(ImmutableSet.toImmutableSet());
}
}
private FrontendReadOnlyTransaction(final AbstractFrontendHistory history,
final ReadOnlyShardDataTreeTransaction transaction) {
super(history, transaction.getIdentifier());
- this.openTransaction = requireNonNull(transaction);
+ openTransaction = requireNonNull(transaction);
}
static FrontendReadOnlyTransaction create(final AbstractFrontendHistory history,
// The only valid request here is with abort protocol
final Optional<PersistenceProtocol> optProto = request.getPersistenceProtocol();
checkArgument(optProto.isPresent(), "Commit protocol is missing in %s", request);
- checkArgument(optProto.get() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s", request);
+ checkArgument(optProto.orElseThrow() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s",
+ request);
openTransaction.abort(() -> recordAndSendSuccess(envelope, now,
new ModifyTransactionSuccess(request.getTarget(), request.getSequence())));
}
private ExistsTransactionSuccess handleExistsTransaction(final ExistsTransactionRequest request) {
- final Optional<NormalizedNode<?, ?>> data = openTransaction.getSnapshot().readNode(request.getPath());
+ final Optional<NormalizedNode> data = openTransaction.getSnapshot().readNode(request.getPath());
return recordSuccess(request.getSequence(), new ExistsTransactionSuccess(openTransaction.getIdentifier(),
request.getSequence(), data.isPresent()));
}
private ReadTransactionSuccess handleReadTransaction(final ReadTransactionRequest request) {
- final Optional<NormalizedNode<?, ?>> data = openTransaction.getSnapshot().readNode(request.getPath());
+ final Optional<NormalizedNode> data = openTransaction.getSnapshot().readNode(request.getPath());
return recordSuccess(request.getSequence(), new ReadTransactionSuccess(openTransaction.getIdentifier(),
request.getSequence(), data));
}
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Ready(final ShardDataTreeCohort readyCohort) {
this.readyCohort = requireNonNull(readyCohort);
- this.stage = CommitStage.READY;
+ stage = CommitStage.READY;
}
@Override
private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
final ReadWriteShardDataTreeTransaction transaction) {
super(history, id);
- this.state = new Open(transaction);
+ state = new Open(transaction);
}
private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
final DataTreeModification mod) {
super(history, id);
- this.state = new Sealed(mod);
+ state = new Sealed(mod);
}
static FrontendReadWriteTransaction createOpen(final AbstractFrontendHistory history,
@Override
TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
final long now) throws RequestException {
- if (request instanceof ModifyTransactionRequest) {
- return handleModifyTransaction((ModifyTransactionRequest) request, envelope, now);
- } else if (request instanceof CommitLocalTransactionRequest) {
- handleCommitLocalTransaction((CommitLocalTransactionRequest) request, envelope, now);
+ if (request instanceof ModifyTransactionRequest modifyRequest) {
+ return handleModifyTransaction(modifyRequest, envelope, now);
+ } else if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
+ handleCommitLocalTransaction(commitLocalRequest, envelope, now);
return null;
- } else if (request instanceof ExistsTransactionRequest) {
- return handleExistsTransaction((ExistsTransactionRequest) request);
- } else if (request instanceof ReadTransactionRequest) {
- return handleReadTransaction((ReadTransactionRequest) request);
- } else if (request instanceof TransactionPreCommitRequest) {
- handleTransactionPreCommit((TransactionPreCommitRequest) request, envelope, now);
+ } else if (request instanceof ExistsTransactionRequest existsRequest) {
+ return handleExistsTransaction(existsRequest);
+ } else if (request instanceof ReadTransactionRequest readRequest) {
+ return handleReadTransaction(readRequest);
+ } else if (request instanceof TransactionPreCommitRequest preCommitRequest) {
+ handleTransactionPreCommit(preCommitRequest, envelope, now);
return null;
- } else if (request instanceof TransactionDoCommitRequest) {
- handleTransactionDoCommit((TransactionDoCommitRequest) request, envelope, now);
+ } else if (request instanceof TransactionDoCommitRequest doCommitRequest) {
+ handleTransactionDoCommit(doCommitRequest, envelope, now);
return null;
} else if (request instanceof TransactionAbortRequest) {
return handleTransactionAbort(request.getSequence(), envelope, now);
final Ready ready = checkReady();
startAbort();
- ready.readyCohort.abort(new FutureCallback<Void>() {
+ ready.readyCohort.abort(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
+ public void onSuccess(final Empty result) {
recordAndSendSuccess(envelope, now, new TransactionAbortSuccess(getIdentifier(), sequence));
finishAbort();
}
case READY:
ready.stage = CommitStage.CAN_COMMIT_PENDING;
LOG.debug("{}: Transaction {} initiating canCommit", persistenceId(), getIdentifier());
- checkReady().readyCohort.canCommit(new FutureCallback<Void>() {
+ checkReady().readyCohort.canCommit(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
+ public void onSuccess(final Empty result) {
successfulCanCommit(envelope, now);
}
case READY:
ready.stage = CommitStage.CAN_COMMIT_PENDING;
LOG.debug("{}: Transaction {} initiating direct canCommit", persistenceId(), getIdentifier());
- ready.readyCohort.canCommit(new FutureCallback<Void>() {
+ ready.readyCohort.canCommit(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
+ public void onSuccess(final Empty result) {
successfulDirectCanCommit(envelope, now);
}
final Optional<Exception> optFailure = request.getDelayedFailure();
if (optFailure.isPresent()) {
- state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification, optFailure.get()));
+ state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification,
+ optFailure.orElseThrow()));
} else {
state = new Ready(history().createReadyCohort(getIdentifier(), sealedModification, Optional.empty()));
}
}
private ExistsTransactionSuccess handleExistsTransaction(final ExistsTransactionRequest request) {
- final Optional<NormalizedNode<?, ?>> data = checkOpen().getSnapshot().readNode(request.getPath());
+ final Optional<NormalizedNode> data = checkOpen().getSnapshot().readNode(request.getPath());
return recordSuccess(request.getSequence(), new ExistsTransactionSuccess(getIdentifier(), request.getSequence(),
data.isPresent()));
}
private ReadTransactionSuccess handleReadTransaction(final ReadTransactionRequest request) {
- final Optional<NormalizedNode<?, ?>> data = checkOpen().getSnapshot().readNode(request.getPath());
+ final Optional<NormalizedNode> data = checkOpen().getSnapshot().readNode(request.getPath());
return recordSuccess(request.getSequence(), new ReadTransactionSuccess(getIdentifier(), request.getSequence(),
data));
}
for (TransactionModification m : modifications) {
if (m instanceof TransactionDelete) {
modification.delete(m.getPath());
- } else if (m instanceof TransactionWrite) {
- modification.write(m.getPath(), ((TransactionWrite) m).getData());
- } else if (m instanceof TransactionMerge) {
- modification.merge(m.getPath(), ((TransactionMerge) m).getData());
+ } else if (m instanceof TransactionWrite write) {
+ modification.write(m.getPath(), write.getData());
+ } else if (m instanceof TransactionMerge merge) {
+ modification.merge(m.getPath(), merge.getData());
} else {
LOG.warn("{}: ignoring unhandled modification {}", persistenceId(), m);
}
return replyModifySuccess(request.getSequence());
}
- switch (maybeProto.get()) {
+ switch (maybeProto.orElseThrow()) {
case ABORT:
if (ABORTING.equals(state)) {
LOG.debug("{}: Transaction {} already aborting", persistenceId(), getIdentifier());
coordinatedCommit(envelope, now);
return null;
default:
- LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.get());
+ LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.orElseThrow());
throw new UnsupportedRequestException(request);
}
}
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.Map;
+import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.DeadHistoryException;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
-import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongRangeSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Frontend state as observed by the shard leader. This class is responsible for tracking generations and sequencing
* in the frontend/backend conversation. This class is NOT thread-safe.
- *
- * @author Robert Varga
*/
-abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
+abstract sealed class LeaderFrontendState implements Identifiable<ClientIdentifier> {
static final class Disabled extends LeaderFrontendState {
Disabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
super(persistenceId, clientId, tree);
// Histories which have not been purged
private final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories;
- // RangeSet performs automatic merging, hence we keep minimal state tracking information
- private final UnsignedLongRangeSet purgedHistories;
+ // UnsignedLongSet performs automatic merging, hence we keep minimal state tracking information
+ private final MutableUnsignedLongSet purgedHistories;
// Used for all standalone transactions
private final AbstractFrontendHistory standaloneHistory;
private Long lastSeenHistory = null;
Enabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
- this(persistenceId, clientId, tree, UnsignedLongRangeSet.create(),
+ this(persistenceId, clientId, tree, MutableUnsignedLongSet.of(),
StandaloneFrontendHistory.create(persistenceId, clientId, tree), new HashMap<>());
}
Enabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree,
- final UnsignedLongRangeSet purgedHistories, final AbstractFrontendHistory standaloneHistory,
+ final MutableUnsignedLongSet purgedHistories, final AbstractFrontendHistory standaloneHistory,
final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories) {
super(persistenceId, clientId, tree);
this.purgedHistories = requireNonNull(purgedHistories);
checkRequestSequence(envelope);
try {
- if (request instanceof CreateLocalHistoryRequest) {
- return handleCreateHistory((CreateLocalHistoryRequest) request, envelope, now);
- } else if (request instanceof DestroyLocalHistoryRequest) {
- return handleDestroyHistory((DestroyLocalHistoryRequest) request, envelope, now);
- } else if (request instanceof PurgeLocalHistoryRequest) {
- return handlePurgeHistory((PurgeLocalHistoryRequest)request, envelope, now);
+ if (request instanceof CreateLocalHistoryRequest req) {
+ return handleCreateHistory(req, envelope, now);
+ } else if (request instanceof DestroyLocalHistoryRequest req) {
+ return handleDestroyHistory(req, envelope, now);
+ } else if (request instanceof PurgeLocalHistoryRequest req) {
+ return handlePurgeHistory(req, envelope, now);
} else {
LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
throw new UnsupportedRequestException(request);
checkRequestSequence(envelope);
try {
- final LocalHistoryIdentifier lhId = request.getTarget().getHistoryId();
+ final var lhId = request.getTarget().getHistoryId();
final AbstractFrontendHistory history;
if (lhId.getHistoryId() != 0) {
if (history == null) {
if (purgedHistories.contains(lhId.getHistoryId())) {
LOG.warn("{}: rejecting request {} to purged history", persistenceId(), request);
- throw new DeadHistoryException(purgedHistories.toImmutable());
+ throw new DeadHistoryException(purgedHistories.toRangeSet());
}
LOG.warn("{}: rejecting unknown history request {}", persistenceId(), request);
private LocalHistorySuccess handleCreateHistory(final CreateLocalHistoryRequest request,
final RequestEnvelope envelope, final long now) throws RequestException {
- final LocalHistoryIdentifier historyId = request.getTarget();
- final AbstractFrontendHistory existing = localHistories.get(historyId);
+ final var historyId = request.getTarget();
+ final var existing = localHistories.get(historyId);
if (existing != null) {
// History already exists: report success
LOG.debug("{}: history {} already exists", persistenceId(), historyId);
// not end up resurrecting a purged history.
if (purgedHistories.contains(historyId.getHistoryId())) {
LOG.debug("{}: rejecting purged request {}", persistenceId(), request);
- throw new DeadHistoryException(purgedHistories.toImmutable());
+ throw new DeadHistoryException(purgedHistories.toRangeSet());
}
// Update last history we have seen
}
// We have to send the response only after persistence has completed
- final ShardDataTreeTransactionChain chain = tree().ensureTransactionChain(historyId, () -> {
+ final var chain = tree().ensureTransactionChain(historyId, () -> {
LOG.debug("{}: persisted history {}", persistenceId(), historyId);
envelope.sendSuccess(new LocalHistorySuccess(historyId, request.getSequence()),
tree().readTime() - now);
private LocalHistorySuccess handleDestroyHistory(final DestroyLocalHistoryRequest request,
final RequestEnvelope envelope, final long now) {
- final LocalHistoryIdentifier id = request.getTarget();
- final LocalFrontendHistory existing = localHistories.get(id);
+ final var id = request.getTarget();
+ final var existing = localHistories.get(id);
if (existing == null) {
// History does not exist: report success
LOG.debug("{}: history {} does not exist, nothing to destroy", persistenceId(), id);
private LocalHistorySuccess handlePurgeHistory(final PurgeLocalHistoryRequest request,
final RequestEnvelope envelope, final long now) {
- final LocalHistoryIdentifier id = request.getTarget();
- final LocalFrontendHistory existing = localHistories.remove(id);
+ final var id = request.getTarget();
+ final var existing = localHistories.remove(id);
if (existing == null) {
LOG.debug("{}: history {} has already been purged", persistenceId(), id);
return new LocalHistorySuccess(id, request.getSequence());
private static final Logger LOG = LoggerFactory.getLogger(LeaderFrontendState.class);
- private final ShardDataTree tree;
- private final ClientIdentifier clientId;
- private final String persistenceId;
+ private final @NonNull ClientIdentifier clientId;
+ private final @NonNull String persistenceId;
+ private final @NonNull ShardDataTree tree;
private long lastConnectTicks;
private long lastSeenTicks;
this.persistenceId = requireNonNull(persistenceId);
this.clientId = requireNonNull(clientId);
this.tree = requireNonNull(tree);
- this.lastSeenTicks = tree.readTime();
+ lastSeenTicks = tree.readTime();
}
@Override
}
final void touch() {
- this.lastSeenTicks = tree.readTime();
+ lastSeenTicks = tree.readTime();
}
abstract @Nullable LocalHistorySuccess handleLocalHistoryRequest(LocalHistoryRequest<?> request,
void retire() {
// Hunt down any transactions associated with this frontend
- final Iterator<SimpleShardDataTreeCohort> it = tree.cohortIterator();
+ final var it = tree.cohortIterator();
while (it.hasNext()) {
- final SimpleShardDataTreeCohort cohort = it.next();
- if (clientId.equals(cohort.getIdentifier().getHistoryId().getClientId())) {
+ final var cohort = it.next();
+ final var transactionId = cohort.transactionId();
+ if (clientId.equals(transactionId.getHistoryId().getClientId())) {
if (cohort.getState() != State.COMMIT_PENDING) {
- LOG.debug("{}: Retiring transaction {}", persistenceId, cohort.getIdentifier());
+ LOG.debug("{}: Retiring transaction {}", persistenceId, transactionId);
it.remove();
} else {
- LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId,
- cohort.getIdentifier());
+ LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId, transactionId);
}
}
}
import static java.util.Objects.requireNonNull;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
import com.google.common.primitives.UnsignedLong;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* Chained transaction specialization of {@link AbstractFrontendHistory}. It prevents concurrent open transactions.
private LocalFrontendHistory(final String persistenceId, final ShardDataTree tree,
final ShardDataTreeTransactionChain chain, final Map<UnsignedLong, Boolean> closedTransactions,
- final RangeSet<UnsignedLong> purgedTransactions) {
+ final MutableUnsignedLongSet purgedTransactions) {
super(persistenceId, tree, closedTransactions, purgedTransactions);
this.chain = requireNonNull(chain);
}
static LocalFrontendHistory create(final String persistenceId, final ShardDataTree tree,
final ShardDataTreeTransactionChain chain) {
- return new LocalFrontendHistory(persistenceId, tree, chain, ImmutableMap.of(), TreeRangeSet.create());
+ return new LocalFrontendHistory(persistenceId, tree, chain, ImmutableMap.of(), MutableUnsignedLongSet.of());
}
static LocalFrontendHistory recreate(final String persistenceId, final ShardDataTree tree,
final ShardDataTreeTransactionChain chain, final Map<UnsignedLong, Boolean> closedTransactions,
- final RangeSet<UnsignedLong> purgedTransactions) {
+ final MutableUnsignedLongSet purgedTransactions) {
return new LocalFrontendHistory(persistenceId, tree, chain, new HashMap<>(closedTransactions),
- TreeRangeSet.create(purgedTransactions));
+ purgedTransactions.mutableCopy());
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Fake {@link DOMStoreThreePhaseCommitCohort} instantiated for local transactions to conform with the DOM
- * transaction APIs. It is only used to hold the data from a local DOM transaction ready operation and to
- * initiate direct or coordinated commits from the front-end by sending the ReadyLocalTransaction message.
- * It is not actually called by the front-end to perform 3PC thus the canCommit/preCommit/commit methods
- * are no-ops.
- */
-class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
- private static final Logger LOG = LoggerFactory.getLogger(LocalThreePhaseCommitCohort.class);
-
- private final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction;
- private final DataTreeModification modification;
- private final ActorUtils actorUtils;
- private final ActorSelection leader;
- private final Exception operationError;
-
- protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
- final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
- final DataTreeModification modification,
- final Exception operationError) {
- this.actorUtils = requireNonNull(actorUtils);
- this.leader = requireNonNull(leader);
- this.transaction = requireNonNull(transaction);
- this.modification = requireNonNull(modification);
- this.operationError = operationError;
- }
-
- protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
- final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction, final Exception operationError) {
- this.actorUtils = requireNonNull(actorUtils);
- this.leader = requireNonNull(leader);
- this.transaction = requireNonNull(transaction);
- this.operationError = requireNonNull(operationError);
- this.modification = null;
- }
-
- private Future<Object> initiateCommit(final boolean immediate,
- final Optional<SortedSet<String>> participatingShardNames) {
- if (operationError != null) {
- return Futures.failed(operationError);
- }
-
- final ReadyLocalTransaction message = new ReadyLocalTransaction(transaction.getIdentifier(),
- modification, immediate, participatingShardNames);
- return actorUtils.executeOperationAsync(leader, message, actorUtils.getTransactionCommitOperationTimeout());
- }
-
- Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
- final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
- final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorUtils,
- transaction.getIdentifier());
- ret.onComplete(new OnComplete<ActorSelection>() {
- @Override
- public void onComplete(final Throwable failure, final ActorSelection success) {
- if (failure != null) {
- LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
- transactionAborted(transaction);
- return;
- }
-
- LOG.debug("Transaction {} resolved to actor {}", transaction.getIdentifier(), success);
- }
- }, actorUtils.getClientDispatcher());
-
- return ret;
- }
-
- Future<Object> initiateDirectCommit() {
- final Future<Object> messageFuture = initiateCommit(true, Optional.empty());
- messageFuture.onComplete(new OnComplete<Object>() {
- @Override
- public void onComplete(final Throwable failure, final Object message) {
- if (failure != null) {
- LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
- transactionAborted(transaction);
- } else if (CommitTransactionReply.isSerializedType(message)) {
- LOG.debug("Transaction {} committed successfully", transaction.getIdentifier());
- transactionCommitted(transaction);
- } else {
- LOG.error("Transaction {} resulted in unhandled message type {}, aborting",
- transaction.getIdentifier(), message.getClass());
- transactionAborted(transaction);
- }
- }
- }, actorUtils.getClientDispatcher());
-
- return messageFuture;
- }
-
- @Override
- public final ListenableFuture<Boolean> canCommit() {
- // Intended no-op
- throw new UnsupportedOperationException();
- }
-
- @Override
- public final ListenableFuture<Void> preCommit() {
- // Intended no-op
- throw new UnsupportedOperationException();
- }
-
- @Override
- public final ListenableFuture<Void> abort() {
- // Intended no-op
- throw new UnsupportedOperationException();
- }
-
- @Override
- public final ListenableFuture<Void> commit() {
- // Intended no-op
- throw new UnsupportedOperationException();
- }
-
- protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
- }
-
- protected void transactionCommitted(final SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.AbstractSnapshotBackedTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-
-/**
- * Transaction chain instantiated on top of a locally-available DataTree. It does not instantiate
- * a transaction in the leader and rather chains transactions on top of themselves.
- */
-final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain<TransactionIdentifier>
- implements LocalTransactionFactory {
- private static final Throwable ABORTED = new Throwable("Transaction aborted");
- private final TransactionChainProxy parent;
- private final ActorSelection leader;
- private final ReadOnlyDataTree tree;
-
- LocalTransactionChain(final TransactionChainProxy parent, final ActorSelection leader,
- final ReadOnlyDataTree tree) {
- this.parent = requireNonNull(parent);
- this.leader = requireNonNull(leader);
- this.tree = requireNonNull(tree);
- }
-
- ReadOnlyDataTree getDataTree() {
- return tree;
- }
-
- @Override
- protected TransactionIdentifier nextTransactionIdentifier() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- protected boolean getDebugTransactions() {
- return false;
- }
-
- @Override
- protected DataTreeSnapshot takeSnapshot() {
- return tree.takeSnapshot();
- }
-
- @Override
- protected DOMStoreThreePhaseCommitCohort createCohort(
- final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
- final DataTreeModification modification,
- final Exception operationError) {
- return new LocalChainThreePhaseCommitCohort(transaction, modification, operationError);
- }
-
- @Override
- public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
- return super.newReadOnlyTransaction(identifier);
- }
-
- @Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
- return super.newReadWriteTransaction(identifier);
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
- return super.newWriteOnlyTransaction(identifier);
- }
-
- @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
- @Override
- public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) {
- checkArgument(tx instanceof SnapshotBackedWriteTransaction);
- if (operationError != null) {
- return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx,
- operationError);
- }
-
- try {
- return (LocalThreePhaseCommitCohort) tx.ready();
- } catch (Exception e) {
- // Unfortunately we need to cast to SnapshotBackedWriteTransaction here as it's required by
- // LocalThreePhaseCommitCohort and the base class.
- return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, e);
- }
- }
-
- private class LocalChainThreePhaseCommitCohort extends LocalThreePhaseCommitCohort {
-
- protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
- DataTreeModification modification, Exception operationError) {
- super(parent.getActorUtils(), leader, transaction, modification, operationError);
- }
-
- protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
- Exception operationError) {
- super(parent.getActorUtils(), leader, transaction, operationError);
- }
-
- @Override
- protected void transactionAborted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
- onTransactionFailed(transaction, ABORTED);
- }
-
- @Override
- protected void transactionCommitted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
- onTransactionCommited(transaction);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.function.Consumer;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-/**
- * Processes front-end transaction operations locally before being committed to the destination shard.
- * Instances of this class are used when the destination shard is local to the caller.
- *
- * @author Thomas Pantelis
- */
-abstract class LocalTransactionContext extends AbstractTransactionContext {
- private final DOMStoreTransaction txDelegate;
- private final LocalTransactionReadySupport readySupport;
- private Exception operationError;
-
- LocalTransactionContext(final DOMStoreTransaction txDelegate, final TransactionIdentifier identifier,
- final LocalTransactionReadySupport readySupport) {
- super(identifier);
- this.txDelegate = requireNonNull(txDelegate);
- this.readySupport = readySupport;
- }
-
- protected abstract DOMStoreWriteTransaction getWriteDelegate();
-
- protected abstract DOMStoreReadTransaction getReadDelegate();
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private void executeModification(Consumer<DOMStoreWriteTransaction> consumer) {
- incrementModificationCount();
- if (operationError == null) {
- try {
- consumer.accept(getWriteDelegate());
- } catch (Exception e) {
- operationError = e;
- }
- }
- }
-
- @Override
- public void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
- executeModification(transaction -> transaction.delete(path));
- }
-
- @Override
- public void executeMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- executeModification(transaction -> transaction.merge(path, data));
- }
-
- @Override
- public void executeWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- executeModification(transaction -> transaction.write(path, data));
- }
-
- @Override
- public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture,
- final Boolean havePermit) {
- Futures.addCallback(readCmd.apply(getReadDelegate()), new FutureCallback<T>() {
- @Override
- public void onSuccess(final T result) {
- proxyFuture.set(result);
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- proxyFuture.setException(failure instanceof Exception
- ? ReadFailedException.MAPPER.apply((Exception) failure) : failure);
- }
- }, MoreExecutors.directExecutor());
- }
-
- private LocalThreePhaseCommitCohort ready() {
- logModificationCount();
- return readySupport.onTransactionReady(getWriteDelegate(), operationError);
- }
-
- @Override
- public Future<ActorSelection> readyTransaction(final Boolean havePermit,
- final Optional<SortedSet<String>> participatingShardNames) {
- final LocalThreePhaseCommitCohort cohort = ready();
- return cohort.initiateCoordinatedCommit(participatingShardNames);
- }
-
- @Override
- public Future<Object> directCommit(final Boolean havePermit) {
- final LocalThreePhaseCommitCohort cohort = ready();
- return cohort.initiateDirectCommit();
- }
-
- @Override
- public void closeTransaction() {
- txDelegate.close();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * A factory for creating local transactions used by {@link AbstractTransactionContextFactory} to instantiate
- * transactions on shards which are co-located with the shard leader.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionFactory extends LocalTransactionReadySupport {
- DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier);
-
- DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier);
-
- DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier);
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedTransactions;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-
-/**
- * {@link LocalTransactionFactory} for instantiating backing transactions which are
- * disconnected from each other, ie not chained. These are used by {@link AbstractTransactionContextFactory}
- * to instantiate transactions on shards which are co-located with the shard leader.
- */
-final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<TransactionIdentifier>
- implements LocalTransactionFactory {
-
- private final ActorSelection leader;
- private final ReadOnlyDataTree dataTree;
- private final ActorUtils actorUtils;
-
- LocalTransactionFactoryImpl(final ActorUtils actorUtils, final ActorSelection leader,
- final ReadOnlyDataTree dataTree) {
- this.leader = requireNonNull(leader);
- this.dataTree = requireNonNull(dataTree);
- this.actorUtils = actorUtils;
- }
-
- ReadOnlyDataTree getDataTree() {
- return dataTree;
- }
-
- @Override
- public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
- return SnapshotBackedTransactions.newReadTransaction(identifier, false, dataTree.takeSnapshot());
- }
-
- @Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
- return SnapshotBackedTransactions.newReadWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
- return SnapshotBackedTransactions.newWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
- }
-
- @Override
- protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> tx) {
- // No-op
- }
-
- @Override
- protected DOMStoreThreePhaseCommitCohort transactionReady(
- final SnapshotBackedWriteTransaction<TransactionIdentifier> tx,
- final DataTreeModification tree,
- final Exception readyError) {
- return new LocalThreePhaseCommitCohort(actorUtils, leader, tx, tree, readyError);
- }
-
- @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
- @Override
- public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) {
- checkArgument(tx instanceof SnapshotBackedWriteTransaction);
- if (operationError != null) {
- return new LocalThreePhaseCommitCohort(actorUtils, leader,
- (SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
- }
-
- return (LocalThreePhaseCommitCohort) tx.ready();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Interface for a class that can "ready" a transaction.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionReadySupport {
- LocalThreePhaseCommitCohort onTransactionReady(@NonNull DOMStoreWriteTransaction tx,
- @Nullable Exception operationError);
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.List;
-import scala.concurrent.Future;
-
-/**
- * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
- * instance given out for empty transactions.
- */
-final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
- static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
- private NoOpDOMStoreThreePhaseCommitCohort() {
- // Hidden to prevent instantiation
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return IMMEDIATE_BOOLEAN_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- List<Future<Object>> getCohortFutures() {
- return Collections.emptyList();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-final class NoOpTransactionContext extends AbstractTransactionContext {
- private static final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class);
-
- private final Throwable failure;
-
- NoOpTransactionContext(final Throwable failure, final TransactionIdentifier identifier) {
- super(identifier);
- this.failure = failure;
- }
-
- @Override
- public void closeTransaction() {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
- }
-
- @Override
- public Future<Object> directCommit(final Boolean havePermit) {
- LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure);
- return akka.dispatch.Futures.failed(failure);
- }
-
- @Override
- public Future<ActorSelection> readyTransaction(final Boolean havePermit,
- final Optional<SortedSet<String>> participatingShardNamess) {
- LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure);
- return akka.dispatch.Futures.failed(failure);
- }
-
- @Override
- public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
- readCmd.getPath());
-
- final Throwable t;
- if (failure instanceof NoShardLeaderException) {
- t = new DataStoreUnavailableException(failure.getMessage(), failure);
- } else {
- t = failure;
- }
- proxyFuture.setException(new ReadFailedException("Error executeRead " + readCmd.getClass().getSimpleName()
- + " for path " + readCmd.getPath(), t));
- }
-
- @Override
- public void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
- LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path);
- }
-
- @Override
- public void executeMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path);
- }
-
- @Override
- public void executeWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path);
- }
-}
import java.util.Map;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
@Beta
@Component(factory = OSGiDOMStore.FACTORY_NAME, service = { DOMStore.class, DistributedDataStoreInterface.class })
public final class OSGiDOMStore
- implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, DOMDataTreeCommitCohortRegistry {
+ implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, CommitCohortExtension {
// OSGi DS Component Factory name
static final String FACTORY_NAME = "org.opendaylight.controller.cluster.datastore.OSGiDOMStore";
static final String DATASTORE_INST_PROP = ".datastore.instance";
private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMStore.class);
- private LogicalDatastoreType datastoreType;
+ private final LogicalDatastoreType datastoreType;
private AbstractDataStore datastore;
- @Override
- public ActorUtils getActorUtils() {
- return datastore.getActorUtils();
+ @Activate
+ public OSGiDOMStore(final Map<String, ?> properties) {
+ datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP));
+ datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP));
+ LOG.info("Datastore service type {} activated", datastoreType);
+ }
+
+ @Deactivate
+ void deactivate() {
+ datastore = null;
+ LOG.info("Datastore service type {} deactivated", datastoreType);
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
- final YangInstanceIdentifier internalPath, final DOMDataTreeChangeListener delegate) {
- return datastore.registerShardConfigListener(internalPath, delegate);
+ public ActorUtils getActorUtils() {
+ return datastore.getActorUtils();
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
- final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
- final DOMDataTreeChangeListener delegate) {
+ public Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+ final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
return datastore.registerProxyListener(shardLookup, insideShard, delegate);
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
- final YangInstanceIdentifier treeId, final L listener) {
+ public Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+ final DOMDataTreeChangeListener listener) {
return datastore.registerTreeChangeListener(treeId, listener);
}
@Override
- public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
- final DOMDataTreeIdentifier path, final T cohort) {
+ public Registration registerCommitCohort(final DOMDataTreeIdentifier path, final DOMDataTreeCommitCohort cohort) {
return datastore.registerCommitCohort(path, cohort);
}
return datastore.newReadWriteTransaction();
}
- @Activate
- void activate(final Map<String, ?> properties) {
- datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP));
- datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP));
- LOG.info("Datastore service type {} activated", datastoreType);
- }
-
- @Deactivate
- void deactivate() {
- datastore = null;
- LOG.info("Datastore service type {} deactivated", datastoreType);
+ @Override
+ public Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+ final DOMDataTreeChangeListener listener) {
+ return datastore.registerLegacyTreeChangeListener(treeId, listener);
}
}
import com.google.common.annotations.Beta;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
-import java.util.Dictionary;
-import java.util.Hashtable;
import java.util.Map;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfigProvider;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMSchemaService;
+import org.osgi.framework.FrameworkUtil;
import org.osgi.service.component.ComponentFactory;
import org.osgi.service.component.ComponentInstance;
import org.osgi.service.component.annotations.Activate;
private final String serviceType;
@GuardedBy("this")
- private ComponentInstance component;
+ private ComponentInstance<OSGiDOMStore> component;
@GuardedBy("this")
private boolean stopped;
synchronized void updateProperties(final Map<String, Object> properties) {
if (introspector.update(properties)) {
+ LOG.info("Distributed Datastore type {} updating context", datastoreType);
datastore.onDatastoreContextUpdated(introspector.newContextFactory());
}
}
synchronized (this) {
if (!stopped) {
- final Dictionary<String, Object> dict = new Hashtable<>();
- dict.put(OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType);
- dict.put(OSGiDOMStore.DATASTORE_INST_PROP, datastore);
- dict.put("type", serviceType);
- component = datastoreFactory.newInstance(dict);
+ component = datastoreFactory.newInstance(FrameworkUtil.asDictionary(Map.of(
+ OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType,
+ OSGiDOMStore.DATASTORE_INST_PROP, datastore,
+ "type", serviceType)));
LOG.info("Distributed Datastore type {} started", datastoreType);
}
}
private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedDataStore.class);
- @Reference
- DOMSchemaService schemaService = null;
- @Reference
- ActorSystemProvider actorSystemProvider = null;
- @Reference
- DatastoreContextIntrospectorFactory introspectorFactory = null;
- @Reference
- DatastoreSnapshotRestore snapshotRestore = null;
- @Reference
- ModuleShardConfigProvider configProvider = null;
- @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")")
- ComponentFactory datastoreFactory = null;
-
+ private final ComponentFactory<OSGiDOMStore> datastoreFactory;
private DatastoreState configDatastore;
private DatastoreState operDatastore;
@Activate
- void activate(final Map<String, Object> properties) {
- configDatastore = createDatastore(LogicalDatastoreType.CONFIGURATION, "distributed-config", null);
- operDatastore = createDatastore(LogicalDatastoreType.OPERATIONAL, "distributed-operational",
+ public OSGiDistributedDataStore(@Reference final DOMSchemaService schemaService,
+ @Reference final ActorSystemProvider actorSystemProvider,
+ @Reference final DatastoreContextIntrospectorFactory introspectorFactory,
+ @Reference final DatastoreSnapshotRestore snapshotRestore,
+ @Reference final ModuleShardConfigProvider configProvider,
+ @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")")
+ final ComponentFactory<OSGiDOMStore> datastoreFactory, final Map<String, Object> properties) {
+ this.datastoreFactory = requireNonNull(datastoreFactory);
+ configDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+ LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null);
+ operDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+ LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties,
new ConfigurationImpl(configProvider));
- modified(properties);
}
@Modified
configDatastore = null;
}
- private DatastoreState createDatastore(final LogicalDatastoreType datastoreType, final String serviceType,
- final Configuration config) {
+ private DatastoreState createDatastore(final DOMSchemaService schemaService,
+ final ActorSystemProvider actorSystemProvider, final DatastoreSnapshotRestore snapshotRestore,
+ final DatastoreContextIntrospectorFactory introspectorFactory, final LogicalDatastoreType datastoreType,
+ final String serviceType, final Map<String, Object> properties,final Configuration config) {
LOG.info("Distributed Datastore type {} starting", datastoreType);
- final DatastoreContextIntrospector introspector = introspectorFactory.newInstance(datastoreType);
- final AbstractDataStore datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider,
+ final var introspector = introspectorFactory.newInstance(datastoreType, properties);
+ final var datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider,
introspector.getContext(), introspector, snapshotRestore, config);
- datastore.setCloseable(schemaService.registerSchemaContextListener(datastore));
- final DatastoreState state = new DatastoreState(introspector, datastoreType, datastore, serviceType);
+ datastore.setCloseable(schemaService.registerSchemaContextListener(datastore::onModelContextUpdated));
+ final var state = new DatastoreState(introspector, datastoreType, datastore, serviceType);
Futures.addCallback(datastore.initialSettleFuture(), state,
// Note we are invoked from shard manager and therefore could block it, hence the round-trip to executor
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
import static java.util.Objects.requireNonNull;
*
* @author Thomas Pantelis
*/
-class OnDemandShardStateCache {
+final class OnDemandShardStateCache {
private static final Cache<String, OnDemandShardState> ONDEMAND_SHARD_STATE_CACHE =
CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.SECONDS).build();
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class for limiting operations.
- */
-public class OperationLimiter {
- private static final Logger LOG = LoggerFactory.getLogger(OperationLimiter.class);
- private final TransactionIdentifier identifier;
- private final long acquireTimeout;
- private final Semaphore semaphore;
- private final int maxPermits;
-
- OperationLimiter(final TransactionIdentifier identifier, final int maxPermits, final long acquireTimeoutSeconds) {
- this.identifier = requireNonNull(identifier);
-
- checkArgument(acquireTimeoutSeconds >= 0);
- this.acquireTimeout = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds);
-
- checkArgument(maxPermits >= 0);
- this.maxPermits = maxPermits;
- this.semaphore = new Semaphore(maxPermits);
- }
-
- boolean acquire() {
- return acquire(1);
- }
-
- boolean acquire(final int acquirePermits) {
- try {
- if (semaphore.tryAcquire(acquirePermits, acquireTimeout, TimeUnit.NANOSECONDS)) {
- return true;
- }
- } catch (InterruptedException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Interrupted when trying to acquire operation permit for transaction {}", identifier, e);
- } else {
- LOG.warn("Interrupted when trying to acquire operation permit for transaction {}", identifier);
- }
- }
-
- return false;
- }
-
- void release() {
- release(1);
- }
-
- void release(final int permits) {
- this.semaphore.release(permits);
- }
-
- @VisibleForTesting
- TransactionIdentifier getIdentifier() {
- return identifier;
- }
-
- @VisibleForTesting
- int availablePermits() {
- return semaphore.availablePermits();
- }
-
- /**
- * Release all the permits.
- */
- public void releaseAll() {
- this.semaphore.release(maxPermits - availablePermits());
- }
-}
package org.opendaylight.controller.cluster.datastore;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeSnapshot> {
ReadOnlyShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
- final DataTreeSnapshot snapshot) {
+ final DataTreeSnapshot snapshot) {
super(parent, id, snapshot);
}
}
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+
import java.util.Optional;
import java.util.SortedSet;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
public final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeModification> {
-
ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
- final DataTreeModification modification) {
+ final DataTreeModification modification) {
super(parent, id, modification);
}
- ShardDataTreeCohort ready(Optional<SortedSet<String>> participatingShardNames) {
- Preconditions.checkState(close(), "Transaction is already closed");
+ ShardDataTreeCohort ready(final Optional<SortedSet<String>> participatingShardNames) {
+ checkState(close(), "Transaction is already closed");
return getParent().finishTransaction(this, participatingShardNames);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Redirects front-end transaction operations to a shard for processing. Instances of this class are used
- * when the destination shard is remote to the caller.
- *
- * @author Thomas Pantelis
- */
-public class RemoteTransactionContext extends AbstractTransactionContext {
- private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContext.class);
-
- private final ActorUtils actorUtils;
- private final ActorSelection actor;
- private final OperationLimiter limiter;
-
- private BatchedModifications batchedModifications;
- private int totalBatchedModificationsSent;
- private int batchPermits;
-
- /**
- * We have observed a failed modification batch. This transaction context is effectively doomed, as the backend
- * does not have a correct view of the world. If this happens, we do not limit operations but rather short-cut them
- * to a either a no-op (modifications) or a failure (reads). Once the transaction is ready, though, we send the
- * message to resynchronize with the backend, sharing a 'lost message' failure path.
- */
- private volatile Throwable failedModification;
-
- protected RemoteTransactionContext(final TransactionIdentifier identifier, final ActorSelection actor,
- final ActorUtils actorUtils, final short remoteTransactionVersion, final OperationLimiter limiter) {
- super(identifier, remoteTransactionVersion);
- this.limiter = requireNonNull(limiter);
- this.actor = actor;
- this.actorUtils = actorUtils;
- }
-
- private ActorSelection getActor() {
- return actor;
- }
-
- protected ActorUtils getActorUtils() {
- return actorUtils;
- }
-
- @Override
- public void closeTransaction() {
- LOG.debug("Tx {} closeTransaction called", getIdentifier());
- TransactionContextCleanup.untrack(this);
-
- actorUtils.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable());
- }
-
- @Override
- public Future<Object> directCommit(final Boolean havePermit) {
- LOG.debug("Tx {} directCommit called", getIdentifier());
-
- // Send the remaining batched modifications, if any, with the ready flag set.
- bumpPermits(havePermit);
- return sendBatchedModifications(true, true, Optional.empty());
- }
-
- @Override
- public Future<ActorSelection> readyTransaction(final Boolean havePermit,
- final Optional<SortedSet<String>> participatingShardNames) {
- logModificationCount();
-
- LOG.debug("Tx {} readyTransaction called", getIdentifier());
-
- // Send the remaining batched modifications, if any, with the ready flag set.
-
- bumpPermits(havePermit);
- Future<Object> lastModificationsFuture = sendBatchedModifications(true, false, participatingShardNames);
-
- return transformReadyReply(lastModificationsFuture);
- }
-
- private void bumpPermits(final Boolean havePermit) {
- if (Boolean.TRUE.equals(havePermit)) {
- ++batchPermits;
- }
- }
-
- protected Future<ActorSelection> transformReadyReply(final Future<Object> readyReplyFuture) {
- // Transform the last reply Future into a Future that returns the cohort actor path from
- // the last reply message. That's the end result of the ready operation.
-
- return TransactionReadyReplyMapper.transform(readyReplyFuture, actorUtils, getIdentifier());
- }
-
- private BatchedModifications newBatchedModifications() {
- return new BatchedModifications(getIdentifier(), getTransactionVersion());
- }
-
- private void batchModification(final Modification modification, final boolean havePermit) {
- incrementModificationCount();
- if (havePermit) {
- ++batchPermits;
- }
-
- if (batchedModifications == null) {
- batchedModifications = newBatchedModifications();
- }
-
- batchedModifications.addModification(modification);
-
- if (batchedModifications.getModifications().size()
- >= actorUtils.getDatastoreContext().getShardBatchedModificationCount()) {
- sendBatchedModifications();
- }
- }
-
- protected Future<Object> sendBatchedModifications() {
- return sendBatchedModifications(false, false, Optional.empty());
- }
-
- protected Future<Object> sendBatchedModifications(final boolean ready, final boolean doCommitOnReady,
- final Optional<SortedSet<String>> participatingShardNames) {
- Future<Object> sent = null;
- if (ready || batchedModifications != null && !batchedModifications.getModifications().isEmpty()) {
- if (batchedModifications == null) {
- batchedModifications = newBatchedModifications();
- }
-
- LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
- batchedModifications.getModifications().size(), ready);
-
- batchedModifications.setDoCommitOnReady(doCommitOnReady);
- batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
-
- final BatchedModifications toSend = batchedModifications;
- final int permitsToRelease = batchPermits;
- batchPermits = 0;
-
- if (ready) {
- batchedModifications.setReady(participatingShardNames);
- batchedModifications.setDoCommitOnReady(doCommitOnReady);
- batchedModifications = null;
- } else {
- batchedModifications = newBatchedModifications();
-
- final Throwable failure = failedModification;
- if (failure != null) {
- // We have observed a modification failure, it does not make sense to send this batch. This speeds
- // up the time when the application could be blocked due to messages timing out and operation
- // limiter kicking in.
- LOG.debug("Tx {} modifications previously failed, not sending a non-ready batch", getIdentifier());
- limiter.release(permitsToRelease);
- return Futures.failed(failure);
- }
- }
-
- sent = actorUtils.executeOperationAsync(getActor(), toSend.toSerializable(),
- actorUtils.getTransactionCommitOperationTimeout());
- sent.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object success) {
- if (failure != null) {
- LOG.debug("Tx {} modifications failed", getIdentifier(), failure);
- failedModification = failure;
- } else {
- LOG.debug("Tx {} modifications completed with {}", getIdentifier(), success);
- }
- limiter.release(permitsToRelease);
- }
- }, actorUtils.getClientDispatcher());
- }
-
- return sent;
- }
-
- @Override
- public void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
- LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path);
- executeModification(new DeleteModification(path), havePermit);
- }
-
- @Override
- public void executeMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path);
- executeModification(new MergeModification(path, data), havePermit);
- }
-
- @Override
- public void executeWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path);
- executeModification(new WriteModification(path, data), havePermit);
- }
-
- private void executeModification(final AbstractModification modification, final Boolean havePermit) {
- final boolean permitToRelease;
- if (havePermit == null) {
- permitToRelease = failedModification == null && acquireOperation();
- } else {
- permitToRelease = havePermit.booleanValue();
- }
-
- batchModification(modification, permitToRelease);
- }
-
- @Override
- public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> returnFuture,
- final Boolean havePermit) {
- LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
- readCmd.getPath());
-
- final Throwable failure = failedModification;
- if (failure != null) {
- // If we know there was a previous modification failure, we must not send a read request, as it risks
- // returning incorrect data. We check this before acquiring an operation simply because we want the app
- // to complete this transaction as soon as possible.
- returnFuture.setException(new ReadFailedException("Previous modification failed, cannot "
- + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
- return;
- }
-
- // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
- // public API contract.
-
- final boolean permitToRelease = havePermit == null ? acquireOperation() : havePermit.booleanValue();
- sendBatchedModifications();
-
- OnComplete<Object> onComplete = new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object response) {
- // We have previously acquired an operation, now release it, no matter what happened
- if (permitToRelease) {
- limiter.release();
- }
-
- if (failure != null) {
- LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(),
- failure);
-
- returnFuture.setException(new ReadFailedException("Error checking "
- + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
- } else {
- LOG.debug("Tx {} {} operation succeeded", getIdentifier(), readCmd.getClass().getSimpleName());
- readCmd.processResponse(response, returnFuture);
- }
- }
- };
-
- final Future<Object> future = actorUtils.executeOperationAsync(getActor(),
- readCmd.asVersion(getTransactionVersion()).toSerializable(), actorUtils.getOperationTimeout());
- future.onComplete(onComplete, actorUtils.getClientDispatcher());
- }
-
- /**
- * Acquire operation from the limiter if the hand-off has completed. If the hand-off is still ongoing, this method
- * does nothing.
- *
- * @return True if a permit was successfully acquired, false otherwise
- */
- private boolean acquireOperation() {
- checkState(isOperationHandOffComplete(),
- "Attempted to acquire execute operation permit for transaction %s on actor %s during handoff",
- getIdentifier(), actor);
-
- if (limiter.acquire()) {
- return true;
- }
-
- LOG.warn("Failed to acquire execute operation permit for transaction {} on actor {}", getIdentifier(), actor);
- return false;
- }
-
- @Override
- public boolean usesOperationLimiting() {
- return true;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
-import akka.pattern.AskTimeoutException;
-import akka.util.Timeout;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Handles creation of TransactionContext instances for remote transactions. This class creates
- * remote transactions, if necessary, by sending CreateTransaction messages with retries, up to a limit,
- * if the shard doesn't have a leader yet. This is done by scheduling a retry task after a short delay.
- * <p/>
- * The end result from a completed CreateTransaction message is a TransactionContext that is
- * used to perform transaction operations. Transaction operations that occur before the
- * CreateTransaction completes are cache via a TransactionContextWrapper and executed once the
- * CreateTransaction completes, successfully or not.
- */
-final class RemoteTransactionContextSupport {
- private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContextSupport.class);
-
- private static final long CREATE_TX_TRY_INTERVAL_IN_MS = 1000;
- private static final long MAX_CREATE_TX_MSG_TIMEOUT_IN_MS = 5000;
-
- private final TransactionProxy parent;
- private final String shardName;
-
- /**
- * The target primary shard.
- */
- private volatile PrimaryShardInfo primaryShardInfo;
-
- /**
- * The total timeout for creating a tx on the primary shard.
- */
- private volatile long totalCreateTxTimeout;
-
- private final Timeout createTxMessageTimeout;
-
- private final TransactionContextWrapper transactionContextWrapper;
-
- RemoteTransactionContextSupport(final TransactionContextWrapper transactionContextWrapper,
- final TransactionProxy parent, final String shardName) {
- this.parent = requireNonNull(parent);
- this.shardName = shardName;
- this.transactionContextWrapper = transactionContextWrapper;
-
- // For the total create tx timeout, use 2 times the election timeout. This should be enough time for
- // a leader re-election to occur if we happen to hit it in transition.
- totalCreateTxTimeout = parent.getActorUtils().getDatastoreContext().getShardRaftConfig()
- .getElectionTimeOutInterval().toMillis() * 2;
-
- // We'll use the operationTimeout for the the create Tx message timeout so it can be set appropriately
- // for unit tests but cap it at MAX_CREATE_TX_MSG_TIMEOUT_IN_MS. The operationTimeout could be set
- // larger than the totalCreateTxTimeout in production which we don't want.
- long operationTimeout = parent.getActorUtils().getOperationTimeout().duration().toMillis();
- createTxMessageTimeout = new Timeout(Math.min(operationTimeout, MAX_CREATE_TX_MSG_TIMEOUT_IN_MS),
- TimeUnit.MILLISECONDS);
- }
-
- String getShardName() {
- return shardName;
- }
-
- private TransactionType getTransactionType() {
- return parent.getType();
- }
-
- private ActorUtils getActorUtils() {
- return parent.getActorUtils();
- }
-
- private TransactionIdentifier getIdentifier() {
- return parent.getIdentifier();
- }
-
- /**
- * Sets the target primary shard and initiates a CreateTransaction try.
- */
- void setPrimaryShard(final PrimaryShardInfo newPrimaryShardInfo) {
- this.primaryShardInfo = newPrimaryShardInfo;
-
- if (getTransactionType() == TransactionType.WRITE_ONLY
- && getActorUtils().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
- ActorSelection primaryShard = newPrimaryShardInfo.getPrimaryShardActor();
-
- LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
- getIdentifier(), primaryShard);
-
- // For write-only Tx's we prepare the transaction modifications directly on the shard actor
- // to avoid the overhead of creating a separate transaction actor.
- transactionContextWrapper.executePriorTransactionOperations(createValidTransactionContext(
- primaryShard, String.valueOf(primaryShard.path()), newPrimaryShardInfo.getPrimaryShardVersion()));
- } else {
- tryCreateTransaction();
- }
- }
-
- /**
- Performs a CreateTransaction try async.
- */
- private void tryCreateTransaction() {
- LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(),
- primaryShardInfo.getPrimaryShardActor());
-
- Object serializedCreateMessage = new CreateTransaction(getIdentifier(), getTransactionType().ordinal(),
- primaryShardInfo.getPrimaryShardVersion()).toSerializable();
-
- Future<Object> createTxFuture = getActorUtils().executeOperationAsync(
- primaryShardInfo.getPrimaryShardActor(), serializedCreateMessage, createTxMessageTimeout);
-
- createTxFuture.onComplete(new OnComplete<Object>() {
- @Override
- public void onComplete(final Throwable failure, final Object response) {
- onCreateTransactionComplete(failure, response);
- }
- }, getActorUtils().getClientDispatcher());
- }
-
- private void tryFindPrimaryShard() {
- LOG.debug("Tx {} Retrying findPrimaryShardAsync for shard {}", getIdentifier(), shardName);
-
- this.primaryShardInfo = null;
- Future<PrimaryShardInfo> findPrimaryFuture = getActorUtils().findPrimaryShardAsync(shardName);
- findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
- @Override
- public void onComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
- onFindPrimaryShardComplete(failure, newPrimaryShardInfo);
- }
- }, getActorUtils().getClientDispatcher());
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void onFindPrimaryShardComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
- if (failure == null) {
- this.primaryShardInfo = newPrimaryShardInfo;
- tryCreateTransaction();
- } else {
- LOG.debug("Tx {}: Find primary for shard {} failed", getIdentifier(), shardName, failure);
-
- onCreateTransactionComplete(failure, null);
- }
- }
-
- private void onCreateTransactionComplete(final Throwable failure, final Object response) {
- // An AskTimeoutException will occur if the local shard forwards to an unavailable remote leader or
- // the cached remote leader actor is no longer available.
- boolean retryCreateTransaction = primaryShardInfo != null
- && (failure instanceof NoShardLeaderException || failure instanceof AskTimeoutException);
-
- // Schedule a retry unless we're out of retries. Note: totalCreateTxTimeout is volatile as it may
- // be written by different threads however not concurrently, therefore decrementing it
- // non-atomically here is ok.
- if (retryCreateTransaction && totalCreateTxTimeout > 0) {
- long scheduleInterval = CREATE_TX_TRY_INTERVAL_IN_MS;
- if (failure instanceof AskTimeoutException) {
- // Since we use the createTxMessageTimeout for the CreateTransaction request and it timed
- // out, subtract it from the total timeout. Also since the createTxMessageTimeout period
- // has already elapsed, we can immediately schedule the retry (10 ms is virtually immediate).
- totalCreateTxTimeout -= createTxMessageTimeout.duration().toMillis();
- scheduleInterval = 10;
- }
-
- totalCreateTxTimeout -= scheduleInterval;
-
- LOG.debug("Tx {}: create tx on shard {} failed with exception \"{}\" - scheduling retry in {} ms",
- getIdentifier(), shardName, failure, scheduleInterval);
-
- getActorUtils().getActorSystem().scheduler().scheduleOnce(
- FiniteDuration.create(scheduleInterval, TimeUnit.MILLISECONDS),
- this::tryFindPrimaryShard, getActorUtils().getClientDispatcher());
- return;
- }
-
- createTransactionContext(failure, response);
- }
-
- private void createTransactionContext(final Throwable failure, final Object response) {
- // Create the TransactionContext from the response or failure. Store the new
- // TransactionContext locally until we've completed invoking the
- // TransactionOperations. This avoids thread timing issues which could cause
- // out-of-order TransactionOperations. Eg, on a modification operation, if the
- // TransactionContext is non-null, then we directly call the TransactionContext.
- // However, at the same time, the code may be executing the cached
- // TransactionOperations. So to avoid thus timing, we don't publish the
- // TransactionContext until after we've executed all cached TransactionOperations.
- TransactionContext localTransactionContext;
- if (failure != null) {
- LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
-
- Throwable resultingEx = failure;
- if (failure instanceof AskTimeoutException) {
- resultingEx = new ShardLeaderNotRespondingException(String.format(
- "Could not create a %s transaction on shard %s. The shard leader isn't responding.",
- parent.getType(), shardName), failure);
- } else if (!(failure instanceof NoShardLeaderException)) {
- resultingEx = new Exception(String.format(
- "Error creating %s transaction on shard %s", parent.getType(), shardName), failure);
- }
-
- localTransactionContext = new NoOpTransactionContext(resultingEx, getIdentifier());
- } else if (CreateTransactionReply.isSerializedType(response)) {
- localTransactionContext = createValidTransactionContext(
- CreateTransactionReply.fromSerializable(response));
- } else {
- IllegalArgumentException exception = new IllegalArgumentException(String.format(
- "Invalid reply type %s for CreateTransaction", response.getClass()));
-
- localTransactionContext = new NoOpTransactionContext(exception, getIdentifier());
- }
-
- transactionContextWrapper.executePriorTransactionOperations(localTransactionContext);
- }
-
- private TransactionContext createValidTransactionContext(final CreateTransactionReply reply) {
- LOG.debug("Tx {} Received {}", getIdentifier(), reply);
-
- return createValidTransactionContext(getActorUtils().actorSelection(reply.getTransactionPath()),
- reply.getTransactionPath(), primaryShardInfo.getPrimaryShardVersion());
- }
-
- private TransactionContext createValidTransactionContext(final ActorSelection transactionActor,
- final String transactionPath, final short remoteTransactionVersion) {
- final TransactionContext ret = new RemoteTransactionContext(transactionContextWrapper.getIdentifier(),
- transactionActor, getActorUtils(), remoteTransactionVersion, transactionContextWrapper.getLimiter());
-
- if (parent.getType() == TransactionType.READ_ONLY) {
- TransactionContextCleanup.track(parent, ret);
- }
-
- return ret;
- }
-}
-
import akka.actor.Props;
import com.google.common.collect.Iterables;
import java.util.ArrayDeque;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.Iterator;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor {
private Deque<DataTreeChanged> otherMessages = new ArrayDeque<>();
private RootDataTreeChangeListenerActor(final DOMDataTreeChangeListener listener, final int shardCount) {
- super(listener, YangInstanceIdentifier.empty());
+ super(listener, YangInstanceIdentifier.of());
this.shardCount = shardCount;
}
/*
* We need to make-pretend that the data coming into the listener is coming from a single logical entity, where
* ordering is partially guaranteed (on shard boundaries). The data layout in shards is such that each DataTree
- * is rooted at YangInstanceIdentifier.empty(), but their contents vary:
+ * is rooted at YangInstanceIdentifier.of(), but their contents vary:
*
* 1) non-default shards contain immediate children of root from one module
* 2) default shard contains everything else
* Construct an overall NormalizedNode view of the entire datastore by combining first-level children from all
* reported initial state reports, report that node as written and then report any additional deltas.
*/
- final Deque<DataTreeCandidate> initialChanges = new ArrayDeque<>();
+ final List<DataTreeCandidate> initialChanges = new ArrayList<>();
+ // Reserve first item
+ initialChanges.add(null);
+
final DataContainerNodeBuilder<NodeIdentifier, ContainerNode> rootBuilder = Builders.containerBuilder()
.withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME));
for (Object message : initialMessages.values()) {
final Iterator<DataTreeCandidate> it = changes.iterator();
initial = it.next();
// Append to changes to report as initial. This should not be happening (often?).
- it.forEachRemaining(initialChanges::addLast);
+ it.forEachRemaining(initialChanges::add);
} else {
initial = Iterables.get(changes, 0);
}
- final NormalizedNode<?, ?> root = initial.getRootNode().getDataAfter().orElseThrow();
+ final NormalizedNode root = initial.getRootNode().getDataAfter();
verify(root instanceof ContainerNode, "Unexpected root node %s", root);
- ((ContainerNode) root).getValue().forEach(rootBuilder::withChild);
+ ((ContainerNode) root).body().forEach(rootBuilder::withChild);
}
}
// We will not be intercepting any other messages, allow initial state to be reclaimed as soon as possible
initialMessages = null;
- // Prepend combined initial changed and report initial changes and clear the map
- initialChanges.addFirst(DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.empty(),
+ // Replace first element with the combined initial change, report initial changes and clear the map
+ initialChanges.set(0, DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.of(),
DataTreeCandidateNodes.written(rootBuilder.build())));
super.dataTreeChanged(new DataTreeChanged(initialChanges));
import akka.actor.PoisonPill;
import akka.dispatch.OnComplete;
import com.google.common.collect.Maps;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
- extends AbstractListenerRegistration<L> {
+final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener> extends AbstractObjectRegistration<L> {
private abstract static class State {
}
final Set<String> shardNames) {
super(listener);
this.actorUtils = requireNonNull(actorUtils);
- this.state = new ResolveShards(shardNames.size());
+ state = new ResolveShards(shardNames.size());
for (String shardName : shardNames) {
actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
} else if (state instanceof ResolveShards) {
// Simple case: just mark the fact we were closed, terminating when resolution finishes
state = new Terminated();
- } else if (state instanceof Subscribed) {
- terminate((Subscribed) state);
+ } else if (state instanceof Subscribed subscribed) {
+ terminate(subscribed);
} else {
throw new IllegalStateException("Unhandled close in state " + state);
}
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private synchronized void onFindLocalShardComplete(final String shardName, final Throwable failure,
final ActorRef shard) {
- if (state instanceof ResolveShards) {
- localShardsResolved((ResolveShards) state, shardName, failure, shard);
+ if (state instanceof ResolveShards resolveShards) {
+ localShardsResolved(resolveShards, shardName, failure, shard);
} else {
LOG.debug("{}: lookup for shard {} turned into a noop on state {}", logContext(), shardName, state);
}
// Subscribe to all shards
final RegisterDataTreeChangeListener regMessage = new RegisterDataTreeChangeListener(
- YangInstanceIdentifier.empty(), dtclActor, true);
+ YangInstanceIdentifier.of(), dtclActor, true);
for (Entry<String, Object> entry : localShards.entrySet()) {
// Do not retain references to localShards
final String shardName = entry.getKey();
}
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private synchronized void onShardSubscribed(final String shardName, final Throwable failure, final Object result) {
- if (state instanceof Subscribed) {
- final Subscribed current = (Subscribed) state;
+ if (state instanceof Subscribed current) {
if (failure != null) {
LOG.error("{}: Shard {} failed to subscribe, terminating listener {}", logContext(),
shardName,getInstance(), failure);
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotOffer;
import akka.serialization.JavaSerializer;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
-import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
-import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
+import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
import scala.concurrent.duration.FiniteDuration;
/**
* <p>
* Our Shard uses InMemoryDataTree as it's internal representation and delegates all requests it
*/
+// FIXME: non-final for testing?
public class Shard extends RaftActor {
@VisibleForTesting
private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;
+ // Make sure to keep this in sync with the journal configuration in factory-akka.conf
+ public static final String NON_PERSISTENT_JOURNAL_ID = "akka.persistence.non-persistent.journal";
+
static {
final ABIVersion[] values = ABIVersion.values();
final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
private DatastoreContext datastoreContext;
+ @Deprecated(since = "9.0.0", forRemoval = true)
private final ShardCommitCoordinator commitCoordinator;
private long transactionCommitTimeout;
private final MessageTracker appendEntriesReplyTracker;
+ @Deprecated(since = "9.0.0", forRemoval = true)
private final ShardTransactionActorFactory transactionActorFactory;
private final ShardSnapshotCohort snapshotCohort;
private ShardSnapshot restoreFromSnapshot;
+ @Deprecated(since = "9.0.0", forRemoval = true)
private final ShardTransactionMessageRetrySupport messageRetrySupport;
@VisibleForTesting
private final MessageAssembler requestMessageAssembler;
- protected Shard(final AbstractBuilder<?, ?> builder) {
+ private final ExportOnRecovery exportOnRecovery;
+
+ private final ActorRef exportActor;
+
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
+ Shard(final AbstractBuilder<?, ?> builder) {
super(builder.getId().toString(), builder.getPeerAddresses(),
Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);
- this.name = builder.getId().toString();
- this.shardName = builder.getId().getShardName();
- this.datastoreContext = builder.getDatastoreContext();
- this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
- this.frontendMetadata = new FrontendMetadata(name);
+ name = builder.getId().toString();
+ shardName = builder.getId().getShardName();
+ datastoreContext = builder.getDatastoreContext();
+ restoreFromSnapshot = builder.getRestoreFromSnapshot();
+ frontendMetadata = new FrontendMetadata(name);
+ exportOnRecovery = datastoreContext.getExportOnRecovery();
+
+ exportActor = switch (exportOnRecovery) {
+ case Json -> getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
+ datastoreContext.getRecoveryExportBaseDir()));
+ case Off -> null;
+ };
setPersistence(datastoreContext.isPersistent());
frontendMetadata);
}
- shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);
+ shardMBean = ShardStats.create(name, datastoreContext.getDataStoreMXBeanType(), this);
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
- commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);
+ commitCoordinator = new ShardCommitCoordinator(store, LOG, name);
setTransactionCommitTimeout();
self(), getContext(), shardMBean, builder.getId().getShardName());
snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
- this.name, datastoreContext);
+ name, datastoreContext);
messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
- responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
+ responseMessageSlicer = MessageSlicer.builder().logContext(name)
.messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
.fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
.expireStateAfterInactivity(2, TimeUnit.MINUTES).build();
- requestMessageAssembler = MessageAssembler.builder().logContext(this.name)
+ requestMessageAssembler = MessageAssembler.builder().logContext(name)
.fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
.assembledMessageCallback((message, sender) -> self().tell(message, sender))
.expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();
}
private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
- ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
+ ActorRef shardRoleChangeNotifier = getContext().actorOf(
RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
return Optional.of(shardRoleChangeNotifier);
}
@Override
- public void postStop() throws Exception {
+ public final void postStop() throws Exception {
LOG.info("Stopping Shard {}", persistenceId());
super.postStop();
}
@Override
- protected void handleRecover(final Object message) {
+ protected final void handleRecover(final Object message) {
LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
getSender());
super.handleRecover(message);
+
+ switch (exportOnRecovery) {
+ case Json:
+ if (message instanceof SnapshotOffer) {
+ exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().orElseThrow(), name),
+ ActorRef.noSender());
+ } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+ exportActor.tell(new JsonExportActor.ExportJournal(replicatedLogEntry), ActorRef.noSender());
+ } else if (message instanceof RecoveryCompleted) {
+ exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender());
+ exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+ break;
+ case Off:
+ default:
+ break;
+ }
+
if (LOG.isTraceEnabled()) {
appendEntriesReplyTracker.begin();
}
}
@Override
+ // non-final for TestShard
protected void handleNonRaftCommand(final Object message) {
- try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
- final Optional<Error> maybeError = context.error();
+ try (var context = appendEntriesReplyTracker.received(message)) {
+ final var maybeError = context.error();
if (maybeError.isPresent()) {
LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
- maybeError.get());
+ maybeError.orElseThrow());
}
store.resetTransactionBatch();
- if (message instanceof RequestEnvelope) {
- handleRequestEnvelope((RequestEnvelope)message);
+ if (message instanceof RequestEnvelope request) {
+ handleRequestEnvelope(request);
} else if (MessageAssembler.isHandledMessage(message)) {
handleRequestAssemblerMessage(message);
- } else if (message instanceof ConnectClientRequest) {
- handleConnectClient((ConnectClientRequest)message);
- } else if (CreateTransaction.isSerializedType(message)) {
- handleCreateTransaction(message);
- } else if (message instanceof BatchedModifications) {
- handleBatchedModifications((BatchedModifications)message);
- } else if (message instanceof ForwardedReadyTransaction) {
- handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
- } else if (message instanceof ReadyLocalTransaction) {
- handleReadyLocalTransaction((ReadyLocalTransaction)message);
- } else if (CanCommitTransaction.isSerializedType(message)) {
- handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
- } else if (CommitTransaction.isSerializedType(message)) {
- handleCommitTransaction(CommitTransaction.fromSerializable(message));
- } else if (AbortTransaction.isSerializedType(message)) {
- handleAbortTransaction(AbortTransaction.fromSerializable(message));
- } else if (CloseTransactionChain.isSerializedType(message)) {
- closeTransactionChain(CloseTransactionChain.fromSerializable(message));
- } else if (message instanceof RegisterDataTreeChangeListener) {
- treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
- } else if (message instanceof UpdateSchemaContext) {
- updateSchemaContext((UpdateSchemaContext) message);
- } else if (message instanceof PeerAddressResolved) {
- PeerAddressResolved resolved = (PeerAddressResolved) message;
+ } else if (message instanceof ConnectClientRequest request) {
+ handleConnectClient(request);
+ } else if (message instanceof DataTreeChangedReply) {
+ // Ignore reply
+ } else if (message instanceof RegisterDataTreeChangeListener request) {
+ treeChangeSupport.onMessage(request, isLeader(), hasLeader());
+ } else if (message instanceof UpdateSchemaContext request) {
+ updateSchemaContext(request);
+ } else if (message instanceof PeerAddressResolved resolved) {
setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
} else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
commitTimeoutCheck();
- } else if (message instanceof DatastoreContext) {
- onDatastoreContext((DatastoreContext)message);
+ } else if (message instanceof DatastoreContext request) {
+ onDatastoreContext(request);
} else if (message instanceof RegisterRoleChangeListener) {
- roleChangeNotifier.get().forward(message, context());
- } else if (message instanceof FollowerInitialSyncUpStatus) {
- shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
+ roleChangeNotifier.orElseThrow().forward(message, context());
+ } else if (message instanceof FollowerInitialSyncUpStatus request) {
+ shardMBean.setFollowerInitialSyncStatus(request.isInitialSyncDone());
context().parent().tell(message, self());
} else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
sender().tell(getShardMBean(), self());
sender().tell(store.getDataTree(), self());
} else if (message instanceof ServerRemoved) {
context().parent().forward(message, context());
- } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
- messageRetrySupport.onTimerMessage(message);
- } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
- store.processCohortRegistryCommand(getSender(),
- (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
+ } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand request) {
+ store.processCohortRegistryCommand(getSender(), request);
} else if (message instanceof MakeLeaderLocal) {
onMakeLeaderLocal();
} else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
} else if (GetKnownClients.INSTANCE.equals(message)) {
handleGetKnownClients();
} else if (!responseMessageSlicer.handleMessage(message)) {
- super.handleNonRaftCommand(message);
+ // Ask-based protocol messages
+ if (CreateTransaction.isSerializedType(message)) {
+ handleCreateTransaction(message);
+ } else if (message instanceof BatchedModifications request) {
+ handleBatchedModifications(request);
+ } else if (message instanceof ForwardedReadyTransaction request) {
+ handleForwardedReadyTransaction(request);
+ } else if (message instanceof ReadyLocalTransaction request) {
+ handleReadyLocalTransaction(request);
+ } else if (CanCommitTransaction.isSerializedType(message)) {
+ handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+ } else if (CommitTransaction.isSerializedType(message)) {
+ handleCommitTransaction(CommitTransaction.fromSerializable(message));
+ } else if (AbortTransaction.isSerializedType(message)) {
+ handleAbortTransaction(AbortTransaction.fromSerializable(message));
+ } else if (CloseTransactionChain.isSerializedType(message)) {
+ closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+ } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
+ messageRetrySupport.onTimerMessage(message);
+ } else {
+ super.handleNonRaftCommand(message);
+ }
}
}
}
}
private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
- final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
+ final FrontendIdentifier frontend = cohort.transactionId().getHistoryId().getClientId().getFrontendId();
final LeaderFrontendState state = knownFrontends.get(frontend);
if (state == null) {
// Not tell-based protocol, do nothing
throw new NotLeaderException(getSelf());
}
- final Request<?, ?> request = envelope.getMessage();
- if (request instanceof TransactionRequest) {
- final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
- final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
+ final var request = envelope.getMessage();
+ if (request instanceof TransactionRequest<?> txReq) {
+ final var clientId = txReq.getTarget().getHistoryId().getClientId();
return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
- } else if (request instanceof LocalHistoryRequest) {
- final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
- final ClientIdentifier clientId = lhReq.getTarget().getClientId();
+ } else if (request instanceof LocalHistoryRequest<?> lhReq) {
+ final var clientId = lhReq.getTarget().getClientId();
return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
} else {
LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
return getLeaderId() != null;
}
- public int getPendingTxCommitQueueSize() {
+ final int getPendingTxCommitQueueSize() {
return store.getQueueSize();
}
- public int getCohortCacheSize() {
+ final int getCohortCacheSize() {
return commitCoordinator.getCohortCacheSize();
}
@Override
- protected Optional<ActorRef> getRoleChangeNotifier() {
+ protected final Optional<ActorRef> getRoleChangeNotifier() {
return roleChangeNotifier;
}
- String getShardName() {
+ final String getShardName() {
return shardName;
}
@Override
- protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
+ protected final LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
final short leaderPayloadVersion) {
return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
: new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
}
- protected void onDatastoreContext(final DatastoreContext context) {
- datastoreContext = context;
+ private void onDatastoreContext(final DatastoreContext context) {
+ datastoreContext = verifyNotNull(context);
setTransactionCommitTimeout();
}
// applyState() will be invoked once consensus is reached on the payload
+ // non-final for mocking
void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
- boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
+ final boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
if (canSkipPayload) {
applyState(self(), id, payload);
} else {
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleCommitTransaction(final CommitTransaction commit) {
- final TransactionIdentifier txId = commit.getTransactionId();
+ final var txId = commit.getTransactionId();
if (isLeader()) {
askProtocolEncountered(txId);
commitCoordinator.handleCommit(txId, getSender(), this);
} else {
- ActorSelection leader = getLeader();
+ final var leader = getLeader();
if (leader == null) {
messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
} else {
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
- final TransactionIdentifier txId = canCommit.getTransactionId();
+ final var txId = canCommit.getTransactionId();
LOG.debug("{}: Can committing transaction {}", persistenceId(), txId);
if (isLeader()) {
askProtocolEncountered(txId);
commitCoordinator.handleCanCommit(txId, getSender(), this);
} else {
- ActorSelection leader = getLeader();
+ final var leader = getLeader();
if (leader == null) {
messageRetrySupport.addMessageToRetry(canCommit, getSender(),
"Could not canCommit transaction " + txId);
}
@SuppressWarnings("checkstyle:IllegalCatch")
- protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
+ @Deprecated(since = "9.0.0", forRemoval = true)
+ private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
askProtocolEncountered(batched.getTransactionId());
try {
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleBatchedModifications(final BatchedModifications batched) {
// This message is sent to prepare the modifications transaction directly on the Shard as an
// optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
if (isLeader() && isLeaderActive) {
handleBatchedModificationsLocal(batched, getSender());
} else {
- ActorSelection leader = getLeader();
+ final var leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(batched, getSender(),
"Could not process BatchedModifications " + batched.getTransactionId());
// we need to reconstruct previous BatchedModifications from the transaction
// DataTreeModification, honoring the max batched modification count, and forward all the
// previous BatchedModifications to the new leader.
- Collection<BatchedModifications> newModifications = commitCoordinator
- .createForwardedBatchedModifications(batched,
- datastoreContext.getShardBatchedModificationCount());
+ final var newModifications = commitCoordinator.createForwardedBatchedModifications(batched,
+ datastoreContext.getShardBatchedModificationCount());
LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
newModifications.size(), leader);
}
@SuppressWarnings("checkstyle:IllegalCatch")
- private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
- LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());
+ @Deprecated(since = "9.0.0", forRemoval = true)
+ private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
+ final var txId = message.getTransactionId();
+ LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), txId);
- boolean isLeaderActive = isLeaderActive();
+ final var isLeaderActive = isLeaderActive();
if (isLeader() && isLeaderActive) {
+ askProtocolEncountered(txId);
try {
commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
} catch (Exception e) {
- LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
- message.getTransactionId(), e);
+ LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(), txId, e);
getSender().tell(new Failure(e), getSelf());
}
} else {
- ActorSelection leader = getLeader();
+ final var leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(message, getSender(),
- "Could not process ready local transaction " + message.getTransactionId());
+ "Could not process ready local transaction " + txId);
} else {
LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());
- boolean isLeaderActive = isLeaderActive();
+ final var isLeaderActive = isLeaderActive();
if (isLeader() && isLeaderActive) {
askProtocolEncountered(forwardedReady.getTransactionId());
commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
} else {
- ActorSelection leader = getLeader();
+ final var leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
"Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
} else {
LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
- ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
+ final var readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
forwardedReady.getParticipatingShardNames());
readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleAbortTransaction(final AbortTransaction abort) {
- final TransactionIdentifier transactionId = abort.getTransactionId();
+ final var transactionId = abort.getTransactionId();
askProtocolEncountered(transactionId);
doAbortTransaction(transactionId, getSender());
}
- void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
+ final void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
commitCoordinator.handleAbort(transactionID, sender, this);
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void handleCreateTransaction(final Object message) {
if (isLeader()) {
createTransaction(CreateTransaction.fromSerializable(message));
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
if (isLeader()) {
- final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
+ final var id = closeTransactionChain.getIdentifier();
askProtocolEncountered(id.getClientId());
store.closeTransactionChain(id);
} else if (getLeader() != null) {
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
@SuppressWarnings("checkstyle:IllegalCatch")
private void createTransaction(final CreateTransaction createTransaction) {
askProtocolEncountered(createTransaction.getTransactionId());
return;
}
- ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
+ final var transactionActor = createTransaction(createTransaction.getTransactionType(),
createTransaction.getTransactionId());
getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
}
// Called on leader only
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void askProtocolEncountered(final TransactionIdentifier transactionId) {
askProtocolEncountered(transactionId.getHistoryId().getClientId());
}
// Called on leader only
+ @Deprecated(since = "9.0.0", forRemoval = true)
private void askProtocolEncountered(final ClientIdentifier clientId) {
- final FrontendIdentifier frontend = clientId.getFrontendId();
- final LeaderFrontendState state = knownFrontends.get(frontend);
+ final var frontend = clientId.getFrontendId();
+ final var state = knownFrontends.get(frontend);
if (!(state instanceof LeaderFrontendState.Disabled)) {
LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId);
if (knownFrontends.isEmpty()) {
}
private void updateSchemaContext(final UpdateSchemaContext message) {
- updateSchemaContext(message.getEffectiveModelContext());
+ updateSchemaContext(message.modelContext());
}
@VisibleForTesting
}
@Override
- @VisibleForTesting
- public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ protected final RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
return snapshotCohort;
}
@Override
- protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ protected final RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
if (restoreFromSnapshot == null) {
return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);
}
}
@Override
+ // non-final for testing
protected void onRecoveryComplete() {
restoreFromSnapshot = null;
//notify shard manager
- getContext().parent().tell(new ActorInitialized(), getSelf());
+ getContext().parent().tell(new ActorInitialized(getSelf()), ActorRef.noSender());
// Being paranoid here - this method should only be called once but just in case...
if (txCommitTimeoutCheckSchedule == null) {
// Schedule a message to be periodically sent to check if the current in-progress
// transaction should be expired and aborted.
- FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
+ final var period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
period, period, getSelf(),
TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
}
@Override
- protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
- if (data instanceof Payload) {
- if (data instanceof DisableTrackingPayload) {
- disableTracking((DisableTrackingPayload) data);
+ protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
+ if (data instanceof Payload payload) {
+ if (payload instanceof DisableTrackingPayload disableTracking) {
+ disableTracking(disableTracking);
return;
}
try {
- store.applyReplicatedPayload(identifier, (Payload)data);
+ store.applyReplicatedPayload(identifier, payload);
} catch (DataValidationFailedException | IOException e) {
LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
}
}
@Override
- protected void onStateChanged() {
+ protected final void onStateChanged() {
boolean isLeader = isLeader();
boolean hasLeader = hasLeader();
treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
}
@Override
- protected void onLeaderChanged(final String oldLeader, final String newLeader) {
+ protected final void onLeaderChanged(final String oldLeader, final String newLeader) {
shardMBean.incrementLeadershipChangeCount();
paused = false;
// them to transaction messages and send to the new leader.
ActorSelection leader = getLeader();
if (leader != null) {
- Collection<?> messagesToForward = convertPendingTransactionsToMessages();
+ // Clears all pending transactions and converts them to messages to be forwarded to a new leader.
+ Collection<?> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
+ datastoreContext.getShardBatchedModificationCount());
if (!messagesToForward.isEmpty()) {
LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
}
} else {
// We have become the leader, we need to reconstruct frontend state
- knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
+ knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
}
}
}
- /**
- * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
- *
- * @return the converted messages
- */
- public Collection<?> convertPendingTransactionsToMessages() {
- return commitCoordinator.convertPendingTransactionsToMessages(
- datastoreContext.getShardBatchedModificationCount());
- }
-
@Override
- protected void pauseLeader(final Runnable operation) {
+ protected final void pauseLeader(final Runnable operation) {
LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
paused = true;
// Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
- if (datastoreContext.isUseTellBasedProtocol()) {
- knownFrontends.values().forEach(LeaderFrontendState::retire);
- knownFrontends = ImmutableMap.of();
- }
+ knownFrontends.values().forEach(LeaderFrontendState::retire);
+ knownFrontends = ImmutableMap.of();
store.setRunOnPendingTransactionsComplete(operation);
}
@Override
- protected void unpauseLeader() {
+ protected final void unpauseLeader() {
LOG.debug("{}: In unpauseLeader", persistenceId());
paused = false;
store.setRunOnPendingTransactionsComplete(null);
// Restore tell-based protocol state as if we were becoming the leader
- knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
+ knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
}
@Override
- protected OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
- return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
- .commitCohortActors(store.getCohortActors());
+ protected final OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
+ return OnDemandShardState.newBuilder()
+ .treeChangeListenerActors(treeChangeSupport.getListenerActors())
+ .commitCohortActors(store.getCohortActors());
}
@Override
- public String persistenceId() {
- return this.name;
+ public final String persistenceId() {
+ return name;
+ }
+
+ @Override
+ public final String journalPluginId() {
+ // This method may be invoked from super constructor (wonderful), hence we also need to handle the case of
+ // the field being uninitialized because our constructor is not finished.
+ if (datastoreContext != null && !datastoreContext.isPersistent()) {
+ return NON_PERSISTENT_JOURNAL_ID;
+ }
+ return super.journalPluginId();
}
@VisibleForTesting
- ShardCommitCoordinator getCommitCoordinator() {
+ final ShardCommitCoordinator getCommitCoordinator() {
return commitCoordinator;
}
- public DatastoreContext getDatastoreContext() {
+ // non-final for mocking
+ DatastoreContext getDatastoreContext() {
return datastoreContext;
}
@VisibleForTesting
- public ShardDataTree getDataStore() {
+ final ShardDataTree getDataStore() {
return store;
}
@VisibleForTesting
+ // non-final for mocking
ShardStats getShardMBean() {
return shardMBean;
}
private ShardIdentifier id;
private Map<String, String> peerAddresses = Collections.emptyMap();
private DatastoreContext datastoreContext;
- private EffectiveModelContextProvider schemaContextProvider;
+ private Supplier<@NonNull EffectiveModelContext> schemaContextProvider;
private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
private DataTree dataTree;
private volatile boolean sealed;
- protected AbstractBuilder(final Class<? extends S> shardClass) {
+ AbstractBuilder(final Class<? extends S> shardClass) {
this.shardClass = shardClass;
}
- protected void checkSealed() {
- checkState(!sealed, "Builder isalready sealed - further modifications are not allowed");
+ final void checkSealed() {
+ checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
}
@SuppressWarnings("unchecked")
public T id(final ShardIdentifier newId) {
checkSealed();
- this.id = newId;
+ id = newId;
return self();
}
public T peerAddresses(final Map<String, String> newPeerAddresses) {
checkSealed();
- this.peerAddresses = newPeerAddresses;
+ peerAddresses = newPeerAddresses;
return self();
}
public T datastoreContext(final DatastoreContext newDatastoreContext) {
checkSealed();
- this.datastoreContext = newDatastoreContext;
+ datastoreContext = newDatastoreContext;
return self();
}
- public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) {
+ public T schemaContextProvider(final Supplier<@NonNull EffectiveModelContext> newSchemaContextProvider) {
checkSealed();
- this.schemaContextProvider = requireNonNull(newSchemaContextProvider);
+ schemaContextProvider = requireNonNull(newSchemaContextProvider);
return self();
}
public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
checkSealed();
- this.restoreFromSnapshot = newRestoreFromSnapshot;
+ restoreFromSnapshot = newRestoreFromSnapshot;
return self();
}
public T dataTree(final DataTree newDataTree) {
checkSealed();
- this.dataTree = newDataTree;
+ dataTree = newDataTree;
return self();
}
}
public EffectiveModelContext getSchemaContext() {
- return Verify.verifyNotNull(schemaContextProvider.getEffectiveModelContext());
+ return verifyNotNull(schemaContextProvider.get());
}
public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
}
public TreeType getTreeType() {
- switch (datastoreContext.getLogicalStoreType()) {
- case CONFIGURATION:
- return TreeType.CONFIGURATION;
- case OPERATIONAL:
- return TreeType.OPERATIONAL;
- default:
- throw new IllegalStateException("Unhandled logical store type "
- + datastoreContext.getLogicalStoreType());
- }
+ return switch (datastoreContext.getLogicalStoreType()) {
+ case CONFIGURATION -> TreeType.CONFIGURATION;
+ case OPERATIONAL -> TreeType.OPERATIONAL;
+ };
}
protected void verify() {
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
/**
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
final class ShardCommitCoordinator {
// Interface hook for unit tests to replace or decorate the ShardDataTreeCohorts.
* @param batched the BatchedModifications message to process
* @param sender the sender of the message
*/
+ @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of captured failure")
void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) {
CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId());
if (cohortEntry == null || cohortEntry.isSealed()) {
}
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
final int maxModificationsPerBatch) {
CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId());
}
private void handleCanCommit(final CohortEntry cohortEntry) {
- cohortEntry.canCommit(new FutureCallback<Void>() {
+ cohortEntry.canCommit(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
+ public void onSuccess(final Empty result) {
log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId());
if (cohortEntry.isDoImmediateCommit()) {
log.debug("{}: Aborting transaction {}", name, transactionID);
final ActorRef self = shard.getSelf();
- cohortEntry.abort(new FutureCallback<Void>() {
+ cohortEntry.abort(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
+ public void onSuccess(final Empty result) {
if (sender != null) {
sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
}
}
void abortPendingTransactions(final String reason, final Shard shard) {
- final Failure failure = new Failure(new RuntimeException(reason));
- Collection<ShardDataTreeCohort> pending = dataTree.getAndClearPendingTransactions();
+ final var failure = new Failure(new RuntimeException(reason));
+ final var pending = dataTree.getAndClearPendingTransactions();
log.debug("{}: Aborting {} pending queued transactions", name, pending.size());
- for (ShardDataTreeCohort cohort : pending) {
- CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
- if (cohortEntry == null) {
- continue;
- }
-
- if (cohortEntry.getReplySender() != null) {
- cohortEntry.getReplySender().tell(failure, shard.self());
+ for (var cohort : pending) {
+ final var cohortEntry = cohortCache.remove(cohort.transactionId());
+ if (cohortEntry != null) {
+ final var replySender = cohortEntry.getReplySender();
+ if (replySender != null) {
+ replySender.tell(failure, shard.self());
+ }
}
}
}
Collection<?> convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
- final Collection<VersionedExternalizableMessage> messages = new ArrayList<>();
- for (ShardDataTreeCohort cohort : dataTree.getAndClearPendingTransactions()) {
- CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
+ final var messages = new ArrayList<VersionedExternalizableMessage>();
+ for (var cohort : dataTree.getAndClearPendingTransactions()) {
+ final var cohortEntry = cohortCache.remove(cohort.transactionId());
if (cohortEntry == null) {
continue;
}
- final Deque<BatchedModifications> newMessages = new ArrayDeque<>();
+ final var newMessages = new ArrayDeque<BatchedModifications>();
cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
@Override
protected BatchedModifications getModifications() {
- final BatchedModifications lastBatch = newMessages.peekLast();
-
+ final var lastBatch = newMessages.peekLast();
if (lastBatch != null && lastBatch.getModifications().size() >= maxModificationsPerBatch) {
return lastBatch;
}
// Allocate a new message
- final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(),
+ final var ret = new BatchedModifications(cohortEntry.getTransactionId(),
cohortEntry.getClientVersion());
newMessages.add(ret);
return ret;
}
});
- final BatchedModifications last = newMessages.peekLast();
+ final var last = newMessages.peekLast();
if (last != null) {
final boolean immediate = cohortEntry.isDoImmediateCommit();
last.setDoCommitOnReady(immediate);
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
+import static java.util.Objects.requireNonNullElse;
import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.MoreObjects;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.datastore.persisted.SkipTransactionsPayload;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;
* <p>
* This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
*/
+@VisibleForTesting
+// non-final for mocking
public class ShardDataTree extends ShardDataTreeTransactionParent {
private static final class CommitEntry {
final SimpleShardDataTreeCohort cohort;
@Override
public String toString() {
- return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
+ return "CommitEntry [tx=" + cohort.transactionId() + ", state=" + cohort.getState() + "]";
}
}
*/
private DataTreeTip tip;
- private SchemaContext schemaContext;
+ private EffectiveModelContext schemaContext;
private DataSchemaContextTree dataSchemaContext;
private int currentTransactionBatch;
@VisibleForTesting
public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) {
- this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(),
+ this(shard, schemaContext, treeType, YangInstanceIdentifier.of(),
new DefaultShardDataTreeChangeListenerPublisher(""), "");
}
return shard.ticker().read();
}
- public DataTree getDataTree() {
+ final DataTree getDataTree() {
return dataTree;
}
- SchemaContext getSchemaContext() {
+ @VisibleForTesting
+ final EffectiveModelContext getSchemaContext() {
return schemaContext;
}
- void updateSchemaContext(final @NonNull EffectiveModelContext newSchemaContext) {
+ final void updateSchemaContext(final @NonNull EffectiveModelContext newSchemaContext) {
dataTree.setEffectiveModelContext(newSchemaContext);
- this.schemaContext = newSchemaContext;
- this.dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
+ schemaContext = newSchemaContext;
+ dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
}
- void resetTransactionBatch() {
+ final void resetTransactionBatch() {
currentTransactionBatch = 0;
}
* @return A state snapshot
*/
@NonNull ShardDataTreeSnapshot takeStateSnapshot() {
- final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
+ final NormalizedNode rootNode = takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
ImmutableMap.builder();
}
final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
- if (snapshot instanceof MetadataShardDataTreeSnapshot) {
- snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
+ if (snapshot instanceof MetadataShardDataTreeSnapshot metaSnapshot) {
+ snapshotMeta = metaSnapshot.getMetadata();
} else {
snapshotMeta = ImmutableMap.of();
}
- for (ShardDataTreeMetadata<?> m : metadata) {
- final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
+ for (var m : metadata) {
+ final var s = snapshotMeta.get(m.getSupportedType());
if (s != null) {
m.applySnapshot(s);
} else {
}
}
- final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
+ final DataTreeModification unwrapped = newModification();
final DataTreeModification mod = wrapper.apply(unwrapped);
// delete everything first
- mod.delete(YangInstanceIdentifier.empty());
+ mod.delete(YangInstanceIdentifier.of());
- final Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
- if (maybeNode.isPresent()) {
+ snapshot.getRootNode().ifPresent(rootNode -> {
// Add everything from the remote node back
- mod.write(YangInstanceIdentifier.empty(), maybeNode.get());
- }
+ mod.write(YangInstanceIdentifier.of(), rootNode);
+ });
+
mod.ready();
dataTree.validate(unwrapped);
* @param snapshot Snapshot that needs to be applied
* @throws DataValidationFailedException when the snapshot fails to apply
*/
- void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
+ final void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
// TODO: we should be taking ShardSnapshotState here and performing forward-compatibility translation
applySnapshot(snapshot, UnaryOperator.identity());
}
* @param snapshot Snapshot that needs to be applied
* @throws DataValidationFailedException when the snapshot fails to apply
*/
- void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
+ final void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
// TODO: we should be able to reuse the pruner, provided we are not reentrant
final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
dataSchemaContext);
@SuppressWarnings("checkstyle:IllegalCatch")
private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
- final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.acquireCandidate();
- final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
- final PruningDataTreeModification mod = createPruningModification(unwrapped,
- NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.getValue().getVersion()) > 0);
+ final var entry = payload.acquireCandidate();
+ final var unwrapped = newModification();
+ final var pruningMod = createPruningModification(unwrapped,
+ NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.streamVersion()) > 0);
- DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
- mod.ready();
+ DataTreeCandidates.applyToModification(pruningMod, entry.candidate());
+ pruningMod.ready();
LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);
try {
dataTree.validate(unwrapped);
dataTree.commit(dataTree.prepare(unwrapped));
} catch (Exception e) {
- File file = new File(System.getProperty("karaf.data", "."),
+ final var file = new File(System.getProperty("karaf.data", "."),
"failed-recovery-payload-" + logContext + ".out");
DataTreeModificationOutput.toFile(file, unwrapped);
- throw new IllegalStateException(String.format(
- "%s: Failed to apply recovery payload. Modification data was written to file %s",
- logContext, file), e);
+ throw new IllegalStateException(
+ "%s: Failed to apply recovery payload. Modification data was written to file %s".formatted(
+ logContext, file),
+ e);
}
- allMetadataCommittedTransaction(entry.getKey());
+ allMetadataCommittedTransaction(entry.transactionId());
}
private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
final boolean uintAdapting) {
// TODO: we should be able to reuse the pruner, provided we are not reentrant
- final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
- dataSchemaContext);
+ final var pruner = ReusableNormalizedNodePruner.forDataSchemaContext(dataSchemaContext);
return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
: new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
}
* @throws IOException when the snapshot fails to deserialize
* @throws DataValidationFailedException when the snapshot fails to apply
*/
- void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
- if (payload instanceof CommitTransactionPayload) {
- applyRecoveryCandidate((CommitTransactionPayload) payload);
- } else if (payload instanceof AbortTransactionPayload) {
- allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
- } else if (payload instanceof PurgeTransactionPayload) {
- allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
- } else if (payload instanceof CreateLocalHistoryPayload) {
- allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
- } else if (payload instanceof CloseLocalHistoryPayload) {
- allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
- } else if (payload instanceof PurgeLocalHistoryPayload) {
- allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
+ final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
+ if (payload instanceof CommitTransactionPayload commit) {
+ applyRecoveryCandidate(commit);
+ } else if (payload instanceof AbortTransactionPayload abort) {
+ allMetadataAbortedTransaction(abort.getIdentifier());
+ } else if (payload instanceof PurgeTransactionPayload purge) {
+ allMetadataPurgedTransaction(purge.getIdentifier());
+ } else if (payload instanceof CreateLocalHistoryPayload create) {
+ allMetadataCreatedLocalHistory(create.getIdentifier());
+ } else if (payload instanceof CloseLocalHistoryPayload close) {
+ allMetadataClosedLocalHistory(close.getIdentifier());
+ } else if (payload instanceof PurgeLocalHistoryPayload purge) {
+ allMetadataPurgedLocalHistory(purge.getIdentifier());
+ } else if (payload instanceof SkipTransactionsPayload skip) {
+ allMetadataSkipTransactions(skip);
} else {
LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
}
private void applyReplicatedCandidate(final CommitTransactionPayload payload)
throws DataValidationFailedException, IOException {
- final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.getCandidate();
- final TransactionIdentifier identifier = entry.getKey();
- LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);
+ final var payloadCandidate = payload.acquireCandidate();
+ final var transactionId = payloadCandidate.transactionId();
+ LOG.debug("{}: Applying foreign transaction {}", logContext, transactionId);
- final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+ final var mod = newModification();
// TODO: check version here, which will enable us to perform forward-compatibility transformations
- DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
+ DataTreeCandidates.applyToModification(mod, payloadCandidate.candidate());
mod.ready();
LOG.trace("{}: Applying foreign modification {}", logContext, mod);
dataTree.validate(mod);
- final DataTreeCandidate candidate = dataTree.prepare(mod);
+ final var candidate = dataTree.prepare(mod);
dataTree.commit(candidate);
- allMetadataCommittedTransaction(identifier);
+ allMetadataCommittedTransaction(transactionId);
notifyListeners(candidate);
}
* @throws IOException when the snapshot fails to deserialize
* @throws DataValidationFailedException when the snapshot fails to apply
*/
- void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
+ final void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
DataValidationFailedException {
/*
* This is a bit more involved than it needs to be due to to the fact we do not want to be touching the payload
* In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
* pre-Boron state -- which limits the number of options here.
*/
- if (payload instanceof CommitTransactionPayload) {
+ if (payload instanceof CommitTransactionPayload commit) {
if (identifier == null) {
- applyReplicatedCandidate((CommitTransactionPayload) payload);
+ applyReplicatedCandidate(commit);
} else {
verify(identifier instanceof TransactionIdentifier);
// if we did not track this transaction before, it means that it came from another leader and we are in
// the process of commiting it while in PreLeader state. That means that it hasnt yet been committed to
// the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
- applyReplicatedCandidate((CommitTransactionPayload) payload);
+ applyReplicatedCandidate(commit);
}
}
// make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
- checkRootOverwrite(((CommitTransactionPayload) payload).acquireCandidate().getValue()
- .getCandidate());
- } else if (payload instanceof AbortTransactionPayload) {
+ checkRootOverwrite(commit.acquireCandidate().candidate());
+ } else if (payload instanceof AbortTransactionPayload abort) {
+ if (identifier != null) {
+ payloadReplicationComplete(abort);
+ }
+ allMetadataAbortedTransaction(abort.getIdentifier());
+ } else if (payload instanceof PurgeTransactionPayload purge) {
if (identifier != null) {
- payloadReplicationComplete((AbortTransactionPayload) payload);
+ payloadReplicationComplete(purge);
}
- allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
- } else if (payload instanceof PurgeTransactionPayload) {
+ allMetadataPurgedTransaction(purge.getIdentifier());
+ } else if (payload instanceof CloseLocalHistoryPayload close) {
if (identifier != null) {
- payloadReplicationComplete((PurgeTransactionPayload) payload);
+ payloadReplicationComplete(close);
}
- allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
- } else if (payload instanceof CloseLocalHistoryPayload) {
+ allMetadataClosedLocalHistory(close.getIdentifier());
+ } else if (payload instanceof CreateLocalHistoryPayload create) {
if (identifier != null) {
- payloadReplicationComplete((CloseLocalHistoryPayload) payload);
+ payloadReplicationComplete(create);
}
- allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
- } else if (payload instanceof CreateLocalHistoryPayload) {
+ allMetadataCreatedLocalHistory(create.getIdentifier());
+ } else if (payload instanceof PurgeLocalHistoryPayload purge) {
if (identifier != null) {
- payloadReplicationComplete((CreateLocalHistoryPayload)payload);
+ payloadReplicationComplete(purge);
}
- allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
- } else if (payload instanceof PurgeLocalHistoryPayload) {
+ allMetadataPurgedLocalHistory(purge.getIdentifier());
+ } else if (payload instanceof SkipTransactionsPayload skip) {
if (identifier != null) {
- payloadReplicationComplete((PurgeLocalHistoryPayload)payload);
+ payloadReplicationComplete(skip);
}
- allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
+ allMetadataSkipTransactions(skip);
} else {
LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
}
}
- private void checkRootOverwrite(DataTreeCandidate candidate) {
+ private void checkRootOverwrite(final DataTreeCandidate candidate) {
final DatastoreContext datastoreContext = shard.getDatastoreContext();
if (!datastoreContext.isSnapshotOnRootOverwrite()) {
return;
}
if (!datastoreContext.isPersistent()) {
- return;
- }
-
- if (candidate.getRootNode().getModificationType().equals(ModificationType.UNMODIFIED)) {
+ // FIXME: why don't we want a snapshot in non-persistent state?
return;
}
// top level container ie "/"
- if ((candidate.getRootPath().equals(YangInstanceIdentifier.empty())
- && candidate.getRootNode().getModificationType().equals(ModificationType.WRITE))) {
+ if (candidate.getRootPath().isEmpty() && candidate.getRootNode().modificationType() == ModificationType.WRITE) {
LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext);
shard.self().tell(new InitiateCaptureSnapshot(), noSender());
- return;
}
}
}
private boolean payloadReplicationComplete(final TransactionIdentifier txId) {
- final CommitEntry current = pendingFinishCommits.peek();
+ final var current = pendingFinishCommits.peek();
if (current == null) {
LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
allMetadataCommittedTransaction(txId);
return false;
}
- if (!current.cohort.getIdentifier().equals(txId)) {
+ final var cohortTxId = current.cohort.transactionId();
+ if (!cohortTxId.equals(txId)) {
LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
- current.cohort.getIdentifier(), txId);
+ cohortTxId, txId);
allMetadataCommittedTransaction(txId);
return false;
}
}
}
+ private void allMetadataSkipTransactions(final SkipTransactionsPayload payload) {
+ final var historyId = payload.getIdentifier();
+ final var txIds = payload.getTransactionIds();
+ for (ShardDataTreeMetadata<?> m : metadata) {
+ m.onTransactionsSkipped(historyId, txIds);
+ }
+ }
+
/**
* Create a transaction chain for specified history. Unlike {@link #ensureTransactionChain(LocalHistoryIdentifier)},
* this method is used for re-establishing state when we are taking over
* @param closed True if the chain should be created in closed state (i.e. pending purge)
* @return Transaction chain handle
*/
- ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
+ final ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
final boolean closed) {
final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
return ret;
}
- ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
+ final ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
final @Nullable Runnable callback) {
ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
if (chain == null) {
return chain;
}
- ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+ final @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
shard.getShardMBean().incrementReadOnlyTransactionCount();
- if (txId.getHistoryId().getHistoryId() == 0) {
- return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
- }
+ final var historyId = txId.getHistoryId();
+ return historyId.getHistoryId() == 0 ? newStandaloneReadOnlyTransaction(txId)
+ : ensureTransactionChain(historyId, null).newReadOnlyTransaction(txId);
+ }
- return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
+ final @NonNull ReadOnlyShardDataTreeTransaction newStandaloneReadOnlyTransaction(final TransactionIdentifier txId) {
+ return new ReadOnlyShardDataTreeTransaction(this, txId, takeSnapshot());
}
- ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+ final @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
shard.getShardMBean().incrementReadWriteTransactionCount();
- if (txId.getHistoryId().getHistoryId() == 0) {
- return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
- .newModification());
- }
+ final var historyId = txId.getHistoryId();
+ return historyId.getHistoryId() == 0 ? newStandaloneReadWriteTransaction(txId)
+ : ensureTransactionChain(historyId, null).newReadWriteTransaction(txId);
+ }
- return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
+ final @NonNull ReadWriteShardDataTreeTransaction newStandaloneReadWriteTransaction(
+ final TransactionIdentifier txId) {
+ return new ReadWriteShardDataTreeTransaction(this, txId, newModification());
}
@VisibleForTesting
- public void notifyListeners(final DataTreeCandidate candidate) {
+ final void notifyListeners(final DataTreeCandidate candidate) {
treeChangeListenerPublisher.publishChanges(candidate);
}
* Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
* replication callbacks.
*/
- void purgeLeaderState() {
+ final void purgeLeaderState() {
for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
chain.close();
}
* @param id History identifier
* @param callback Callback to invoke upon completion, may be null
*/
- void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+ final void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
if (commonCloseTransactionChain(id, callback)) {
replicatePayload(id, CloseLocalHistoryPayload.create(id,
shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
*
* @param id History identifier
*/
- void closeTransactionChain(final LocalHistoryIdentifier id) {
+ final void closeTransactionChain(final LocalHistoryIdentifier id) {
commonCloseTransactionChain(id, null);
}
* @param id History identifier
* @param callback Callback to invoke upon completion, may be null
*/
- void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+ final void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
if (chain == null) {
LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
}
- Optional<DataTreeCandidate> readCurrentData() {
- return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty())
- .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state));
+ final void skipTransactions(final LocalHistoryIdentifier id, final ImmutableUnsignedLongSet transactionIds,
+ final Runnable callback) {
+ final ShardDataTreeTransactionChain chain = transactionChains.get(id);
+ if (chain == null) {
+ LOG.debug("{}: Skipping on non-existent transaction chain {}", logContext, id);
+ if (callback != null) {
+ callback.run();
+ }
+ return;
+ }
+
+ replicatePayload(id, SkipTransactionsPayload.create(id, transactionIds,
+ shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+ }
+
+ final Optional<DataTreeCandidate> readCurrentData() {
+ return readNode(YangInstanceIdentifier.of())
+ .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), state));
}
- public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
- final Optional<DataTreeCandidate> initialState,
- final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
+ final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
}
- int getQueueSize() {
+ final int getQueueSize() {
return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
}
@Override
- void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
+ final void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
final TransactionIdentifier id = transaction.getIdentifier();
LOG.debug("{}: aborting transaction {}", logContext, id);
replicatePayload(id, AbortTransactionPayload.create(
}
@Override
- void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
+ final void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
// No-op for free-standing transactions
-
}
@Override
- ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
+ final ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
final Optional<SortedSet<String>> participatingShardNames) {
final DataTreeModification snapshot = transaction.getSnapshot();
final TransactionIdentifier id = transaction.getIdentifier();
return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
}
- void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
+ final void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
LOG.debug("{}: purging transaction {}", logContext, id);
replicatePayload(id, PurgeTransactionPayload.create(
id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
}
- public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
- return dataTree.takeSnapshot().readNode(path);
+ @VisibleForTesting
+ public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+ return takeSnapshot().readNode(path);
}
- DataTreeSnapshot takeSnapshot() {
+ final DataTreeSnapshot takeSnapshot() {
return dataTree.takeSnapshot();
}
@VisibleForTesting
- public DataTreeModification newModification() {
- return dataTree.takeSnapshot().newModification();
+ final DataTreeModification newModification() {
+ return takeSnapshot().newModification();
}
- public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
+ final Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());
for (CommitEntry entry: pendingFinishCommits) {
/**
* Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
*/
- void resumeNextPendingTransaction() {
+ final void resumeNextPendingTransaction() {
LOG.debug("{}: attempting to resume transaction processing", logContext);
processNextPending();
}
final SimpleShardDataTreeCohort cohort = entry.cohort;
final DataTreeModification modification = cohort.getDataTreeModification();
- LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Validating transaction {}", logContext, cohort.transactionId());
Exception cause;
try {
tip.validate(modification);
- LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Transaction {} validated", logContext, cohort.transactionId());
cohort.successfulCanCommit();
entry.lastAccess = readTime();
return;
} catch (ConflictingModificationAppliedException e) {
- LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
+ LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.transactionId(),
e.getPath());
cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
} catch (DataValidationFailedException e) {
- LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
+ LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.transactionId(),
e.getPath(), e);
// For debugging purposes, allow dumping of the modification. Coupled with the above
// precondition log, it should allow us to understand what went on.
- LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
+ LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.transactionId(), modification);
LOG.trace("{}: Current tree: {}", logContext, dataTree);
cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
} catch (Exception e) {
final SimpleShardDataTreeCohort cohort = entry.cohort;
if (cohort.isFailed()) {
- LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Removing failed transaction {}", logContext, cohort.transactionId());
queue.remove();
continue;
}
return first != null && first.cohort.getState() == State.COMMIT_PENDING;
}
+ // non-final for mocking
void startCanCommit(final SimpleShardDataTreeCohort cohort) {
final CommitEntry head = pendingTransactions.peek();
if (head == null) {
Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
if (precedingShardNames.isEmpty()) {
- LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.transactionId());
return;
}
LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}",
- logContext, cohort.getIdentifier(), precedingShardNames);
+ logContext, cohort.transactionId(), precedingShardNames);
final Iterator<CommitEntry> iter = pendingTransactions.iterator();
int index = -1;
int moveToIndex = -1;
if (cohort.equals(entry.cohort)) {
if (moveToIndex < 0) {
LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
- logContext, cohort.getIdentifier());
+ logContext, cohort.transactionId());
return;
}
LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
- logContext, cohort.getIdentifier(), moveToIndex);
+ logContext, cohort.transactionId(), moveToIndex);
iter.remove();
insertEntry(pendingTransactions, entry, moveToIndex);
if (!cohort.equals(pendingTransactions.peek().cohort)) {
LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
- logContext, cohort.getIdentifier());
+ logContext, cohort.transactionId());
return;
}
LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
- logContext, cohort.getIdentifier());
+ logContext, cohort.transactionId());
break;
}
if (entry.cohort.getState() != State.READY) {
LOG.debug("{}: Skipping pending transaction {} in state {}",
- logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
+ logContext, entry.cohort.transactionId(), entry.cohort.getState());
continue;
}
if (precedingShardNames.equals(pendingPrecedingShardNames)) {
if (moveToIndex < 0) {
LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
- logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
+ logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), index);
moveToIndex = index;
} else {
LOG.debug(
"{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
- logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
+ logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), moveToIndex);
}
} else {
LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
- logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
+ logContext, pendingPrecedingShardNames, entry.cohort.transactionId());
}
}
}
processNextPendingTransaction();
}
+ // non-final for mocking
@SuppressWarnings("checkstyle:IllegalCatch")
void startPreCommit(final SimpleShardDataTreeCohort cohort) {
final CommitEntry entry = pendingTransactions.peek();
final SimpleShardDataTreeCohort current = entry.cohort;
verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
- final TransactionIdentifier currentId = current.getIdentifier();
+ final TransactionIdentifier currentId = current.transactionId();
LOG.debug("{}: Preparing transaction {}", logContext, currentId);
final DataTreeCandidateTip candidate;
return;
}
- cohort.userPreCommit(candidate, new FutureCallback<Void>() {
+ cohort.userPreCommit(candidate, new FutureCallback<>() {
@Override
- public void onSuccess(final Void noop) {
+ public void onSuccess(final Empty result) {
// Set the tip of the data tree.
tip = verifyNotNull(candidate);
@SuppressWarnings("checkstyle:IllegalCatch")
private void finishCommit(final SimpleShardDataTreeCohort cohort) {
- final TransactionIdentifier txId = cohort.getIdentifier();
+ final TransactionIdentifier txId = cohort.transactionId();
final DataTreeCandidate candidate = cohort.getCandidate();
LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);
});
}
+ // non-final for mocking
void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
final CommitEntry entry = pendingCommits.peek();
checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
final SimpleShardDataTreeCohort current = entry.cohort;
if (!cohort.equals(current)) {
- LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.transactionId());
return;
}
- LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());
+ LOG.debug("{}: Starting commit for transaction {}", logContext, current.transactionId());
- final TransactionIdentifier txId = cohort.getIdentifier();
+ final TransactionIdentifier txId = cohort.transactionId();
final Payload payload;
try {
payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
processNextPendingCommit();
}
- Collection<ActorRef> getCohortActors() {
+ final Collection<ActorRef> getCohortActors() {
return cohortRegistry.getCohortActors();
}
- void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
+ final void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
cohortRegistry.process(sender, message);
}
@Override
- ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+ final ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
final Exception failure) {
final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
pendingTransactions.add(new CommitEntry(cohort, readTime()));
}
@Override
- ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+ final ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
final Optional<SortedSet<String>> participatingShardNames) {
SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
// Exposed for ShardCommitCoordinator so it does not have deal with local histories (it does not care), this mimics
// the newReadWriteTransaction()
- ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+ final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
final Optional<SortedSet<String>> participatingShardNames) {
- if (txId.getHistoryId().getHistoryId() == 0) {
+ final var historyId = txId.getHistoryId();
+ if (historyId.getHistoryId() == 0) {
return createReadyCohort(txId, mod, participatingShardNames);
}
-
- return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
+ return ensureTransactionChain(historyId, null).createReadyCohort(txId, mod, participatingShardNames);
}
@SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
- void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
+ final void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
final long now = readTime();
final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
if (updateOpt.isPresent()) {
- final long newAccess = updateOpt.getAsLong();
+ final long newAccess = updateOpt.orElseThrow();
final long newDelta = now - newAccess;
if (newDelta < delta) {
LOG.debug("{}: Updated current transaction {} access time", logContext,
- currentTx.cohort.getIdentifier());
+ currentTx.cohort.transactionId());
currentTx.lastAccess = newAccess;
delta = newDelta;
}
final State state = currentTx.cohort.getState();
LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
- currentTx.cohort.getIdentifier(), deltaMillis, state);
+ currentTx.cohort.transactionId(), deltaMillis, state);
boolean processNext = true;
final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
+ deltaMillis + "ms");
break;
case COMMIT_PENDING:
LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
- currentTx.cohort.getIdentifier());
+ currentTx.cohort.transactionId());
currentTx.lastAccess = now;
processNext = false;
return;
}
}
+ // non-final for mocking
boolean startAbort(final SimpleShardDataTreeCohort cohort) {
final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
pendingTransactions).iterator();
if (!it.hasNext()) {
- LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.transactionId());
return true;
}
final CommitEntry first = it.next();
if (cohort.equals(first.cohort)) {
if (cohort.getState() != State.COMMIT_PENDING) {
- LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
- cohort.getIdentifier());
+ LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.transactionId(),
+ cohort.transactionId());
it.remove();
if (cohort.getCandidate() != null) {
return true;
}
- LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
+ LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.transactionId());
return false;
}
- DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
+ DataTreeTip newTip = requireNonNullElse(first.cohort.getCandidate(), dataTree);
while (it.hasNext()) {
final CommitEntry e = it.next();
if (cohort.equals(e.cohort)) {
- LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: aborting queued transaction {}", logContext, cohort.transactionId());
it.remove();
if (cohort.getCandidate() != null) {
}
return true;
- } else {
- newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
}
+
+ newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
}
- LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
+ LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.transactionId());
return true;
}
while (iter.hasNext()) {
final SimpleShardDataTreeCohort cohort = iter.next().cohort;
if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
- LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.transactionId());
try {
tip.validate(cohort.getDataTreeModification());
} catch (DataValidationFailedException | RuntimeException e) {
- LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
+ LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.transactionId(), e);
cohort.reportFailure(e);
}
} else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
- LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());
+ LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.transactionId());
try {
tip.validate(cohort.getDataTreeModification());
cohort.setNewCandidate(candidate);
tip = candidate;
} catch (RuntimeException | DataValidationFailedException e) {
- LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
+ LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.transactionId(), e);
cohort.reportFailure(e);
}
}
}
}
- void setRunOnPendingTransactionsComplete(final Runnable operation) {
+ final void setRunOnPendingTransactionsComplete(final Runnable operation) {
runOnPendingTransactionsComplete = operation;
maybeRunOperationOnPendingTransactionsComplete();
}
}
}
- ShardStats getStats() {
+ final ShardStats getStats() {
return shard.getShardMBean();
}
- Iterator<SimpleShardDataTreeCohort> cohortIterator() {
+ final Iterator<SimpleShardDataTreeCohort> cohortIterator() {
return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
e -> e.cohort).iterator();
}
- void removeTransactionChain(final LocalHistoryIdentifier id) {
+ final void removeTransactionChain(final LocalHistoryIdentifier id) {
if (transactionChains.remove(id) != null) {
LOG.debug("{}: Removed transaction chain {}", logContext, id);
}
import java.util.Optional;
import java.util.function.Consumer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* Interface for a class that generates and publishes notifications for DataTreeChangeListeners.
*/
interface ShardDataTreeChangeListenerPublisher extends ShardDataTreeNotificationPublisher {
void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
- Optional<DataTreeCandidate> initialState,
- Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration);
+ Optional<DataTreeCandidate> initialState, Consumer<Registration> onRegistration);
}
import java.util.Optional;
import java.util.function.Consumer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* Implementation of ShardDataTreeChangeListenerPublisher that offloads the generation and publication of data tree
@Override
public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> currentState,
- final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ final Consumer<Registration> onRegistration) {
final ShardDataTreeChangePublisherActor.RegisterListener regMessage =
new ShardDataTreeChangePublisherActor.RegisterListener(treeId, listener, currentState, onRegistration);
log.debug("{}: Sending {} to publisher actor {}", logContext(), regMessage, publisherActor());
import java.util.Optional;
import java.util.function.Consumer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* Actor used to generate and publish DataTreeChange notifications.
@Override
protected void handleReceive(final Object message) {
- if (message instanceof RegisterListener) {
- RegisterListener reg = (RegisterListener)message;
+ if (message instanceof RegisterListener reg) {
LOG.debug("{}: Received {}", logContext(), reg);
if (reg.initialState.isPresent()) {
DefaultShardDataTreeChangeListenerPublisher.notifySingleListener(reg.path, reg.listener,
- reg.initialState.get(), logContext());
+ reg.initialState.orElseThrow(), logContext());
} else {
reg.listener.onInitialData();
}
private final YangInstanceIdentifier path;
private final DOMDataTreeChangeListener listener;
private final Optional<DataTreeCandidate> initialState;
- private final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration;
+ private final Consumer<Registration> onRegistration;
RegisterListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
- final Optional<DataTreeCandidate> initialState,
- final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+ final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
this.path = requireNonNull(path);
this.listener = requireNonNull(listener);
this.initialState = requireNonNull(initialState);
import com.google.common.util.concurrent.FutureCallback;
import java.util.Optional;
import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
@VisibleForTesting
-public abstract class ShardDataTreeCohort implements Identifiable<TransactionIdentifier> {
+public abstract class ShardDataTreeCohort {
public enum State {
READY,
CAN_COMMIT_PENDING,
// Prevent foreign instantiation
}
+ abstract @NonNull TransactionIdentifier transactionId();
+
// FIXME: This leaks internal state generated in preCommit,
// should be result of canCommit
abstract DataTreeCandidateTip getCandidate();
// FIXME: Should return rebased DataTreeCandidateTip
@VisibleForTesting
- public abstract void canCommit(FutureCallback<Void> callback);
+ public abstract void canCommit(FutureCallback<Empty> callback);
@VisibleForTesting
public abstract void preCommit(FutureCallback<DataTreeCandidate> callback);
@VisibleForTesting
- public abstract void abort(FutureCallback<Void> callback);
+ public abstract void abort(FutureCallback<Empty> callback);
@VisibleForTesting
public abstract void commit(FutureCallback<UnsignedLong> callback);
}
ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("id", getIdentifier()).add("state", getState());
+ return toStringHelper.add("id", transactionId()).add("state", getState());
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
import static java.util.Objects.requireNonNull;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBean;
import org.opendaylight.controller.cluster.datastore.messages.GetInfo;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import scala.concurrent.Await;
import scala.concurrent.ExecutionContext;
*
* @author Thomas Pantelis
*/
-public class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implements ShardDataTreeListenerInfoMXBean {
+final class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implements ShardDataTreeListenerInfoMXBean {
private static final String JMX_CATEGORY = "ShardDataTreeListenerInfo";
private final OnDemandShardStateCache stateCache;
- public ShardDataTreeListenerInfoMXBeanImpl(final String shardName, final String mxBeanType,
- final ActorRef shardActor) {
+ ShardDataTreeListenerInfoMXBeanImpl(final String shardName, final String mxBeanType, final ActorRef shardActor) {
super(shardName, mxBeanType, JMX_CATEGORY);
stateCache = new OnDemandShardStateCache(shardName, requireNonNull(shardActor));
}
return stateCache.get();
} catch (Exception e) {
Throwables.throwIfUnchecked(e);
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
private static List<DataTreeListenerInfo> getListenerActorsInfo(final Collection<ActorSelection> actors) {
final Timeout timeout = new Timeout(20, TimeUnit.SECONDS);
final List<Future<Object>> futureList = new ArrayList<>(actors.size());
- for (ActorSelection actor: actors) {
+ for (ActorSelection actor : actors) {
futureList.add(Patterns.ask(actor, GetInfo.INSTANCE, timeout));
}
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
abstract class ShardDataTreeMetadata<T extends ShardDataTreeSnapshotMetadata<T>> {
/**
abstract void onTransactionPurged(TransactionIdentifier txId);
+ abstract void onTransactionsSkipped(LocalHistoryIdentifier historyId, ImmutableUnsignedLongSet txIds);
+
abstract void onHistoryCreated(LocalHistoryIdentifier historyId);
abstract void onHistoryClosed(LocalHistoryIdentifier historyId);
package org.opendaylight.controller.cluster.datastore;
import java.util.concurrent.TimeUnit;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* Interface for a class the publishes data tree notifications.
import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* Actor used to generate and publish data tree notifications. This is used to offload the potentially
}
@Override
- protected void handleReceive(Object message) {
+ protected void handleReceive(final Object message) {
if (message instanceof PublishNotifications) {
PublishNotifications toPublish = (PublishNotifications)message;
timer.start();
static class PublishNotifications {
private final DataTreeCandidate candidate;
- PublishNotifications(DataTreeCandidate candidate) {
+ PublishNotifications(final DataTreeCandidate candidate) {
this.candidate = candidate;
}
}
import com.google.common.base.MoreObjects;
import java.util.Optional;
import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private boolean closed;
ShardDataTreeTransactionChain(final LocalHistoryIdentifier localHistoryIdentifier, final ShardDataTree dataTree) {
- this.chainId = requireNonNull(localHistoryIdentifier);
+ chainId = requireNonNull(localHistoryIdentifier);
this.dataTree = requireNonNull(dataTree);
}
return previousTx.getSnapshot();
}
- ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+ @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
final DataTreeSnapshot snapshot = getSnapshot();
LOG.debug("Allocated read-only transaction {} snapshot {}", txId, snapshot);
return new ReadOnlyShardDataTreeTransaction(this, txId, snapshot);
}
- ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+ @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
final DataTreeSnapshot snapshot = getSnapshot();
LOG.debug("Allocated read-write transaction {} snapshot {}", txId, snapshot);
- openTransaction = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
- return openTransaction;
+ final var ret = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
+ openTransaction = ret;
+ return ret;
}
void close() {
import java.util.Optional;
import java.util.SortedSet;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
abstract class ShardDataTreeTransactionParent {
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
*
* @author syedbahm
*/
-public class ShardReadTransaction extends ShardTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ShardReadTransaction extends ShardTransaction {
private final AbstractShardDataTreeTransaction<?> transaction;
public ShardReadTransaction(final AbstractShardDataTreeTransaction<?> transaction, final ActorRef shardActor,
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
*
* @author syedbahm
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ShardReadWriteTransaction extends ShardWriteTransaction {
- public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
- ShardStats shardStats) {
+ public ShardReadWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor,
+ final ShardStats shardStats) {
super(transaction, shardActor, shardStats);
}
@Override
- public void handleReceive(Object message) {
+ public void handleReceive(final Object message) {
if (ReadData.isSerializedType(message)) {
readData(ReadData.fromSerializable(message));
} else if (DataExists.isSerializedType(message)) {
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
WithSnapshot(final ShardDataTree store, final String shardName, final Logger log, final Snapshot snapshot) {
super(store, shardName, log);
- this.restoreFromSnapshot = requireNonNull(snapshot);
+ restoreFromSnapshot = requireNonNull(snapshot);
}
@Override
open = false;
}
- private File writeRoot(final String kind, final NormalizedNode<?, ?> node) {
+ private File writeRoot(final String kind, final NormalizedNode node) {
final File file = new File(System.getProperty("karaf.data", "."),
"failed-recovery-" + kind + "-" + shardName + ".xml");
NormalizedNodeXMLOutput.toFile(file, node);
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import com.google.common.base.Joiner;
import com.google.common.base.Joiner.MapJoiner;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
-import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStatsMXBean;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
*
* @author Basheeruddin syedbahm@cisco.com
*/
-public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
+final class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
public static final String JMX_CATEGORY_SHARD = "Shards";
- // FIXME: migrate this to Java 8 thread-safe time
- @GuardedBy("DATE_FORMAT")
- private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ private static final DateTimeFormatter DATE_FORMATTER = DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSS")
+ .withZone(ZoneId.systemDefault());
private static final MapJoiner MAP_JOINER = Joiner.on(", ").withKeyValueSeparator(": ");
private long lastLeadershipChangeTime;
- public ShardStats(final String shardName, final String mxBeanType, final @Nullable Shard shard) {
+ ShardStats(final String shardName, final String mxBeanType, final @Nullable Shard shard) {
super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
this.shard = shard;
stateCache = new OnDemandShardStateCache(shardName, shard != null ? shard.self() : null);
}
+ static ShardStats create(final String shardName, final String mxBeanType, final @NonNull Shard shard) {
+ String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
+ ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
+ shardStatsMBeanImpl.registerMBean();
+ return shardStatsMBeanImpl;
+ }
+
@SuppressWarnings("checkstyle:IllegalCatch")
private OnDemandRaftState getOnDemandRaftState() {
try {
}
private static String formatMillis(final long timeMillis) {
- synchronized (DATE_FORMAT) {
- return DATE_FORMAT.format(new Date(timeMillis));
- }
+ return DATE_FORMATTER.format(Instant.ofEpochMilli(timeMillis));
}
@Override
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
/**
* The ShardTransaction Actor represents a remote transaction that delegates all actions to DOMDataReadWriteTransaction.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
private final ActorRef shardActor;
private final ShardStats shardStats;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't "
+ "create remote instances of this actor and thus don't need it to be Serializable.")
private static class ShardTransactionCreator implements Creator<ShardTransaction> {
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
final AbstractShardDataTreeTransaction<?> transaction;
@Override
public ShardTransaction create() {
- final ShardTransaction tx;
- switch (type) {
- case READ_ONLY:
- tx = new ShardReadTransaction(transaction, shardActor, shardStats);
- break;
- case READ_WRITE:
- tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
- shardStats);
- break;
- case WRITE_ONLY:
- tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
- shardStats);
- break;
- default:
- throw new IllegalArgumentException("Unhandled transaction type " + type);
- }
-
+ final var tx = switch (type) {
+ case READ_ONLY -> new ShardReadTransaction(transaction, shardActor, shardStats);
+ case READ_WRITE -> new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+ shardActor, shardStats);
+ case WRITE_ONLY -> new ShardWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+ shardActor, shardStats);
+ default -> throw new IllegalArgumentException("Unhandled transaction type " + type);
+ };
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
return tx;
}
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
/**
* A factory for creating ShardTransaction actors.
*
* @author Thomas Pantelis
*/
-class ShardTransactionActorFactory {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ShardTransactionActorFactory {
private static final AtomicLong ACTOR_NAME_COUNTER = new AtomicLong();
private final ShardDataTree dataTree;
private final ActorRef shardActor;
private final String shardName;
- ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext,
- String txnDispatcherPath, ActorRef shardActor, ActorContext actorContext, ShardStats shardMBean,
- String shardName) {
+ ShardTransactionActorFactory(final ShardDataTree dataTree, final DatastoreContext datastoreContext,
+ final String txnDispatcherPath, final ActorRef shardActor, final ActorContext actorContext,
+ final ShardStats shardMBean, final String shardName) {
this.dataTree = requireNonNull(dataTree);
this.datastoreContext = requireNonNull(datastoreContext);
this.txnDispatcherPath = requireNonNull(txnDispatcherPath);
return sb.append(txId.getTransactionId()).append('_').append(ACTOR_NAME_COUNTER.incrementAndGet()).toString();
}
- ActorRef newShardTransaction(TransactionType type, TransactionIdentifier transactionID) {
- final AbstractShardDataTreeTransaction<?> transaction;
- switch (type) {
- case READ_ONLY:
- transaction = dataTree.newReadOnlyTransaction(transactionID);
- break;
- case READ_WRITE:
- case WRITE_ONLY:
- transaction = dataTree.newReadWriteTransaction(transactionID);
- break;
- default:
- throw new IllegalArgumentException("Unsupported transaction type " + type);
- }
-
+ ActorRef newShardTransaction(final TransactionType type, final TransactionIdentifier transactionID) {
+ final AbstractShardDataTreeTransaction<?> transaction = switch (type) {
+ case READ_ONLY -> dataTree.newReadOnlyTransaction(transactionID);
+ case READ_WRITE, WRITE_ONLY -> dataTree.newReadWriteTransaction(transactionID);
+ default -> throw new IllegalArgumentException("Unsupported transaction type " + type);
+ };
return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean)
.withDispatcher(txnDispatcherPath), actorNameFor(transactionID));
}
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
class ShardTransactionMessageRetrySupport implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(ShardTransactionMessageRetrySupport.class);
messagesToRetry.clear();
}
- private static class MessageInfo {
+ private static final class MessageInfo {
final Object message;
final ActorRef replyTo;
final String failureMessage;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
*
* @author syedbahm
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ShardWriteTransaction extends ShardTransaction {
-
private int totalBatchedModificationsReceived;
private Exception lastBatchedModificationsException;
private final ReadWriteShardDataTreeTransaction transaction;
- public ShardWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
- ShardStats shardStats) {
+ public ShardWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor,
+ final ShardStats shardStats) {
super(shardActor, shardStats, transaction.getIdentifier());
this.transaction = transaction;
}
}
@Override
- public void handleReceive(Object message) {
+ public void handleReceive(final Object message) {
if (message instanceof BatchedModifications) {
batchedModifications((BatchedModifications)message);
} else {
}
@SuppressWarnings("checkstyle:IllegalCatch")
- private void batchedModifications(BatchedModifications batched) {
+ private void batchedModifications(final BatchedModifications batched) {
if (checkClosed()) {
if (batched.isReady()) {
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
}
- protected final void dataExists(DataExists message) {
+ protected final void dataExists(final DataExists message) {
super.dataExists(transaction, message);
}
- protected final void readData(ReadData message) {
+ protected final void readData(final ReadData message) {
super.readData(transaction, message);
}
private boolean checkClosed() {
- if (transaction.isClosed()) {
+ final boolean ret = transaction.isClosed();
+ if (ret) {
getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
"Transaction is closed, no modifications allowed")), getSelf());
- return true;
- } else {
- return false;
}
+ return ret;
}
- private void readyTransaction(BatchedModifications batched) {
+ private void readyTransaction(final BatchedModifications batched) {
TransactionIdentifier transactionID = getTransactionId();
LOG.debug("readyTransaction : {}", transactionID);
*/
package org.opendaylight.controller.cluster.datastore;
+import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import java.util.Optional;
import java.util.SortedSet;
import java.util.concurrent.CompletionStage;
+import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final DataTreeModification transaction;
private final ShardDataTree dataTree;
- private final TransactionIdentifier transactionId;
+ private final @NonNull TransactionIdentifier transactionId;
private final CompositeDataTreeCohort userCohorts;
private final @Nullable SortedSet<String> participatingShardNames;
this.dataTree = requireNonNull(dataTree);
this.transaction = requireNonNull(transaction);
this.transactionId = requireNonNull(transactionId);
- this.userCohorts = null;
- this.participatingShardNames = null;
+ userCohorts = null;
+ participatingShardNames = null;
this.nextFailure = requireNonNull(nextFailure);
}
@Override
- public TransactionIdentifier getIdentifier() {
+ TransactionIdentifier transactionId() {
return transactionId;
}
private void checkState(final State expected) {
Preconditions.checkState(state == expected, "State %s does not match expected state %s for %s",
- state, expected, getIdentifier());
+ state, expected, transactionId());
}
@Override
- public void canCommit(final FutureCallback<Void> newCallback) {
+ public void canCommit(final FutureCallback<Empty> newCallback) {
if (state == State.CAN_COMMIT_PENDING) {
return;
}
checkState(State.READY);
- this.callback = requireNonNull(newCallback);
+ callback = requireNonNull(newCallback);
state = State.CAN_COMMIT_PENDING;
if (nextFailure == null) {
@Override
public void preCommit(final FutureCallback<DataTreeCandidate> newCallback) {
checkState(State.CAN_COMMIT_COMPLETE);
- this.callback = requireNonNull(newCallback);
+ callback = requireNonNull(newCallback);
state = State.PRE_COMMIT_PENDING;
if (nextFailure == null) {
}
@Override
- public void abort(final FutureCallback<Void> abortCallback) {
+ public void abort(final FutureCallback<Empty> abortCallback) {
if (!dataTree.startAbort(this)) {
- abortCallback.onSuccess(null);
+ abortCallback.onSuccess(Empty.value());
return;
}
final Optional<CompletionStage<?>> maybeAborts = userCohorts.abort();
if (!maybeAborts.isPresent()) {
- abortCallback.onSuccess(null);
+ abortCallback.onSuccess(Empty.value());
return;
}
- maybeAborts.get().whenComplete((noop, failure) -> {
+ maybeAborts.orElseThrow().whenComplete((noop, failure) -> {
if (failure != null) {
abortCallback.onFailure(failure);
} else {
- abortCallback.onSuccess(null);
+ abortCallback.onSuccess(Empty.value());
}
});
}
@Override
public void commit(final FutureCallback<UnsignedLong> newCallback) {
checkState(State.PRE_COMMIT_COMPLETE);
- this.callback = requireNonNull(newCallback);
+ callback = requireNonNull(newCallback);
state = State.COMMIT_PENDING;
if (nextFailure == null) {
private <T> FutureCallback<T> switchState(final State newState) {
@SuppressWarnings("unchecked")
- final FutureCallback<T> ret = (FutureCallback<T>) this.callback;
- this.callback = null;
+ final FutureCallback<T> ret = (FutureCallback<T>) callback;
+ callback = null;
LOG.debug("Transaction {} changing state from {} to {}", transactionId, state, newState);
- this.state = newState;
+ state = newState;
return ret;
}
void setNewCandidate(final DataTreeCandidateTip dataTreeCandidate) {
checkState(State.PRE_COMMIT_COMPLETE);
- this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+ candidate = verifyNotNull(dataTreeCandidate);
}
void successfulCanCommit() {
- switchState(State.CAN_COMMIT_COMPLETE).onSuccess(null);
+ switchState(State.CAN_COMMIT_COMPLETE).onSuccess(Empty.value());
}
void failedCanCommit(final Exception cause) {
* @param dataTreeCandidate {@link DataTreeCandidate} under consideration
* @param futureCallback the callback to invoke on completion, which may be immediate or async.
*/
- void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Void> futureCallback) {
+ void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Empty> futureCallback) {
userCohorts.reset();
- final Optional<CompletionStage<Void>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
+ final Optional<CompletionStage<Empty>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
if (!maybeCanCommitFuture.isPresent()) {
doUserPreCommit(futureCallback);
return;
}
- maybeCanCommitFuture.get().whenComplete((noop, failure) -> {
+ maybeCanCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
if (failure != null) {
futureCallback.onFailure(failure);
} else {
});
}
- private void doUserPreCommit(final FutureCallback<Void> futureCallback) {
- final Optional<CompletionStage<Void>> maybePreCommitFuture = userCohorts.preCommit();
+ private void doUserPreCommit(final FutureCallback<Empty> futureCallback) {
+ final Optional<CompletionStage<Empty>> maybePreCommitFuture = userCohorts.preCommit();
if (!maybePreCommitFuture.isPresent()) {
- futureCallback.onSuccess(null);
+ futureCallback.onSuccess(Empty.value());
return;
}
- maybePreCommitFuture.get().whenComplete((noop, failure) -> {
+ maybePreCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
if (failure != null) {
futureCallback.onFailure(failure);
} else {
- futureCallback.onSuccess(null);
+ futureCallback.onSuccess(Empty.value());
}
});
}
void successfulPreCommit(final DataTreeCandidateTip dataTreeCandidate) {
LOG.trace("Transaction {} prepared candidate {}", transaction, dataTreeCandidate);
- this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+ candidate = verifyNotNull(dataTreeCandidate);
switchState(State.PRE_COMMIT_COMPLETE).onSuccess(dataTreeCandidate);
}
}
void successfulCommit(final UnsignedLong journalIndex, final Runnable onComplete) {
- final Optional<CompletionStage<Void>> maybeCommitFuture = userCohorts.commit();
+ final Optional<CompletionStage<Empty>> maybeCommitFuture = userCohorts.commit();
if (!maybeCommitFuture.isPresent()) {
finishSuccessfulCommit(journalIndex, onComplete);
return;
}
- maybeCommitFuture.get().whenComplete((noop, failure) -> {
+ maybeCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
if (failure != null) {
LOG.error("User cohorts failed to commit", failure);
}
void reportFailure(final Exception cause) {
if (nextFailure == null) {
- this.nextFailure = requireNonNull(cause);
+ nextFailure = requireNonNull(cause);
} else {
LOG.debug("Transaction {} already has a set failure, not updating it", transactionId, cause);
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Arrays;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit
- * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the
- * shard as an optimization.
- *
- * @author Thomas Pantelis
- */
-class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
- private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
-
- private final ActorUtils actorUtils;
- private final Future<Object> cohortFuture;
- private final TransactionIdentifier transactionId;
- private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
- private final OperationCallback.Reference operationCallbackRef;
-
- SingleCommitCohortProxy(ActorUtils actorUtils, Future<Object> cohortFuture, TransactionIdentifier transactionId,
- OperationCallback.Reference operationCallbackRef) {
- this.actorUtils = actorUtils;
- this.cohortFuture = cohortFuture;
- this.transactionId = requireNonNull(transactionId);
- this.operationCallbackRef = operationCallbackRef;
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- LOG.debug("Tx {} canCommit", transactionId);
-
- final SettableFuture<Boolean> returnFuture = SettableFuture.create();
-
- cohortFuture.onComplete(new OnComplete<Object>() {
- @Override
- public void onComplete(Throwable failure, Object cohortResponse) {
- if (failure != null) {
- operationCallbackRef.get().failure();
- returnFuture.setException(failure);
- return;
- }
-
- operationCallbackRef.get().success();
-
- LOG.debug("Tx {} successfully completed direct commit", transactionId);
-
- // The Future was the result of a direct commit to the shard, essentially eliding the
- // front-end 3PC coordination. We don't really care about the specific Future
- // response object, only that it completed successfully. At this point the Tx is complete
- // so return true. The subsequent preCommit and commit phases will be no-ops, ie return
- // immediate success, to complete the 3PC for the front-end.
- returnFuture.set(Boolean.TRUE);
- }
- }, actorUtils.getClientDispatcher());
-
- return returnFuture;
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return delegateCohort.preCommit();
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return delegateCohort.abort();
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- return delegateCohort.commit();
- }
-
- @Override
- List<Future<Object>> getCohortFutures() {
- return Arrays.asList(cohortFuture);
- }
-}
import static java.util.Objects.requireNonNull;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
import com.google.common.primitives.UnsignedLong;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* Standalone transaction specialization of {@link AbstractFrontendHistory}. There can be multiple open transactions
* @author Robert Varga
*/
final class StandaloneFrontendHistory extends AbstractFrontendHistory {
- private final LocalHistoryIdentifier identifier;
- private final ShardDataTree tree;
+ private final @NonNull LocalHistoryIdentifier identifier;
+ private final @NonNull ShardDataTree tree;
private StandaloneFrontendHistory(final String persistenceId, final ClientIdentifier clientId,
final ShardDataTree tree, final Map<UnsignedLong, Boolean> closedTransactions,
- final RangeSet<UnsignedLong> purgedTransactions) {
+ final MutableUnsignedLongSet purgedTransactions) {
super(persistenceId, tree, closedTransactions, purgedTransactions);
- this.identifier = new LocalHistoryIdentifier(clientId, 0);
+ identifier = new LocalHistoryIdentifier(clientId, 0);
this.tree = requireNonNull(tree);
}
static @NonNull StandaloneFrontendHistory create(final String persistenceId, final ClientIdentifier clientId,
final ShardDataTree tree) {
return new StandaloneFrontendHistory(persistenceId, clientId, tree, ImmutableMap.of(),
- TreeRangeSet.create());
+ MutableUnsignedLongSet.of());
}
static @NonNull StandaloneFrontendHistory recreate(final String persistenceId, final ClientIdentifier clientId,
final ShardDataTree tree, final Map<UnsignedLong, Boolean> closedTransactions,
- final RangeSet<UnsignedLong> purgedTransactions) {
+ final MutableUnsignedLongSet purgedTransactions) {
return new StandaloneFrontendHistory(persistenceId, clientId, tree, new HashMap<>(closedTransactions),
- purgedTransactions);
+ purgedTransactions.mutableCopy());
}
@Override
@Override
FrontendTransaction createOpenSnapshot(final TransactionIdentifier id) {
- return FrontendReadOnlyTransaction.create(this, tree.newReadOnlyTransaction(id));
+ return FrontendReadOnlyTransaction.create(this, tree.newStandaloneReadOnlyTransaction(id));
}
@Override
FrontendTransaction createOpenTransaction(final TransactionIdentifier id) {
- return FrontendReadWriteTransaction.createOpen(this, tree.newReadWriteTransaction(id));
+ return FrontendReadWriteTransaction.createOpen(this, tree.newStandaloneReadWriteTransaction(id));
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A chain of {@link TransactionProxy}s. It allows a single open transaction to be open
- * at a time. For remote transactions, it also tracks the outstanding readiness requests
- * towards the shard and unblocks operations only after all have completed.
- */
-final class TransactionChainProxy extends AbstractTransactionContextFactory<LocalTransactionChain>
- implements DOMStoreTransactionChain {
- private abstract static class State {
- /**
- * Check if it is okay to allocate a new transaction.
- * @throws IllegalStateException if a transaction may not be allocated.
- */
- abstract void checkReady();
-
- /**
- * Return the future which needs to be waited for before shard information
- * is returned (which unblocks remote transactions).
- * @return Future to wait for, or null of no wait is necessary
- */
- abstract Future<?> previousFuture();
- }
-
- private abstract static class Pending extends State {
- private final TransactionIdentifier transaction;
- private final Future<?> previousFuture;
-
- Pending(final TransactionIdentifier transaction, final Future<?> previousFuture) {
- this.previousFuture = previousFuture;
- this.transaction = requireNonNull(transaction);
- }
-
- @Override
- final Future<?> previousFuture() {
- return previousFuture;
- }
-
- final TransactionIdentifier getIdentifier() {
- return transaction;
- }
- }
-
- private static final class Allocated extends Pending {
- Allocated(final TransactionIdentifier transaction, final Future<?> previousFuture) {
- super(transaction, previousFuture);
- }
-
- @Override
- void checkReady() {
- throw new IllegalStateException(String.format("Previous transaction %s is not ready yet", getIdentifier()));
- }
- }
-
- private static final class Submitted extends Pending {
- Submitted(final TransactionIdentifier transaction, final Future<?> previousFuture) {
- super(transaction, previousFuture);
- }
-
- @Override
- void checkReady() {
- // Okay to allocate
- }
- }
-
- private abstract static class DefaultState extends State {
- @Override
- final Future<?> previousFuture() {
- return null;
- }
- }
-
- private static final State IDLE_STATE = new DefaultState() {
- @Override
- void checkReady() {
- // Okay to allocate
- }
- };
-
- private static final State CLOSED_STATE = new DefaultState() {
- @Override
- void checkReady() {
- throw new DOMTransactionChainClosedException("Transaction chain has been closed");
- }
- };
-
- private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
- private static final AtomicReferenceFieldUpdater<TransactionChainProxy, State> STATE_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(TransactionChainProxy.class, State.class, "currentState");
-
- private final TransactionContextFactory parent;
- private volatile State currentState = IDLE_STATE;
-
- /**
- * This map holds Promise instances for each read-only tx. It is used to maintain ordering of tx creates
- * wrt to read-only tx's between this class and a LocalTransactionChain since they're bridged by
- * asynchronous futures. Otherwise, in the following scenario, eg:
- * <p/>
- * 1) Create write tx1 on chain
- * 2) do write and submit
- * 3) Create read-only tx2 on chain and issue read
- * 4) Create write tx3 on chain, do write but do not submit
- * <p/>
- * if the sequence/timing is right, tx3 may create its local tx on the LocalTransactionChain before tx2,
- * which results in tx2 failing b/c tx3 isn't ready yet. So maintaining ordering prevents this issue
- * (see Bug 4774).
- * <p/>
- * A Promise is added via newReadOnlyTransaction. When the parent class completes the primary shard
- * lookup and creates the TransactionContext (either success or failure), onTransactionContextCreated is
- * called which completes the Promise. A write tx that is created prior to completion will wait on the
- * Promise's Future via findPrimaryShard.
- */
- private final ConcurrentMap<TransactionIdentifier, Promise<Object>> priorReadOnlyTxPromises =
- new ConcurrentHashMap<>();
-
- TransactionChainProxy(final TransactionContextFactory parent, final LocalHistoryIdentifier historyId) {
- super(parent.getActorUtils(), historyId);
- this.parent = parent;
- }
-
- @Override
- public DOMStoreReadTransaction newReadOnlyTransaction() {
- currentState.checkReady();
- TransactionProxy transactionProxy = new TransactionProxy(this, TransactionType.READ_ONLY);
- priorReadOnlyTxPromises.put(transactionProxy.getIdentifier(), Futures.<Object>promise());
- return transactionProxy;
- }
-
- @Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- getActorUtils().acquireTxCreationPermit();
- return allocateWriteTransaction(TransactionType.READ_WRITE);
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- getActorUtils().acquireTxCreationPermit();
- return allocateWriteTransaction(TransactionType.WRITE_ONLY);
- }
-
- @Override
- public void close() {
- currentState = CLOSED_STATE;
-
- // Send a close transaction chain request to each and every shard
-
- getActorUtils().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(),
- CloseTransactionChain.class);
- }
-
- private TransactionProxy allocateWriteTransaction(final TransactionType type) {
- State localState = currentState;
- localState.checkReady();
-
- final TransactionProxy ret = new TransactionProxy(this, type);
- currentState = new Allocated(ret.getIdentifier(), localState.previousFuture());
- return ret;
- }
-
- @Override
- protected LocalTransactionChain factoryForShard(final String shardName, final ActorSelection shardLeader,
- final ReadOnlyDataTree dataTree) {
- final LocalTransactionChain ret = new LocalTransactionChain(this, shardLeader, dataTree);
- LOG.debug("Allocated transaction chain {} for shard {} leader {}", ret, shardName, shardLeader);
- return ret;
- }
-
- /**
- * This method is overridden to ensure the previous Tx's ready operations complete
- * before we initiate the next Tx in the chain to avoid creation failures if the
- * previous Tx's ready operations haven't completed yet.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- @Override
- protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, final TransactionIdentifier txId) {
- // Read current state atomically
- final State localState = currentState;
-
- // There are no outstanding futures, shortcut
- Future<?> previous = localState.previousFuture();
- if (previous == null) {
- return combineFutureWithPossiblePriorReadOnlyTxFutures(parent.findPrimaryShard(shardName, txId), txId);
- }
-
- final String previousTransactionId;
-
- if (localState instanceof Pending) {
- previousTransactionId = ((Pending) localState).getIdentifier().toString();
- LOG.debug("Tx: {} - waiting for ready futures with pending Tx {}", txId, previousTransactionId);
- } else {
- previousTransactionId = "";
- LOG.debug("Waiting for ready futures on chain {}", getHistoryId());
- }
-
- previous = combineFutureWithPossiblePriorReadOnlyTxFutures(previous, txId);
-
- // Add a callback for completion of the combined Futures.
- final Promise<PrimaryShardInfo> returnPromise = Futures.promise();
-
- final OnComplete onComplete = new OnComplete() {
- @Override
- public void onComplete(final Throwable failure, final Object notUsed) {
- if (failure != null) {
- // A Ready Future failed so fail the returned Promise.
- LOG.error("Tx: {} - ready future failed for previous Tx {}", txId, previousTransactionId);
- returnPromise.failure(failure);
- } else {
- LOG.debug("Tx: {} - previous Tx {} readied - proceeding to FindPrimaryShard",
- txId, previousTransactionId);
-
- // Send the FindPrimaryShard message and use the resulting Future to complete the
- // returned Promise.
- returnPromise.completeWith(parent.findPrimaryShard(shardName, txId));
- }
- }
- };
-
- previous.onComplete(onComplete, getActorUtils().getClientDispatcher());
- return returnPromise.future();
- }
-
- private <T> Future<T> combineFutureWithPossiblePriorReadOnlyTxFutures(final Future<T> future,
- final TransactionIdentifier txId) {
- return priorReadOnlyTxPromises.isEmpty() || priorReadOnlyTxPromises.containsKey(txId) ? future
- // Tough luck, we need do some work
- : combineWithPriorReadOnlyTxFutures(future, txId);
- }
-
- // Split out of the common path
- private <T> Future<T> combineWithPriorReadOnlyTxFutures(final Future<T> future, final TransactionIdentifier txId) {
- // Take a stable snapshot, and check if we raced
- final List<Entry<TransactionIdentifier, Promise<Object>>> priorReadOnlyTxPromiseEntries =
- new ArrayList<>(priorReadOnlyTxPromises.entrySet());
- if (priorReadOnlyTxPromiseEntries.isEmpty()) {
- return future;
- }
-
- final List<Future<Object>> priorReadOnlyTxFutures = new ArrayList<>(priorReadOnlyTxPromiseEntries.size());
- for (Entry<TransactionIdentifier, Promise<Object>> entry: priorReadOnlyTxPromiseEntries) {
- LOG.debug("Tx: {} - waiting on future for prior read-only Tx {}", txId, entry.getKey());
- priorReadOnlyTxFutures.add(entry.getValue().future());
- }
-
- final Future<Iterable<Object>> combinedFutures = Futures.sequence(priorReadOnlyTxFutures,
- getActorUtils().getClientDispatcher());
-
- final Promise<T> returnPromise = Futures.promise();
- final OnComplete<Iterable<Object>> onComplete = new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Iterable<Object> notUsed) {
- LOG.debug("Tx: {} - prior read-only Tx futures complete", txId);
-
- // Complete the returned Promise with the original Future.
- returnPromise.completeWith(future);
- }
- };
-
- combinedFutures.onComplete(onComplete, getActorUtils().getClientDispatcher());
- return returnPromise.future();
- }
-
- @Override
- protected <T> void onTransactionReady(final TransactionIdentifier transaction,
- final Collection<Future<T>> cohortFutures) {
- final State localState = currentState;
- checkState(localState instanceof Allocated, "Readying transaction %s while state is %s", transaction,
- localState);
- final TransactionIdentifier currentTx = ((Allocated)localState).getIdentifier();
- checkState(transaction.equals(currentTx), "Readying transaction %s while %s is allocated", transaction,
- currentTx);
-
- // Transaction ready and we are not waiting for futures -- go to idle
- if (cohortFutures.isEmpty()) {
- currentState = IDLE_STATE;
- return;
- }
-
- // Combine the ready Futures into 1
- final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorUtils().getClientDispatcher());
-
- // Record the we have outstanding futures
- final State newState = new Submitted(transaction, combined);
- currentState = newState;
-
- // Attach a completion reset, but only if we do not allocate a transaction
- // in-between
- combined.onComplete(new OnComplete<Iterable<T>>() {
- @Override
- public void onComplete(final Throwable arg0, final Iterable<T> arg1) {
- STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE);
- }
- }, getActorUtils().getClientDispatcher());
- }
-
- @Override
- protected void onTransactionContextCreated(final TransactionIdentifier transactionId) {
- Promise<Object> promise = priorReadOnlyTxPromises.remove(transactionId);
- if (promise != null) {
- promise.success(null);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-/*
- * FIXME: why do we need this interface? It should be possible to integrate it with
- * AbstractTransactionContext, which is the only implementation anyway.
- */
-interface TransactionContext {
- void closeTransaction();
-
- Future<ActorSelection> readyTransaction(Boolean havePermit, Optional<SortedSet<String>> participatingShardNames);
-
- <T> void executeRead(AbstractRead<T> readCmd, SettableFuture<T> promise, Boolean havePermit);
-
- void executeDelete(YangInstanceIdentifier path, Boolean havePermit);
-
- void executeMerge(YangInstanceIdentifier path, NormalizedNode<?, ?> data, Boolean havePermit);
-
- void executeWrite(YangInstanceIdentifier path, NormalizedNode<?, ?> data, Boolean havePermit);
-
- Future<Object> directCommit(Boolean havePermit);
-
- /**
- * Invoked by {@link TransactionContextWrapper} when it has finished handing
- * off operations to this context. From this point on, the context is responsible
- * for throttling operations.
- *
- * <p>
- * Implementations can rely on the wrapper calling this operation in a synchronized
- * block, so they do not need to ensure visibility of this state transition themselves.
- */
- void operationHandOffComplete();
-
- /**
- * A TransactionContext that uses operation limiting should return true else false.
- *
- * @return true if operation limiting is used, false otherwise
- */
- boolean usesOperationLimiting();
-
- short getTransactionVersion();
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A PhantomReference that closes remote transactions for a TransactionContext when it's
- * garbage collected. This is used for read-only transactions as they're not explicitly closed
- * by clients. So the only way to detect that a transaction is no longer in use and it's safe
- * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
- * but TransactionProxy instances should generally be short-lived enough to avoid being moved
- * to the old generation space and thus should be cleaned up in a timely manner as the GC
- * runs on the young generation (eden, swap1...) space much more frequently.
- */
-final class TransactionContextCleanup extends FinalizablePhantomReference<TransactionProxy> {
- private static final Logger LOG = LoggerFactory.getLogger(TransactionContextCleanup.class);
- /**
- * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
- * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
- * trickery to clean up its internal thread when the bundle is unloaded.
- */
- private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
-
- /**
- * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
- * necessary because PhantomReferences need a hard reference so they're not garbage collected.
- * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
- * and thus becomes eligible for garbage collection.
- */
- private static final Map<TransactionContext, TransactionContextCleanup> CACHE = new ConcurrentHashMap<>();
-
- private final TransactionContext cleanup;
-
- private TransactionContextCleanup(final TransactionProxy referent, final TransactionContext cleanup) {
- super(referent, QUEUE);
- this.cleanup = cleanup;
- }
-
- static void track(final TransactionProxy referent, final TransactionContext cleanup) {
- final TransactionContextCleanup ret = new TransactionContextCleanup(referent, cleanup);
- CACHE.put(cleanup, ret);
- }
-
- @Override
- public void finalizeReferent() {
- LOG.trace("Cleaning up {} Tx actors", cleanup);
-
- if (CACHE.remove(cleanup) != null) {
- cleanup.closeTransaction();
- }
- }
-
- static void untrack(final TransactionContext cleanup) {
- CACHE.remove(cleanup);
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import scala.concurrent.Future;
-
-/**
- * An {@link AbstractTransactionContextFactory} which produces TransactionContext instances for single
- * transactions (ie not chained).
- */
-final class TransactionContextFactory extends AbstractTransactionContextFactory<LocalTransactionFactoryImpl> {
- private final AtomicLong nextHistory = new AtomicLong(1);
-
- TransactionContextFactory(final ActorUtils actorUtils, final ClientIdentifier clientId) {
- super(actorUtils, new LocalHistoryIdentifier(clientId, 0));
- }
-
- @Override
- public void close() {
- }
-
- @Override
- protected LocalTransactionFactoryImpl factoryForShard(final String shardName, final ActorSelection shardLeader,
- final ReadOnlyDataTree dataTree) {
- return new LocalTransactionFactoryImpl(getActorUtils(), shardLeader, dataTree);
- }
-
- @Override
- protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, TransactionIdentifier txId) {
- return getActorUtils().findPrimaryShardAsync(shardName);
- }
-
- @Override
- protected <T> void onTransactionReady(final TransactionIdentifier transaction,
- final Collection<Future<T>> cohortFutures) {
- // Transactions are disconnected, this is a no-op
- }
-
- DOMStoreTransactionChain createTransactionChain() {
- return new TransactionChainProxy(this, new LocalHistoryIdentifier(getHistoryId().getClientId(),
- nextHistory.getAndIncrement()));
- }
-
- @Override
- protected void onTransactionContextCreated(final TransactionIdentifier transactionId) {
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.TimeUnit;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A helper class that wraps an eventual TransactionContext instance. Operations destined for the target
- * TransactionContext instance are cached until the TransactionContext instance becomes available at which
- * time they are executed.
- *
- * @author Thomas Pantelis
- */
-class TransactionContextWrapper {
- private static final Logger LOG = LoggerFactory.getLogger(TransactionContextWrapper.class);
-
- /**
- * The list of transaction operations to execute once the TransactionContext becomes available.
- */
- @GuardedBy("queuedTxOperations")
- private final List<Entry<TransactionOperation, Boolean>> queuedTxOperations = new ArrayList<>();
- private final TransactionIdentifier identifier;
- private final OperationLimiter limiter;
- private final String shardName;
-
- /**
- * The resulting TransactionContext.
- */
- private volatile TransactionContext transactionContext;
- @GuardedBy("queuedTxOperations")
- private TransactionContext deferredTransactionContext;
- @GuardedBy("queuedTxOperations")
- private boolean pendingEnqueue;
-
- TransactionContextWrapper(final TransactionIdentifier identifier, final ActorUtils actorUtils,
- final String shardName) {
- this.identifier = requireNonNull(identifier);
- this.limiter = new OperationLimiter(identifier,
- // 1 extra permit for the ready operation
- actorUtils.getDatastoreContext().getShardBatchedModificationCount() + 1,
- TimeUnit.MILLISECONDS.toSeconds(actorUtils.getDatastoreContext().getOperationTimeoutInMillis()));
- this.shardName = requireNonNull(shardName);
- }
-
- TransactionContext getTransactionContext() {
- return transactionContext;
- }
-
- TransactionIdentifier getIdentifier() {
- return identifier;
- }
-
- /**
- * Adds a TransactionOperation to be executed once the TransactionContext becomes available. This method is called
- * only after the caller has checked (without synchronizing with executePriorTransactionOperations()) that the
- * context is not available.
- */
- private void enqueueTransactionOperation(final TransactionOperation operation) {
- // We have three things to do here:
- // - synchronize with executePriorTransactionOperations() so that logical operation ordering is maintained
- // - acquire a permit for the operation if we still need to enqueue it
- // - enqueue the operation
- //
- // Since each operation needs to acquire a permit exactly once and the limiter is shared between us and the
- // TransactionContext, we need to know whether an operation has a permit before we enqueue it. Further
- // complications are:
- // - this method may be called from the thread invoking executePriorTransactionOperations()
- // - user may be violating API contract of using the transaction from a single thread
-
- // As a first step, we will synchronize on the queue and check if the handoff has completed. While we have
- // the lock, we will assert that we will be enqueing another operation.
- final TransactionContext contextOnEntry;
- synchronized (queuedTxOperations) {
- contextOnEntry = transactionContext;
- if (contextOnEntry == null) {
- checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected", identifier);
- pendingEnqueue = true;
- }
- }
-
- // Short-circuit if there is a context
- if (contextOnEntry != null) {
- operation.invoke(transactionContext, null);
- return;
- }
-
- boolean cleanupEnqueue = true;
- TransactionContext finishHandoff = null;
- try {
- // Acquire the permit,
- final boolean havePermit = limiter.acquire();
- if (!havePermit) {
- LOG.warn("Failed to acquire enqueue operation permit for transaction {} on shard {}", identifier,
- shardName);
- }
-
- // Ready to enqueue, take the lock again and append the operation
- synchronized (queuedTxOperations) {
- LOG.debug("Tx {} Queuing TransactionOperation", identifier);
- queuedTxOperations.add(new SimpleImmutableEntry<>(operation, havePermit));
- pendingEnqueue = false;
- cleanupEnqueue = false;
- finishHandoff = deferredTransactionContext;
- deferredTransactionContext = null;
- }
- } finally {
- if (cleanupEnqueue) {
- synchronized (queuedTxOperations) {
- pendingEnqueue = false;
- finishHandoff = deferredTransactionContext;
- deferredTransactionContext = null;
- }
- }
- if (finishHandoff != null) {
- executePriorTransactionOperations(finishHandoff);
- }
- }
- }
-
- void maybeExecuteTransactionOperation(final TransactionOperation op) {
- final TransactionContext localContext = transactionContext;
- if (localContext != null) {
- op.invoke(localContext, null);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- enqueueTransactionOperation(op);
- }
- }
-
- void executePriorTransactionOperations(final TransactionContext localTransactionContext) {
- while (true) {
- // Access to queuedTxOperations and transactionContext must be protected and atomic
- // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
- // issues and ensure no TransactionOperation is missed and that they are processed
- // in the order they occurred.
-
- // We'll make a local copy of the queuedTxOperations list to handle re-entrancy
- // in case a TransactionOperation results in another transaction operation being
- // queued (eg a put operation from a client read Future callback that is notified
- // synchronously).
- final Collection<Entry<TransactionOperation, Boolean>> operationsBatch;
- synchronized (queuedTxOperations) {
- if (queuedTxOperations.isEmpty()) {
- if (!pendingEnqueue) {
- // We're done invoking the TransactionOperations so we can now publish the TransactionContext.
- localTransactionContext.operationHandOffComplete();
-
- // This is null-to-non-null transition after which we are releasing the lock and not doing
- // any further processing.
- transactionContext = localTransactionContext;
- } else {
- deferredTransactionContext = localTransactionContext;
- }
- return;
- }
-
- operationsBatch = new ArrayList<>(queuedTxOperations);
- queuedTxOperations.clear();
- }
-
- // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking. A slight down-side is
- // that we need to re-acquire the lock below but this should be negligible.
- for (Entry<TransactionOperation, Boolean> oper : operationsBatch) {
- final Boolean permit = oper.getValue();
- if (permit.booleanValue() && !localTransactionContext.usesOperationLimiting()) {
- // If the context is not using limiting we need to release operations as we are queueing them, so
- // user threads are not charged for them.
- limiter.release();
- }
- oper.getKey().invoke(localTransactionContext, permit);
- }
- }
- }
-
- Future<ActorSelection> readyTransaction(Optional<SortedSet<String>> participatingShardNames) {
- // avoid the creation of a promise and a TransactionOperation
- final TransactionContext localContext = transactionContext;
- if (localContext != null) {
- return localContext.readyTransaction(null, participatingShardNames);
- }
-
- final Promise<ActorSelection> promise = Futures.promise();
- enqueueTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
- promise.completeWith(newTransactionContext.readyTransaction(havePermit, participatingShardNames));
- }
- });
-
- return promise.future();
- }
-
- OperationLimiter getLimiter() {
- return limiter;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A TransactionOperation to apply a specific modification. Subclasses provide type capture of required data, so that
- * we instantiate AbstractModification subclasses for the bare minimum time required.
- */
-abstract class TransactionModificationOperation extends TransactionOperation {
- private abstract static class AbstractDataOperation extends TransactionModificationOperation {
- private final NormalizedNode<?, ?> data;
-
- AbstractDataOperation(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- super(path);
- this.data = requireNonNull(data);
- }
-
- final NormalizedNode<?, ?> data() {
- return data;
- }
- }
-
- static final class DeleteOperation extends TransactionModificationOperation {
- DeleteOperation(final YangInstanceIdentifier path) {
- super(path);
- }
-
- @Override
- protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
- transactionContext.executeDelete(path(), havePermit);
- }
- }
-
- static final class MergeOperation extends AbstractDataOperation {
- MergeOperation(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- super(path, data);
- }
-
- @Override
- protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
- transactionContext.executeMerge(path(), data(), havePermit);
- }
- }
-
- static final class WriteOperation extends AbstractDataOperation {
- WriteOperation(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- super(path, data);
- }
-
- @Override
- protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
- transactionContext.executeWrite(path(), data(), havePermit);
- }
- }
-
- private final YangInstanceIdentifier path;
-
- TransactionModificationOperation(final YangInstanceIdentifier path) {
- this.path = requireNonNull(path);
- }
-
- final YangInstanceIdentifier path() {
- return path;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Abstract superclass for transaction operations which should be executed
- * on a {@link TransactionContext} at a later point in time.
- */
-abstract class TransactionOperation {
- /**
- * Execute the delayed operation.
- *
- * @param transactionContext the TransactionContext
- * @param havePermit Boolean indicator if this operation has tried and acquired a permit, null if there was no
- * attempt to acquire a permit.
- */
- protected abstract void invoke(TransactionContext transactionContext, @Nullable Boolean havePermit);
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static com.google.common.base.Verify.verifyNotNull;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.DeleteOperation;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.MergeOperation;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.WriteOperation;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
-import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A transaction potentially spanning multiple backend shards.
- */
-public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier>
- implements DOMStoreReadWriteTransaction {
- private enum TransactionState {
- OPEN,
- READY,
- CLOSED,
- }
-
- private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
- private static final DeleteOperation ROOT_DELETE_OPERATION = new DeleteOperation(YangInstanceIdentifier.empty());
-
- private final Map<String, TransactionContextWrapper> txContextWrappers = new TreeMap<>();
- private final AbstractTransactionContextFactory<?> txContextFactory;
- private final TransactionType type;
- private TransactionState state = TransactionState.OPEN;
-
- @VisibleForTesting
- public TransactionProxy(final AbstractTransactionContextFactory<?> txContextFactory, final TransactionType type) {
- super(txContextFactory.nextIdentifier(), txContextFactory.getActorUtils().getDatastoreContext()
- .isTransactionDebugContextEnabled());
- this.txContextFactory = txContextFactory;
- this.type = requireNonNull(type);
-
- LOG.debug("New {} Tx - {}", type, getIdentifier());
- }
-
- @Override
- public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
- return executeRead(shardNameFromIdentifier(path), new DataExists(path, DataStoreVersions.CURRENT_VERSION));
- }
-
- private <T> FluentFuture<T> executeRead(final String shardName, final AbstractRead<T> readCmd) {
- checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed");
-
- LOG.trace("Tx {} {} {}", getIdentifier(), readCmd.getClass().getSimpleName(), readCmd.getPath());
-
- final SettableFuture<T> proxyFuture = SettableFuture.create();
- TransactionContextWrapper contextWrapper = getContextWrapper(shardName);
- contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
- transactionContext.executeRead(readCmd, proxyFuture, havePermit);
- }
- });
-
- return FluentFuture.from(proxyFuture);
- }
-
- @Override
- public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
- checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed");
- requireNonNull(path, "path should not be null");
-
- LOG.trace("Tx {} read {}", getIdentifier(), path);
- return path.isEmpty() ? readAllData() : singleShardRead(shardNameFromIdentifier(path), path);
- }
-
- private FluentFuture<Optional<NormalizedNode<?, ?>>> singleShardRead(
- final String shardName, final YangInstanceIdentifier path) {
- return executeRead(shardName, new ReadData(path, DataStoreVersions.CURRENT_VERSION));
- }
-
- private FluentFuture<Optional<NormalizedNode<?, ?>>> readAllData() {
- final Set<String> allShardNames = txContextFactory.getActorUtils().getConfiguration().getAllShardNames();
- final Collection<FluentFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
-
- for (String shardName : allShardNames) {
- futures.add(singleShardRead(shardName, YangInstanceIdentifier.empty()));
- }
-
- final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> listFuture = Futures.allAsList(futures);
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> aggregateFuture;
-
- aggregateFuture = Futures.transform(listFuture, input -> {
- try {
- return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.empty(), input,
- txContextFactory.getActorUtils().getSchemaContext(),
- txContextFactory.getActorUtils().getDatastoreContext().getLogicalStoreType());
- } catch (DataValidationFailedException e) {
- throw new IllegalArgumentException("Failed to aggregate", e);
- }
- }, MoreExecutors.directExecutor());
-
- return FluentFuture.from(aggregateFuture);
- }
-
- @Override
- public void delete(final YangInstanceIdentifier path) {
- checkModificationState("delete", path);
-
- if (path.isEmpty()) {
- deleteAllData();
- } else {
- executeModification(new DeleteOperation(path));
- }
- }
-
- private void deleteAllData() {
- for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) {
- getContextWrapper(shardName).maybeExecuteTransactionOperation(ROOT_DELETE_OPERATION);
- }
- }
-
- @Override
- public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkModificationState("merge", path);
-
- if (path.isEmpty()) {
- mergeAllData(checkRootData(data));
- } else {
- executeModification(new MergeOperation(path, data));
- }
- }
-
- private void mergeAllData(final ContainerNode rootData) {
- // Populate requests for individual shards that are being touched
- final Map<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> rootBuilders = new HashMap<>();
- for (DataContainerChild<?, ?> child : rootData.getValue()) {
- final String shardName = shardNameFromRootChild(child);
- rootBuilders.computeIfAbsent(shardName,
- unused -> Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier()))
- .addChild(child);
- }
-
- // Now dispatch all merges
- for (Entry<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> entry : rootBuilders.entrySet()) {
- getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new MergeOperation(
- YangInstanceIdentifier.empty(), entry.getValue().build()));
- }
- }
-
- @Override
- public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkModificationState("write", path);
-
- if (path.isEmpty()) {
- writeAllData(checkRootData(data));
- } else {
- executeModification(new WriteOperation(path, data));
- }
- }
-
- private void writeAllData(final ContainerNode rootData) {
- // Open builders for all shards
- final Map<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> rootBuilders = new HashMap<>();
- for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) {
- rootBuilders.put(shardName, Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier()));
- }
-
- // Now distribute children as needed
- for (DataContainerChild<?, ?> child : rootData.getValue()) {
- final String shardName = shardNameFromRootChild(child);
- verifyNotNull(rootBuilders.get(shardName), "Failed to find builder for %s", shardName).addChild(child);
- }
-
- // Now dispatch all writes
- for (Entry<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> entry : rootBuilders.entrySet()) {
- getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new WriteOperation(
- YangInstanceIdentifier.empty(), entry.getValue().build()));
- }
- }
-
- private void executeModification(final TransactionModificationOperation operation) {
- getContextWrapper(operation.path()).maybeExecuteTransactionOperation(operation);
- }
-
- private static ContainerNode checkRootData(final NormalizedNode<?, ?> data) {
- // Root has to be a container
- checkArgument(data instanceof ContainerNode, "Invalid root data %s", data);
- return (ContainerNode) data;
- }
-
- private void checkModificationState(final String opName, final YangInstanceIdentifier path) {
- checkState(type != TransactionType.READ_ONLY, "Modification operation on read-only transaction is not allowed");
- checkState(state == TransactionState.OPEN, "Transaction is sealed - further modifications are not allowed");
- LOG.trace("Tx {} {} {}", getIdentifier(), opName, path);
- }
-
- private boolean seal(final TransactionState newState) {
- if (state == TransactionState.OPEN) {
- state = newState;
- return true;
- }
- return false;
- }
-
- @Override
- public final void close() {
- if (!seal(TransactionState.CLOSED)) {
- checkState(state == TransactionState.CLOSED, "Transaction %s is ready, it cannot be closed",
- getIdentifier());
- // Idempotent no-op as per AutoCloseable recommendation
- return;
- }
-
- for (TransactionContextWrapper contextWrapper : txContextWrappers.values()) {
- contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
- transactionContext.closeTransaction();
- }
- });
- }
-
-
- txContextWrappers.clear();
- }
-
- @Override
- public final AbstractThreePhaseCommitCohort<?> ready() {
- checkState(type != TransactionType.READ_ONLY, "Read-only transactions cannot be readied");
-
- final boolean success = seal(TransactionState.READY);
- checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
-
- LOG.debug("Tx {} Readying {} components for commit", getIdentifier(), txContextWrappers.size());
-
- final AbstractThreePhaseCommitCohort<?> ret;
- switch (txContextWrappers.size()) {
- case 0:
- ret = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
- break;
- case 1:
- final Entry<String, TransactionContextWrapper> e = Iterables.getOnlyElement(
- txContextWrappers.entrySet());
- ret = createSingleCommitCohort(e.getKey(), e.getValue());
- break;
- default:
- ret = createMultiCommitCohort();
- }
-
- txContextFactory.onTransactionReady(getIdentifier(), ret.getCohortFutures());
-
- final Throwable debugContext = getDebugContext();
- return debugContext == null ? ret : new DebugThreePhaseCommitCohort(getIdentifier(), ret, debugContext);
- }
-
- @SuppressWarnings({ "rawtypes", "unchecked" })
- private AbstractThreePhaseCommitCohort<?> createSingleCommitCohort(final String shardName,
- final TransactionContextWrapper contextWrapper) {
-
- LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), shardName);
-
- final OperationCallback.Reference operationCallbackRef =
- new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK);
-
- final TransactionContext transactionContext = contextWrapper.getTransactionContext();
- final Future future;
- if (transactionContext == null) {
- final Promise promise = akka.dispatch.Futures.promise();
- contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
- promise.completeWith(getDirectCommitFuture(newTransactionContext, operationCallbackRef,
- havePermit));
- }
- });
- future = promise.future();
- } else {
- // avoid the creation of a promise and a TransactionOperation
- future = getDirectCommitFuture(transactionContext, operationCallbackRef, null);
- }
-
- return new SingleCommitCohortProxy(txContextFactory.getActorUtils(), future, getIdentifier(),
- operationCallbackRef);
- }
-
- private Future<?> getDirectCommitFuture(final TransactionContext transactionContext,
- final OperationCallback.Reference operationCallbackRef, final Boolean havePermit) {
- TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(
- txContextFactory.getActorUtils());
- operationCallbackRef.set(rateLimitingCallback);
- rateLimitingCallback.run();
- return transactionContext.directCommit(havePermit);
- }
-
- private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
-
- final List<ThreePhaseCommitCohortProxy.CohortInfo> cohorts = new ArrayList<>(txContextWrappers.size());
- final Optional<SortedSet<String>> shardNames = Optional.of(new TreeSet<>(txContextWrappers.keySet()));
- for (Entry<String, TransactionContextWrapper> e : txContextWrappers.entrySet()) {
- LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey());
-
- final TransactionContextWrapper wrapper = e.getValue();
-
- // The remote tx version is obtained the via TransactionContext which may not be available yet so
- // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the
- // TransactionContext is available.
- cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(shardNames),
- () -> wrapper.getTransactionContext().getTransactionVersion()));
- }
-
- return new ThreePhaseCommitCohortProxy(txContextFactory.getActorUtils(), cohorts, getIdentifier());
- }
-
- private String shardNameFromRootChild(final DataContainerChild<?, ?> child) {
- return shardNameFromIdentifier(YangInstanceIdentifier.create(child.getIdentifier()));
- }
-
- private String shardNameFromIdentifier(final YangInstanceIdentifier path) {
- return getActorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
- }
-
- private TransactionContextWrapper getContextWrapper(final YangInstanceIdentifier path) {
- return getContextWrapper(shardNameFromIdentifier(path));
- }
-
- private TransactionContextWrapper getContextWrapper(final String shardName) {
- final TransactionContextWrapper existing = txContextWrappers.get(shardName);
- if (existing != null) {
- return existing;
- }
-
- final TransactionContextWrapper fresh = txContextFactory.newTransactionContextWrapper(this, shardName);
- txContextWrappers.put(shardName, fresh);
- return fresh;
- }
-
- TransactionType getType() {
- return type;
- }
-
- boolean isReady() {
- return state != TransactionState.OPEN;
- }
-
- final ActorUtils getActorUtils() {
- return txContextFactory.getActorUtils();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Mapper;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A {@link Mapper} extracting the {@link ActorSelection} pointing to the actor which
- * is backing a particular transaction.
- *
- * <p>
- * This class is not for general consumption. It is public only to support the pre-lithium compatibility
- * package.
- * TODO: once we remove compatibility, make this class package-private and final.
- */
-public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection> {
- protected static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER = new Mapper<Throwable, Throwable>() {
- @Override
- public Throwable apply(final Throwable failure) {
- return failure;
- }
- };
- private static final Logger LOG = LoggerFactory.getLogger(TransactionReadyReplyMapper.class);
- private final TransactionIdentifier identifier;
- private final ActorUtils actorUtils;
-
- protected TransactionReadyReplyMapper(final ActorUtils actorUtils, final TransactionIdentifier identifier) {
- this.actorUtils = requireNonNull(actorUtils);
- this.identifier = requireNonNull(identifier);
- }
-
- protected final ActorUtils getActorUtils() {
- return actorUtils;
- }
-
- protected String extractCohortPathFrom(final ReadyTransactionReply readyTxReply) {
- return readyTxReply.getCohortPath();
- }
-
- @Override
- public final ActorSelection checkedApply(final Object serializedReadyReply) {
- LOG.debug("Tx {} readyTransaction", identifier);
-
- // At this point the ready operation succeeded and we need to extract the cohort
- // actor path from the reply.
- if (ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
- ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
- return actorUtils.actorSelection(extractCohortPathFrom(readyTxReply));
- }
-
- // Throwing an exception here will fail the Future.
- throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
- identifier, serializedReadyReply.getClass()));
- }
-
- static Future<ActorSelection> transform(final Future<Object> readyReplyFuture, final ActorUtils actorUtils,
- final TransactionIdentifier identifier) {
- return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorUtils, identifier),
- SAME_FAILURE_TRANSFORMER, actorUtils.getClientDispatcher());
- }
-}
WRITE_ONLY,
READ_WRITE;
- // Cache all values
- private static final TransactionType[] VALUES = values();
-
public static TransactionType fromInt(final int type) {
- try {
- return VALUES[type];
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException("In TransactionType enum value " + type, e);
- }
+ return switch (type) {
+ case 0 -> READ_ONLY;
+ case 1 -> WRITE_ONLY;
+ case 2 -> READ_WRITE;
+ default -> throw new IllegalArgumentException("In TransactionType enum value " + type);
+ };
}
}
\ No newline at end of file
import akka.actor.Props;
import com.google.common.annotations.VisibleForTesting;
import java.util.concurrent.TimeUnit;
+import org.eclipse.jdt.annotation.NonNullByDefault;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import scala.concurrent.duration.FiniteDuration;
/**
* asked to do so via {@link CloseDataTreeNotificationListenerRegistration}.
*/
public final class DataTreeNotificationListenerRegistrationActor extends AbstractUntypedActor {
+ // FIXME: rework this constant to a duration and its injection
@VisibleForTesting
static long killDelay = TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS);
- private ListenerRegistration<?> registration;
- private Runnable onClose;
+ private SetRegistration registration = null;
+ private Cancellable killSchedule = null;
private boolean closed;
- private Cancellable killSchedule;
@Override
protected void handleReceive(final Object message) {
if (isValidSender(getSender())) {
getSender().tell(CloseDataTreeNotificationListenerRegistrationReply.getInstance(), getSelf());
}
- } else if (message instanceof SetRegistration) {
- registration = ((SetRegistration)message).registration;
- onClose = ((SetRegistration)message).onClose;
+ } else if (message instanceof SetRegistration setRegistration) {
+ registration = setRegistration;
if (closed) {
closeListenerRegistration();
}
private void closeListenerRegistration() {
closed = true;
- if (registration != null) {
- registration.close();
- onClose.run();
+
+ final var reg = registration;
+ if (reg != null) {
registration = null;
+ reg.registration.close();
+ reg.onClose.run();
if (killSchedule == null) {
killSchedule = getContext().system().scheduler().scheduleOnce(FiniteDuration.create(killDelay,
return Props.create(DataTreeNotificationListenerRegistrationActor.class);
}
- public static class SetRegistration {
- private final ListenerRegistration<?> registration;
- private final Runnable onClose;
-
- public SetRegistration(final ListenerRegistration<?> registration, final Runnable onClose) {
- this.registration = requireNonNull(registration);
- this.onClose = requireNonNull(onClose);
+ @NonNullByDefault
+ public record SetRegistration(Registration registration, Runnable onClose) {
+ public SetRegistration {
+ requireNonNull(registration);
+ requireNonNull(onClose);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.actors;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Props;
+import com.google.gson.stream.JsonWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;
+
+public final class JsonExportActor extends AbstractUntypedActor {
+ // Internal messages
+ public static final class ExportSnapshot {
+ private final String id;
+
+ private final DataTreeCandidate dataTreeCandidate;
+
+ public ExportSnapshot(final DataTreeCandidate candidate, final String id) {
+ dataTreeCandidate = requireNonNull(candidate);
+ this.id = requireNonNull(id);
+ }
+ }
+
+ public static final class ExportJournal {
+ private final ReplicatedLogEntry replicatedLogEntry;
+
+ public ExportJournal(final ReplicatedLogEntry replicatedLogEntry) {
+ this.replicatedLogEntry = requireNonNull(replicatedLogEntry);
+ }
+ }
+
+ public static final class FinishExport {
+ private final String id;
+
+ public FinishExport(final String id) {
+ this.id = requireNonNull(id);
+ }
+ }
+
+ private final List<ReplicatedLogEntry> entries = new ArrayList<>();
+ private final @NonNull EffectiveModelContext schemaContext;
+ private final @NonNull Path baseDirPath;
+
+ private JsonExportActor(final EffectiveModelContext schemaContext, final Path dirPath) {
+ this.schemaContext = requireNonNull(schemaContext);
+ baseDirPath = requireNonNull(dirPath);
+ }
+
+ public static Props props(final EffectiveModelContext schemaContext, final String dirPath) {
+ return Props.create(JsonExportActor.class, schemaContext, Paths.get(dirPath));
+ }
+
+ @Override
+ protected void handleReceive(final Object message) {
+ if (message instanceof ExportSnapshot) {
+ onExportSnapshot((ExportSnapshot) message);
+ } else if (message instanceof ExportJournal) {
+ onExportJournal((ExportJournal) message);
+ } else if (message instanceof FinishExport) {
+ onFinishExport((FinishExport)message);
+ } else {
+ unknownMessage(message);
+ }
+ }
+
+ private void onExportSnapshot(final ExportSnapshot exportSnapshot) {
+ final Path snapshotDir = baseDirPath.resolve("snapshots");
+ createDir(snapshotDir);
+
+ final Path filePath = snapshotDir.resolve(exportSnapshot.id + "-snapshot.json");
+ LOG.debug("Creating JSON file : {}", filePath);
+
+ final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter();
+ checkState(root instanceof NormalizedNodeContainer, "Unexpected root %s", root);
+
+ writeSnapshot(filePath, (NormalizedNodeContainer<?>) root);
+ LOG.debug("Created JSON file: {}", filePath);
+ }
+
+ private void onExportJournal(final ExportJournal exportJournal) {
+ entries.add(exportJournal.replicatedLogEntry);
+ }
+
+ private void onFinishExport(final FinishExport finishExport) {
+ final Path journalDir = baseDirPath.resolve("journals");
+ createDir(journalDir);
+
+ final Path filePath = journalDir.resolve(finishExport.id + "-journal.json");
+ LOG.debug("Creating JSON file : {}", filePath);
+ writeJournal(filePath);
+ LOG.debug("Created JSON file: {}", filePath);
+ }
+
+ private void writeSnapshot(final Path path, final NormalizedNodeContainer<?> root) {
+ try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
+ jsonWriter.beginObject();
+
+ try (var nnWriter = NormalizedNodeWriter.forStreamWriter(JSONNormalizedNodeStreamWriter.createNestedWriter(
+ JSONCodecFactorySupplier.RFC7951.getShared(schemaContext),
+ SchemaInferenceStack.of(schemaContext).toInference(), null, jsonWriter),
+ true)) {
+ for (NormalizedNode node : root.body()) {
+ nnWriter.write(node);
+ }
+ }
+
+ jsonWriter.endObject();
+ } catch (IOException e) {
+ LOG.error("Failed to export stapshot to {}", path, e);
+ }
+ }
+
+ private void writeJournal(final Path path) {
+ try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
+ jsonWriter.beginObject().name("Entries");
+ jsonWriter.beginArray();
+ for (var entry : entries) {
+ final var data = entry.getData();
+ if (data instanceof CommitTransactionPayload payload) {
+ final var candidate = payload.getCandidate().candidate();
+ writeNode(jsonWriter, candidate);
+ } else {
+ jsonWriter.beginObject().name("Payload").value(data.toString()).endObject();
+ }
+ }
+ jsonWriter.endArray();
+ jsonWriter.endObject();
+ } catch (IOException e) {
+ LOG.error("Failed to export journal to {}", path, e);
+ }
+ }
+
+ private static void writeNode(final JsonWriter writer, final DataTreeCandidate candidate) throws IOException {
+ writer.beginObject().name("Entry").beginArray();
+ doWriteNode(writer, candidate.getRootPath(), candidate.getRootNode());
+ writer.endArray().endObject();
+ }
+
+ private static void doWriteNode(final JsonWriter writer, final YangInstanceIdentifier path,
+ final DataTreeCandidateNode node) throws IOException {
+ switch (node.modificationType()) {
+ case APPEARED:
+ case DISAPPEARED:
+ case SUBTREE_MODIFIED:
+ NodeIterator iterator = new NodeIterator(null, path, node.childNodes().iterator());
+ do {
+ iterator = iterator.next(writer);
+ } while (iterator != null);
+ break;
+ case DELETE:
+ case UNMODIFIED:
+ case WRITE:
+ outputNodeInfo(writer, path, node);
+ break;
+ default:
+ outputDefault(writer, path, node);
+ }
+ }
+
+ private static void outputNodeInfo(final JsonWriter writer, final YangInstanceIdentifier path,
+ final DataTreeCandidateNode node) throws IOException {
+ final ModificationType modificationType = node.modificationType();
+
+ writer.beginObject().name("Node");
+ writer.beginArray();
+ writer.beginObject().name("Path").value(path.toString()).endObject();
+ writer.beginObject().name("ModificationType").value(modificationType.toString()).endObject();
+ if (modificationType == ModificationType.WRITE) {
+ writer.beginObject().name("Data").value(node.getDataAfter().body().toString()).endObject();
+ }
+ writer.endArray();
+ writer.endObject();
+ }
+
+ private static void outputDefault(final JsonWriter writer, final YangInstanceIdentifier path,
+ final DataTreeCandidateNode node) throws IOException {
+ writer.beginObject().name("Node");
+ writer.beginArray();
+ writer.beginObject().name("Path").value(path.toString()).endObject();
+ writer.beginObject().name("ModificationType")
+ .value("UNSUPPORTED MODIFICATION: " + node.modificationType()).endObject();
+ writer.endArray();
+ writer.endObject();
+ }
+
+ private void createDir(final Path path) {
+ try {
+ Files.createDirectories(path);
+ } catch (IOException e) {
+ LOG.warn("Directory {} cannot be created", path, e);
+ }
+ }
+
+ private static final class NodeIterator {
+ private final Iterator<DataTreeCandidateNode> iterator;
+ private final YangInstanceIdentifier path;
+ private final NodeIterator parent;
+
+ NodeIterator(final @Nullable NodeIterator parent, final YangInstanceIdentifier path,
+ final Iterator<DataTreeCandidateNode> iterator) {
+ this.iterator = requireNonNull(iterator);
+ this.path = requireNonNull(path);
+ this.parent = parent;
+ }
+
+ NodeIterator next(final JsonWriter writer) throws IOException {
+ while (iterator.hasNext()) {
+ final var node = iterator.next();
+ final var child = path.node(node.name());
+
+ switch (node.modificationType()) {
+ case APPEARED:
+ case DISAPPEARED:
+ case SUBTREE_MODIFIED:
+ return new NodeIterator(this, child, node.childNodes().iterator());
+ case DELETE:
+ case UNMODIFIED:
+ case WRITE:
+ outputNodeInfo(writer, path, node);
+ break;
+ default:
+ outputDefault(writer, child, node);
+ }
+ }
+
+ return parent;
+ }
+ }
+}
private void onSerializeSnapshot(final SerializeSnapshot request) {
Optional<OutputStream> installSnapshotStream = request.getInstallSnapshotStream();
if (installSnapshotStream.isPresent()) {
- try (ObjectOutputStream out = getOutputStream(installSnapshotStream.get())) {
+ try (ObjectOutputStream out = getOutputStream(installSnapshotStream.orElseThrow())) {
request.getSnapshot().serialize(out);
} catch (IOException e) {
// TODO - we should communicate the failure in the CaptureSnapshotReply.
package org.opendaylight.controller.cluster.datastore.config;
import java.util.Collection;
-import java.util.Map;
import java.util.Set;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
public interface Configuration {
/**
* Returns all the shard names that belong on the member by the given name.
*/
+ // FIXME: return Set here
@NonNull Collection<String> getMemberShardNames(@NonNull MemberName memberName);
/**
*/
@Nullable String getShardNameForModule(@NonNull String moduleName);
- /**
- * Return the shard name corresponding to the prefix, or null if none is configured.
- */
- @Nullable String getShardNameForPrefix(@NonNull DOMDataTreeIdentifier prefix);
-
/**
* Returns the member replicas for the given shard name.
*/
+ // FIXME: return Set here
@NonNull Collection<MemberName> getMembersFromShardName(@NonNull String shardName);
/**
*/
void addModuleShardConfiguration(@NonNull ModuleShardConfiguration config);
- /**
- * Adds a new configuration for a shard based on prefix.
- */
- void addPrefixShardConfiguration(@NonNull PrefixShardConfiguration config);
-
- /**
- * Removes a shard configuration for the specified prefix.
- */
- void removePrefixShardConfiguration(@NonNull DOMDataTreeIdentifier prefix);
-
- /**
- * Returns the configuration for all configured prefix shards.
- *
- * @return An immutable copy of the currently configured prefix shards.
- */
- Map<DOMDataTreeIdentifier, PrefixShardConfiguration> getAllPrefixShardConfigurations();
-
/**
* Returns a unique set of all member names configured for all shards.
*/
+ // FIXME: return Set here
Collection<MemberName> getUniqueMemberNamesForAllShards();
/*
* Removes the given member as a replica for the given shardName.
*/
void removeMemberReplicaForShard(String shardName, MemberName memberName);
-
- /**
- * Returns the ShardStrategy for the given prefix or null if the prefix is not found.
- */
- @Nullable ShardStrategy getStrategyForPrefix(@NonNull DOMDataTreeIdentifier prefix);
}
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import java.util.AbstractMap.SimpleEntry;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.PrefixShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-// TODO clean this up once we get rid of module based configuration, prefix one should be alot simpler
+// FIXME: Non-final for testing
public class ConfigurationImpl implements Configuration {
private volatile Map<String, ModuleConfig> moduleConfigMap;
- // TODO should this be initialized with something? on restart we should restore the shards from configuration?
- private volatile Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixConfigMap = Collections.emptyMap();
-
// Look up maps to speed things up
private volatile Map<String, String> namespaceToModuleName;
this(new FileModuleShardConfigProvider(moduleShardsConfigPath, modulesConfigPath));
}
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Subclassed for testing")
public ConfigurationImpl(final ModuleShardConfigProvider provider) {
ImmutableMap.Builder<String, ModuleConfig> mapBuilder = ImmutableMap.builder();
- for (Map.Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
+ for (Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
mapBuilder.put(e.getKey(), e.getValue().build());
}
- this.moduleConfigMap = mapBuilder.build();
+ moduleConfigMap = mapBuilder.build();
- this.allShardNames = createAllShardNames(moduleConfigMap.values());
- this.namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
+ allShardNames = createAllShardNames(moduleConfigMap.values());
+ namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
}
private static Set<String> createAllShardNames(final Iterable<ModuleConfig> moduleConfigs) {
return moduleConfigMap.get(requireNonNull(moduleName, "moduleName should not be null"));
}
- @Override
- public String getShardNameForPrefix(final DOMDataTreeIdentifier prefix) {
- requireNonNull(prefix, "prefix should not be null");
-
- Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> bestMatchEntry = new SimpleEntry<>(
- new DOMDataTreeIdentifier(prefix.getDatastoreType(), YangInstanceIdentifier.empty()), null);
-
- for (Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> entry : prefixConfigMap.entrySet()) {
- if (entry.getKey().contains(prefix) && entry.getKey().getRootIdentifier().getPathArguments().size()
- > bestMatchEntry.getKey().getRootIdentifier().getPathArguments().size()) {
- bestMatchEntry = entry;
- }
- }
-
- //TODO we really should have mapping based on prefix instead of Strings
- return ClusterUtils.getCleanShardName(bestMatchEntry.getKey().getRootIdentifier());
- }
-
@Override
public Collection<MemberName> getMembersFromShardName(final String shardName) {
checkNotNullShardName(shardName);
}
}
- for (final PrefixShardConfiguration prefixConfig : prefixConfigMap.values()) {
- if (shardName.equals(ClusterUtils.getCleanShardName(prefixConfig.getPrefix().getRootIdentifier()))) {
- return prefixConfig.getShardMemberNames();
- }
- }
-
- return Collections.emptyList();
+ return List.of();
}
private static void checkNotNullShardName(final String shardName) {
requireNonNull(config, "ModuleShardConfiguration should not be null");
ModuleConfig moduleConfig = ModuleConfig.builder(config.getModuleName())
- .nameSpace(config.getNamespace().toASCIIString())
+ .nameSpace(config.getNamespace().toString())
.shardStrategy(createShardStrategy(config.getModuleName(), config.getShardStrategyName()))
.shardConfig(config.getShardName(), config.getShardMemberNames()).build();
allShardNames = ImmutableSet.<String>builder().addAll(allShardNames).add(config.getShardName()).build();
}
- @Override
- public void addPrefixShardConfiguration(final PrefixShardConfiguration config) {
- addPrefixConfig(requireNonNull(config, "PrefixShardConfiguration cannot be null"));
- allShardNames = ImmutableSet.<String>builder().addAll(allShardNames)
- .add(ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier())).build();
- }
-
- @Override
- public void removePrefixShardConfiguration(final DOMDataTreeIdentifier prefix) {
- removePrefixConfig(requireNonNull(prefix, "Prefix cannot be null"));
-
- final HashSet<String> temp = new HashSet<>(allShardNames);
- temp.remove(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
- allShardNames = ImmutableSet.copyOf(temp);
- }
-
- @Override
- public Map<DOMDataTreeIdentifier, PrefixShardConfiguration> getAllPrefixShardConfigurations() {
- return ImmutableMap.copyOf(prefixConfigMap);
- }
-
- private void addPrefixConfig(final PrefixShardConfiguration config) {
- final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newPrefixConfigMap = new HashMap<>(prefixConfigMap);
- newPrefixConfigMap.put(config.getPrefix(), config);
- prefixConfigMap = ImmutableMap.copyOf(newPrefixConfigMap);
- }
-
- private void removePrefixConfig(final DOMDataTreeIdentifier prefix) {
- final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newPrefixConfigMap = new HashMap<>(prefixConfigMap);
- newPrefixConfigMap.remove(prefix);
- prefixConfigMap = ImmutableMap.copyOf(newPrefixConfigMap);
- }
-
private ShardStrategy createShardStrategy(final String moduleName, final String shardStrategyName) {
return ShardStrategyFactory.newShardStrategyInstance(moduleName, shardStrategyName, this);
}
}
}
- @Override
- public ShardStrategy getStrategyForPrefix(final DOMDataTreeIdentifier prefix) {
- requireNonNull(prefix, "Prefix cannot be null");
- // FIXME using prefix tables like in mdsal will be better
- Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> bestMatchEntry = new SimpleEntry<>(
- new DOMDataTreeIdentifier(prefix.getDatastoreType(), YangInstanceIdentifier.empty()), null);
-
- for (Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> entry : prefixConfigMap.entrySet()) {
- if (entry.getKey().contains(prefix) && entry.getKey().getRootIdentifier().getPathArguments().size()
- > bestMatchEntry.getKey().getRootIdentifier().getPathArguments().size()) {
- bestMatchEntry = entry;
- }
- }
-
- if (bestMatchEntry.getValue() == null) {
- return null;
- }
- return new PrefixShardStrategy(ClusterUtils
- .getCleanShardName(bestMatchEntry.getKey().getRootIdentifier()),
- bestMatchEntry.getKey().getRootIdentifier());
- }
-
private void updateModuleConfigMap(final ModuleConfig moduleConfig) {
final Map<String, ModuleConfig> newModuleConfigMap = new HashMap<>(moduleConfigMap);
newModuleConfigMap.put(moduleConfig.getName(), moduleConfig);
import static java.util.Objects.requireNonNull;
-import java.net.URI;
import java.util.Collection;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
/**
* Encapsulates information for adding a new module shard configuration.
* @author Thomas Pantelis
*/
public class ModuleShardConfiguration {
- private final URI namespace;
+ private final XMLNamespace namespace;
private final String moduleName;
private final String shardName;
private final String shardStrategyName;
* is used.
* @param shardMemberNames the names of the shard's member replicas.
*/
- public ModuleShardConfiguration(@NonNull URI namespace, @NonNull String moduleName, @NonNull String shardName,
- @Nullable String shardStrategyName, @NonNull Collection<MemberName> shardMemberNames) {
+ public ModuleShardConfiguration(final @NonNull XMLNamespace namespace, final @NonNull String moduleName,
+ final @NonNull String shardName, final @Nullable String shardStrategyName,
+ final @NonNull Collection<MemberName> shardMemberNames) {
this.namespace = requireNonNull(namespace, "nameSpace should not be null");
this.moduleName = requireNonNull(moduleName, "moduleName should not be null");
this.shardName = requireNonNull(shardName, "shardName should not be null");
this.shardMemberNames = requireNonNull(shardMemberNames, "shardMemberNames");
}
- public URI getNamespace() {
+ public XMLNamespace getNamespace() {
return namespace;
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.config;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ImmutableSet;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Configuration for prefix based shards.
- */
-public class PrefixShardConfiguration implements Serializable {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private PrefixShardConfiguration prefixShardConfiguration;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- }
-
- Proxy(final PrefixShardConfiguration prefixShardConfiguration) {
- this.prefixShardConfiguration = prefixShardConfiguration;
- }
-
- @Override
- public void writeExternal(final ObjectOutput objectOutput) throws IOException {
- objectOutput.writeObject(prefixShardConfiguration.getPrefix());
- objectOutput.writeObject(prefixShardConfiguration.getShardStrategyName());
-
- objectOutput.writeInt(prefixShardConfiguration.getShardMemberNames().size());
- for (MemberName name : prefixShardConfiguration.getShardMemberNames()) {
- name.writeTo(objectOutput);
- }
- }
-
- @Override
- public void readExternal(final ObjectInput objectInput) throws IOException, ClassNotFoundException {
- final DOMDataTreeIdentifier localPrefix = (DOMDataTreeIdentifier) objectInput.readObject();
- final String localStrategyName = (String) objectInput.readObject();
-
- final int size = objectInput.readInt();
- final Collection<MemberName> localShardMemberNames = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- localShardMemberNames.add(MemberName.readFrom(objectInput));
- }
-
- prefixShardConfiguration = new PrefixShardConfiguration(localPrefix, localStrategyName,
- localShardMemberNames);
- }
-
- private Object readResolve() {
- return prefixShardConfiguration;
- }
- }
-
- private static final long serialVersionUID = 1L;
-
- private final DOMDataTreeIdentifier prefix;
- private final String shardStrategyName;
- private final Collection<MemberName> shardMemberNames;
-
- public PrefixShardConfiguration(final DOMDataTreeIdentifier prefix,
- final String shardStrategyName,
- final Collection<MemberName> shardMemberNames) {
- this.prefix = requireNonNull(prefix);
- this.shardStrategyName = requireNonNull(shardStrategyName);
- this.shardMemberNames = ImmutableSet.copyOf(shardMemberNames);
- }
-
- public DOMDataTreeIdentifier getPrefix() {
- return prefix;
- }
-
- public String getShardStrategyName() {
- return shardStrategyName;
- }
-
- public Collection<MemberName> getShardMemberNames() {
- return shardMemberNames;
- }
-
- @Override
- public String toString() {
- return "PrefixShardConfiguration{"
- + "prefix=" + prefix
- + ", shardStrategyName='"
- + shardStrategyName + '\''
- + ", shardMemberNames=" + shardMemberNames
- + '}';
- }
-
- private Object writeReplace() {
- return new Proxy(this);
- }
-}
return false;
}
- ShardIdentifier that = (ShardIdentifier) obj;
-
- if (!memberName.equals(that.memberName)) {
- return false;
- }
- if (!shardName.equals(that.shardName)) {
- return false;
- }
- if (!type.equals(that.type)) {
- return false;
- }
-
- return true;
+ final var that = (ShardIdentifier) obj;
+ return memberName.equals(that.memberName) && shardName.equals(that.shardName) && type.equals(that.type);
}
@Override
}
public Builder shardName(final String newShardName) {
- this.shardName = newShardName;
+ shardName = newShardName;
return this;
}
public Builder memberName(final MemberName newMemberName) {
- this.memberName = newMemberName;
+ memberName = newMemberName;
return this;
}
public Builder type(final String newType) {
- this.type = newType;
+ type = newType;
return this;
}
public class ShardManagerIdentifier {
private final String type;
- public ShardManagerIdentifier(String type) {
+ public ShardManagerIdentifier(final String type) {
this.type = type;
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
-
- ShardManagerIdentifier that = (ShardManagerIdentifier) obj;
-
- if (!type.equals(that.type)) {
- return false;
- }
-
- return true;
+ return type.equals(((ShardManagerIdentifier) obj).type);
}
@Override
public static class Builder {
private String type;
- public Builder type(String newType) {
- this.type = newType;
+ public Builder type(final String newType) {
+ type = newType;
return this;
}
public ShardManagerIdentifier build() {
- return new ShardManagerIdentifier(this.type);
+ return new ShardManagerIdentifier(type);
}
-
}
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.datastore.Shard;
-
-/**
- * Factory for creating ShardStats mbeans.
- *
- * @author Basheeruddin syedbahm@cisco.com
- */
-public final class ShardMBeanFactory {
-
- private ShardMBeanFactory() {
- }
-
- public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType,
- final @NonNull Shard shard) {
- String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
- ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
- shardStatsMBeanImpl.registerMBean();
- return shardStatsMBeanImpl;
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-public class AbortTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransaction extends AbstractThreePhaseCommitMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public AbortTransaction() {
}
- public AbortTransaction(TransactionIdentifier transactionID, final short version) {
+ public AbortTransaction(final TransactionIdentifier transactionID, final short version) {
super(transactionID, version);
}
- public static AbortTransaction fromSerializable(Object serializable) {
+ public static AbortTransaction fromSerializable(final Object serializable) {
Preconditions.checkArgument(serializable instanceof AbortTransaction);
return (AbortTransaction)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof AbortTransaction;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-public class AbortTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransactionReply extends VersionedExternalizableMessage {
+ @java.io.Serial
+ private static final long serialVersionUID = 7251132353204199793L;
private static final AbortTransactionReply INSTANCE = new AbortTransactionReply();
public AbortTransactionReply() {
}
- private AbortTransactionReply(short version) {
+ private AbortTransactionReply(final short version) {
super(version);
}
- public static AbortTransactionReply instance(short version) {
+ public static AbortTransactionReply instance(final short version) {
return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new AbortTransactionReply(version);
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof AbortTransactionReply;
}
}
* @author gwu
*
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class AbstractRead<T> extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
*/
package org.opendaylight.controller.cluster.datastore.messages;
-import java.io.Serializable;
+import static java.util.Objects.requireNonNull;
-public class ActorInitialized implements Serializable {
- private static final long serialVersionUID = 1L;
+import akka.actor.ActorRef;
+import org.eclipse.jdt.annotation.NonNullByDefault;
- public ActorInitialized() {
+@NonNullByDefault
+public record ActorInitialized(ActorRef actorRef) {
+ public ActorInitialized {
+ requireNonNull(actorRef);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * A message sent to the ShardManager to dynamically add a new local shard
- * that is a replica for an existing prefix shard that is already available
- * in the cluster.
- */
-public class AddPrefixShardReplica {
-
- private final YangInstanceIdentifier prefix;
-
- /**
- * Constructor.
- *
- * @param prefix prefix of the shard that is to be locally replicated.
- */
-
- public AddPrefixShardReplica(final @NonNull YangInstanceIdentifier prefix) {
- this.prefix = requireNonNull(prefix, "prefix should not be null");
- }
-
- public YangInstanceIdentifier getShardPrefix() {
- return this.prefix;
- }
-
- @Override
- public String toString() {
- return "AddPrefixShardReplica[prefix=" + prefix + "]";
- }
-}
*
* @author Thomas Pantelis
*/
-public class BatchedModifications extends MutableCompositeModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModifications extends MutableCompositeModification {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private boolean ready;
public BatchedModifications() {
}
- public BatchedModifications(TransactionIdentifier transactionId, short version) {
+ public BatchedModifications(final TransactionIdentifier transactionId, final short version) {
super(version);
this.transactionId = requireNonNull(transactionId, "transactionID can't be null");
}
return ready;
}
- public void setReady(Optional<SortedSet<String>> possibleParticipatingShardNames) {
- this.ready = true;
- this.participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
- Preconditions.checkArgument(this.participatingShardNames == null || this.participatingShardNames.size() > 1);
+ public void setReady(final Optional<SortedSet<String>> possibleParticipatingShardNames) {
+ ready = true;
+ participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
+ Preconditions.checkArgument(participatingShardNames == null || participatingShardNames.size() > 1);
}
public void setReady() {
return doCommitOnReady;
}
- public void setDoCommitOnReady(boolean doCommitOnReady) {
+ public void setDoCommitOnReady(final boolean doCommitOnReady) {
this.doCommitOnReady = doCommitOnReady;
}
return totalMessagesSent;
}
- public void setTotalMessagesSent(int totalMessagesSent) {
+ public void setTotalMessagesSent(final int totalMessagesSent) {
this.totalMessagesSent = totalMessagesSent;
}
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
transactionId = TransactionIdentifier.readFrom(in);
ready = in.readBoolean();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
transactionId.writeTo(out);
out.writeBoolean(ready);
if (getVersion() >= DataStoreVersions.FLUORINE_VERSION) {
if (participatingShardNames != null) {
out.writeInt(participatingShardNames.size());
- for (String shardName: participatingShardNames) {
+ for (String shardName : participatingShardNames) {
out.writeObject(shardName);
}
} else {
*
* @author Thomas Pantelis
*/
-public class BatchedModificationsReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModificationsReply extends VersionedExternalizableMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private int numBatched;
public BatchedModificationsReply() {
}
- public BatchedModificationsReply(int numBatched) {
+ public BatchedModificationsReply(final int numBatched) {
this.numBatched = numBatched;
}
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
numBatched = in.readInt();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeInt(numBatched);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-public class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public CanCommitTransaction() {
}
- public CanCommitTransaction(TransactionIdentifier transactionID, final short version) {
+ public CanCommitTransaction(final TransactionIdentifier transactionID, final short version) {
super(transactionID, version);
}
- public static CanCommitTransaction fromSerializable(Object serializable) {
+ public static CanCommitTransaction fromSerializable(final Object serializable) {
Preconditions.checkArgument(serializable instanceof CanCommitTransaction);
return (CanCommitTransaction)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CanCommitTransaction;
}
}
import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-public class CanCommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransactionReply extends VersionedExternalizableMessage {
+ @java.io.Serial
+ private static final long serialVersionUID = 4355566635423934872L;
+
private static final CanCommitTransactionReply YES =
new CanCommitTransactionReply(true, DataStoreVersions.CURRENT_VERSION);
private static final CanCommitTransactionReply NO =
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
canCommit = in.readBoolean();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeBoolean(canCommit);
}
return "CanCommitTransactionReply [canCommit=" + canCommit + ", version=" + getVersion() + "]";
}
- public static CanCommitTransactionReply yes(short version) {
+ public static CanCommitTransactionReply yes(final short version) {
return version == DataStoreVersions.CURRENT_VERSION ? YES : new CanCommitTransactionReply(true, version);
}
- public static CanCommitTransactionReply no(short version) {
+ public static CanCommitTransactionReply no(final short version) {
return version == DataStoreVersions.CURRENT_VERSION ? NO : new CanCommitTransactionReply(false, version);
}
return (CanCommitTransactionReply)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CanCommitTransactionReply;
}
}
import java.io.Serializable;
public final class CloseDataTreeNotificationListenerRegistration implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private static final CloseDataTreeNotificationListenerRegistration INSTANCE =
new CloseDataTreeNotificationListenerRegistration();
return INSTANCE;
}
+ @java.io.Serial
private Object readResolve() {
return INSTANCE;
}
import java.io.Serializable;
public final class CloseDataTreeNotificationListenerRegistrationReply implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private static final CloseDataTreeNotificationListenerRegistrationReply INSTANCE =
new CloseDataTreeNotificationListenerRegistrationReply();
return INSTANCE;
}
+ @java.io.Serial
private Object readResolve() {
return INSTANCE;
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
-public class CloseTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransaction extends VersionedExternalizableMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public CloseTransaction() {
}
- public CloseTransaction(short version) {
+ public CloseTransaction(final short version) {
super(version);
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CloseTransaction;
}
}
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.yangtools.concepts.Identifiable;
-public class CloseTransactionChain extends VersionedExternalizableMessage
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransactionChain extends VersionedExternalizableMessage
implements Identifiable<LocalHistoryIdentifier> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private LocalHistoryIdentifier transactionChainId;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CloseTransactionReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-public class CommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransaction extends AbstractThreePhaseCommitMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public CommitTransaction() {
}
- public CommitTransaction(TransactionIdentifier transactionID, final short version) {
+ public CommitTransaction(final TransactionIdentifier transactionID, final short version) {
super(transactionID, version);
}
- public static CommitTransaction fromSerializable(Object serializable) {
+ public static CommitTransaction fromSerializable(final Object serializable) {
Preconditions.checkArgument(serializable instanceof CommitTransaction);
return (CommitTransaction)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CommitTransaction;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-public class CommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransactionReply extends VersionedExternalizableMessage {
+ @java.io.Serial
+ private static final long serialVersionUID = -8342450250867395000L;
+
public static final CommitTransactionReply INSTANCE = new CommitTransactionReply();
public CommitTransactionReply() {
}
- private CommitTransactionReply(short version) {
+ private CommitTransactionReply(final short version) {
super(version);
}
- public static CommitTransactionReply instance(short version) {
+ public static CommitTransactionReply instance(final short version) {
return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new CommitTransactionReply(version);
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CommitTransactionReply;
}
}
import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-public class CreateTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransaction extends VersionedExternalizableMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private TransactionIdentifier transactionId;
import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-public class CreateTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransactionReply extends VersionedExternalizableMessage {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private String transactionPath;
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
transactionId = TransactionIdentifier.readFrom(in);
transactionPath = in.readUTF();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
transactionId.writeTo(out);
out.writeUTF(transactionPath);
+ ", version=" + getVersion() + "]";
}
- public static CreateTransactionReply fromSerializable(Object serializable) {
+ public static CreateTransactionReply fromSerializable(final Object serializable) {
checkArgument(serializable instanceof CreateTransactionReply);
return (CreateTransactionReply)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof CreateTransactionReply;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class DataExists extends AbstractRead<Boolean> {
private static final long serialVersionUID = 1L;
}
@Override
- public FluentFuture<Boolean> apply(DOMStoreReadTransaction readDelegate) {
+ public FluentFuture<Boolean> apply(final DOMStoreReadTransaction readDelegate) {
return readDelegate.exists(getPath());
}
@Override
- public void processResponse(Object response, SettableFuture<Boolean> returnFuture) {
+ public void processResponse(final Object response, final SettableFuture<Boolean> returnFuture) {
if (DataExistsReply.isSerializedType(response)) {
returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists()));
} else {
}
@Override
- protected AbstractRead<Boolean> newInstance(short withVersion) {
+ protected AbstractRead<Boolean> newInstance(final short withVersion) {
return new DataExists(getPath(), withVersion);
}
return (DataExists)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof DataExists;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class DataExistsReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
exists = in.readBoolean();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeBoolean(exists);
}
return (DataExistsReply)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof DataExistsReply;
}
}
import static java.util.Objects.requireNonNull;
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import java.util.List;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
/**
* A message about a DataTree having been changed. The message is not
* candidate.
*/
public final class DataTreeChanged {
- private final Collection<DataTreeCandidate> changes;
+ private final List<DataTreeCandidate> changes;
- public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ public DataTreeChanged(final List<DataTreeCandidate> changes) {
this.changes = requireNonNull(changes);
}
*
* @return Change events
*/
- public Collection<DataTreeCandidate> getChanges() {
+ public List<DataTreeCandidate> getChanges() {
return changes;
}
}
import java.io.Serializable;
public final class DataTreeChangedReply implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
return INSTANCE;
}
+ @java.io.Serial
private Object readResolve() {
return INSTANCE;
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Externalizable with no data.
- *
- * @author Thomas Pantelis
- */
-public class EmptyExternalizable implements Externalizable {
-
- @Override
- public void readExternal(ObjectInput in) {
- }
-
- @Override
- public void writeExternal(ObjectOutput out) {
- }
-}
*
* @author Thomas Pantelis
*/
-public class ForwardedReadyTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ForwardedReadyTransaction {
private final TransactionIdentifier transactionId;
private final ReadWriteShardDataTreeTransaction transaction;
private final boolean doImmediateCommit;
private final short txnClientVersion;
- private @Nullable final SortedSet<String> participatingShardNames;
+ private final @Nullable SortedSet<String> participatingShardNames;
- public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion,
- ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit,
- Optional<SortedSet<String>> participatingShardNames) {
+ public ForwardedReadyTransaction(final TransactionIdentifier transactionId, final short txnClientVersion,
+ final ReadWriteShardDataTreeTransaction transaction, final boolean doImmediateCommit,
+ final Optional<SortedSet<String>> participatingShardNames) {
this.transactionId = requireNonNull(transactionId);
this.transaction = requireNonNull(transaction);
this.txnClientVersion = txnClientVersion;
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
+
/**
- * Local message sent to an actor to retrieve internal information for reporting.
+ * Local message sent to an actor to retrieve {@link DataTreeListenerInfo} for reporting.
*
* @author Thomas Pantelis
*/
* Request a shard to report the clients it knows about. Shard is required to respond with {@link GetKnownClientsReply}.
*/
public final class GetKnownClients implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public static final @NonNull GetKnownClients INSTANCE = new GetKnownClients();
}
+ @java.io.Serial
private Object readResolve() {
return INSTANCE;
}
import org.apache.commons.lang3.ObjectUtils;
import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
/**
* Local message sent in reply to FindPrimaryShard to indicate the primary shard is local to the caller.
* @author Thomas Pantelis
*/
public class LocalPrimaryShardFound {
-
private final String primaryPath;
private final ReadOnlyDataTree localShardDataTree;
- public LocalPrimaryShardFound(@NonNull String primaryPath, @NonNull ReadOnlyDataTree localShardDataTree) {
+ public LocalPrimaryShardFound(final @NonNull String primaryPath,
+ final @NonNull ReadOnlyDataTree localShardDataTree) {
this.primaryPath = requireNonNull(primaryPath);
this.localShardDataTree = requireNonNull(localShardDataTree);
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent to a shard actor indicating one of its peers is down.
- *
- * @author Thomas Pantelis
- */
-public class PeerDown {
- private final MemberName memberName;
- private final String peerId;
-
- public PeerDown(MemberName memberName, String peerId) {
- this.memberName = memberName;
- this.peerId = peerId;
- }
-
- public MemberName getMemberName() {
- return memberName;
- }
-
-
- public String getPeerId() {
- return peerId;
- }
-
- @Override
- public String toString() {
- return "PeerDown [memberName=" + memberName.getName() + ", peerId=" + peerId + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent to a shard actor indicating one of its peers is up.
- *
- * @author Thomas Pantelis
- */
-public class PeerUp {
- private final MemberName memberName;
- private final String peerId;
-
- public PeerUp(MemberName memberName, String peerId) {
- this.memberName = memberName;
- this.peerId = peerId;
- }
-
- public MemberName getMemberName() {
- return memberName;
- }
-
- public String getPeerId() {
- return peerId;
- }
-
- @Override
- public String toString() {
- return "PeerUp [memberName=" + memberName.getName() + ", peerId=" + peerId + "]";
- }
-}
\ No newline at end of file
import akka.actor.ActorSelection;
import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
/**
* Local message DTO that contains information about the primary shard.
private final short primaryShardVersion;
private final ReadOnlyDataTree localShardDataTree;
- public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion,
- @NonNull ReadOnlyDataTree localShardDataTree) {
+ public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion,
+ final @NonNull ReadOnlyDataTree localShardDataTree) {
this.primaryShardActor = requireNonNull(primaryShardActor);
this.primaryShardVersion = primaryShardVersion;
this.localShardDataTree = requireNonNull(localShardDataTree);
}
- public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion) {
+ public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion) {
this.primaryShardActor = requireNonNull(primaryShardActor);
this.primaryShardVersion = primaryShardVersion;
- this.localShardDataTree = null;
+ localShardDataTree = null;
}
/**
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class ReadData extends AbstractRead<Optional<NormalizedNode<?, ?>>> {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public class ReadData extends AbstractRead<Optional<NormalizedNode>> {
private static final long serialVersionUID = 1L;
public ReadData() {
}
- public ReadData(final YangInstanceIdentifier path, short version) {
+ public ReadData(final YangInstanceIdentifier path, final short version) {
super(path, version);
}
@Override
- public FluentFuture<Optional<NormalizedNode<?, ?>>> apply(DOMStoreReadTransaction readDelegate) {
+ public FluentFuture<Optional<NormalizedNode>> apply(final DOMStoreReadTransaction readDelegate) {
return readDelegate.read(getPath());
}
@Override
- public void processResponse(Object readResponse, SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
+ public void processResponse(final Object readResponse,
+ final SettableFuture<Optional<NormalizedNode>> returnFuture) {
if (ReadDataReply.isSerializedType(readResponse)) {
ReadDataReply reply = ReadDataReply.fromSerializable(readResponse);
- returnFuture.set(Optional.<NormalizedNode<?, ?>>ofNullable(reply.getNormalizedNode()));
+ returnFuture.set(Optional.ofNullable(reply.getNormalizedNode()));
} else {
returnFuture.setException(new ReadFailedException("Invalid response reading data for path " + getPath()));
}
}
@Override
- protected AbstractRead<Optional<NormalizedNode<?, ?>>> newInstance(short withVersion) {
+ protected AbstractRead<Optional<NormalizedNode>> newInstance(final short withVersion) {
return new ReadData(getPath(), withVersion);
}
return (ReadData)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof ReadData;
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.messages;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadDataReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
- private NormalizedNode<?, ?> normalizedNode;
+ private NormalizedNode normalizedNode;
public ReadDataReply() {
}
- public ReadDataReply(final NormalizedNode<?, ?> normalizedNode, final short version) {
+ public ReadDataReply(final NormalizedNode normalizedNode, final short version) {
super(version);
this.normalizedNode = normalizedNode;
}
- public NormalizedNode<?, ?> getNormalizedNode() {
+ public NormalizedNode getNormalizedNode() {
return normalizedNode;
}
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* Message notifying the shard leader to apply modifications which have been
* to be sent out to a remote system, it needs to be intercepted by {@link ReadyLocalTransactionSerializer}
* and turned into {@link BatchedModifications}.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public final class ReadyLocalTransaction {
private final DataTreeModification modification;
private final TransactionIdentifier transactionId;
private short remoteVersion = DataStoreVersions.CURRENT_VERSION;
public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification,
- final boolean doCommitOnReady, Optional<SortedSet<String>> participatingShardNames) {
+ final boolean doCommitOnReady, final Optional<SortedSet<String>> participatingShardNames) {
this.transactionId = requireNonNull(transactionId);
this.modification = requireNonNull(modification);
this.doCommitOnReady = doCommitOnReady;
return remoteVersion;
}
- public void setRemoteVersion(short remoteVersion) {
+ public void setRemoteVersion(final short remoteVersion) {
this.remoteVersion = remoteVersion;
}
* into akka serialization to allow forwarding of ReadyLocalTransaction to remote
* shards.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public final class ReadyLocalTransactionSerializer extends JSerializer {
-
private final ExtendedActorSystem system;
public ReadyLocalTransactionSerializer(final ExtendedActorSystem system) {
import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadyTransactionReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
public ReadyTransactionReply() {
}
- public ReadyTransactionReply(String cohortPath) {
+ public ReadyTransactionReply(final String cohortPath) {
this(cohortPath, DataStoreVersions.CURRENT_VERSION);
}
- public ReadyTransactionReply(String cohortPath, short version) {
+ public ReadyTransactionReply(final String cohortPath, final short version) {
super(version);
this.cohortPath = cohortPath;
}
}
@Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
cohortPath = in.readUTF();
}
@Override
- public void writeExternal(ObjectOutput out) throws IOException {
+ public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeUTF(cohortPath);
}
- public static ReadyTransactionReply fromSerializable(Object serializable) {
+ public static ReadyTransactionReply fromSerializable(final Object serializable) {
return (ReadyTransactionReply)serializable;
}
- public static boolean isSerializedType(Object message) {
+ public static boolean isSerializedType(final Object message) {
return message instanceof ReadyTransactionReply;
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * A message sent to the ShardManager to dynamically remove a local prefix shard
- * replica available in this node.
- */
-public class RemovePrefixShardReplica {
-
- private final YangInstanceIdentifier prefix;
- private final MemberName memberName;
-
- /**
- * Constructor.
- *
- * @param prefix prefix of the local shard that is to be dynamically removed.
- */
- public RemovePrefixShardReplica(final @NonNull YangInstanceIdentifier prefix,
- final @NonNull MemberName memberName) {
- this.prefix = requireNonNull(prefix, "prefix should not be null");
- this.memberName = requireNonNull(memberName, "memberName should not be null");
- }
-
- public YangInstanceIdentifier getShardPrefix() {
- return prefix;
- }
-
- public MemberName getMemberName() {
- return memberName;
- }
-
- @Override
- public String toString() {
- return "RemovePrefixShardReplica [prefix=" + prefix + ", memberName=" + memberName + "]";
- }
-}
import static java.util.Objects.requireNonNull;
-import java.util.Optional;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
/**
* A local message derived from LeaderStateChanged containing additional Shard-specific info that is sent
*
* @author Thomas Pantelis
*/
-public class ShardLeaderStateChanged extends LeaderStateChanged {
- private final ReadOnlyDataTree localShardDataTree;
+public final class ShardLeaderStateChanged extends LeaderStateChanged {
+ private final @Nullable ReadOnlyDataTree localShardDataTree;
- public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId,
- @NonNull ReadOnlyDataTree localShardDataTree, short leaderPayloadVersion) {
+ public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+ final @NonNull ReadOnlyDataTree localShardDataTree, final short leaderPayloadVersion) {
super(memberId, leaderId, leaderPayloadVersion);
this.localShardDataTree = requireNonNull(localShardDataTree);
}
- public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId,
- short leaderPayloadVersion) {
+ public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+ final short leaderPayloadVersion) {
super(memberId, leaderId, leaderPayloadVersion);
- this.localShardDataTree = null;
+ localShardDataTree = null;
}
- public @NonNull Optional<ReadOnlyDataTree> getLocalShardDataTree() {
- return Optional.ofNullable(localShardDataTree);
+ public @Nullable ReadOnlyDataTree localShardDataTree() {
+ return localShardDataTree;
}
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Serializable;
-
-/**
- * A reply message indicating success.
- *
- * @author Thomas Pantelis
- */
-public final class SuccessReply implements Serializable {
- private static final long serialVersionUID = 1L;
-
- public static final SuccessReply INSTANCE = new SuccessReply();
-
- private SuccessReply() {
- }
-}
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.util.AbstractEffectiveModelContextProvider;
-public class UpdateSchemaContext extends AbstractEffectiveModelContextProvider {
- public UpdateSchemaContext(final EffectiveModelContext modelContext) {
- super(modelContext);
+@NonNullByDefault
+public record UpdateSchemaContext(EffectiveModelContext modelContext) {
+ public UpdateSchemaContext {
+ requireNonNull(modelContext);
}
}
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
private static final long serialVersionUID = 1L;
private short version = DataStoreVersions.CURRENT_VERSION;
public VersionedExternalizableMessage() {
+ // Required for externalizable
}
public VersionedExternalizableMessage(final short version) {
this.version = version <= DataStoreVersions.CURRENT_VERSION ? version : DataStoreVersions.CURRENT_VERSION;
}
- public short getVersion() {
+ public final short getVersion() {
return version;
}
protected final @NonNull NormalizedNodeStreamVersion getStreamVersion() {
- if (version >= DataStoreVersions.MAGNESIUM_VERSION) {
+ if (version >= DataStoreVersions.POTASSIUM_VERSION) {
+ return NormalizedNodeStreamVersion.POTASSIUM;
+ } else if (version >= DataStoreVersions.PHOSPHORUS_VERSION) {
return NormalizedNodeStreamVersion.MAGNESIUM;
- } else if (version == DataStoreVersions.SODIUM_SR1_VERSION) {
- return NormalizedNodeStreamVersion.SODIUM_SR1;
- } else if (version == DataStoreVersions.NEON_SR2_VERSION) {
- return NormalizedNodeStreamVersion.NEON_SR2;
} else {
- return NormalizedNodeStreamVersion.LITHIUM;
+ throw new IllegalStateException("Unsupported version " + version);
}
}
@Override
public final Object toSerializable() {
final short ver = getVersion();
- if (ver < DataStoreVersions.BORON_VERSION) {
+ if (ver < DataStoreVersions.SODIUM_SR1_VERSION) {
throw new UnsupportedOperationException("Version " + ver
- + " is older than the oldest version supported version " + DataStoreVersions.BORON_VERSION);
+ + " is older than the oldest version supported version " + DataStoreVersions.SODIUM_SR1_VERSION);
}
return this;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.modification;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
/**
* Base class to be used for all simple modifications that can be applied to a DOMStoreTransaction.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class AbstractModification implements Modification {
+ @java.io.Serial
+ private static final long serialVersionUID = 2647778426312509718L;
private YangInstanceIdentifier path;
private short version;
* A CompositeModification gets stored in the transaction log for a Shard. During recovery when the transaction log
* is being replayed a DOMStoreWriteTransaction could be created and a CompositeModification could be applied to it.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public interface CompositeModification extends Modification {
/**
* Get a list of modifications contained by this composite.
import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* DeleteModification store all the parameters required to delete a path from the data tree.
*/
-public class DeleteModification extends AbstractModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class DeleteModification extends AbstractModification {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
public DeleteModification() {
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* MergeModification stores all the parameters required to merge data into the specified path.
*/
-public class MergeModification extends WriteModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class MergeModification extends WriteModification {
private static final long serialVersionUID = 1L;
public MergeModification() {
super(version);
}
- public MergeModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public MergeModification(final YangInstanceIdentifier path, final NormalizedNode data) {
super(path, data);
}
- MergeModification(final short version, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ MergeModification(final short version, final YangInstanceIdentifier path, final NormalizedNode data) {
super(version, path, data);
}
public static MergeModification fromStream(final NormalizedNodeDataInput in, final short version,
final ReusableStreamReceiver receiver) throws IOException {
- final NormalizedNode<?, ?> node = in.readNormalizedNode(receiver);
+ final NormalizedNode node = in.readNormalizedNode(receiver);
final YangInstanceIdentifier path = in.readYangInstanceIdentifier();
return new MergeModification(version, path, node);
}
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* Represents a modification to the data store.
import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* MutableCompositeModification is just a mutable version of a CompositeModification.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class MutableCompositeModification extends VersionedExternalizableMessage implements CompositeModification {
private static final long serialVersionUID = 1L;
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
-
- int size = in.readInt();
+ final int size = in.readInt();
if (size > 0) {
- final NormalizedNodeDataInput input = NormalizedNodeDataInput.newDataInputWithoutValidation(in);
- final ReusableStreamReceiver receiver = ReusableImmutableNormalizedNodeStreamWriter.create();
-
- for (int i = 0; i < size; i++) {
- byte type = in.readByte();
- switch (type) {
- case Modification.WRITE:
- modifications.add(WriteModification.fromStream(input, getVersion(), receiver));
- break;
-
- case Modification.MERGE:
- modifications.add(MergeModification.fromStream(input, getVersion(), receiver));
- break;
-
- case Modification.DELETE:
- modifications.add(DeleteModification.fromStream(input, getVersion()));
- break;
- default:
- break;
- }
+ if (getVersion() >= DataStoreVersions.PHOSPHORUS_VERSION) {
+ readExternalModern(NormalizedNodeDataInput.newDataInput(in), size);
+ } else {
+ readExternalLegacy(in, size);
}
}
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
-
final int size = modifications.size();
out.writeInt(size);
if (size > 0) {
- try (NormalizedNodeDataOutput stream = getStreamVersion().newDataOutput(out)) {
- for (Modification mod : modifications) {
- out.writeByte(mod.getType());
- mod.writeTo(stream);
- }
+ if (getVersion() >= DataStoreVersions.PHOSPHORUS_VERSION) {
+ writeExternalModern(out);
+ } else {
+ writeExternalLegacy(out);
+ }
+ }
+ }
+
+ private void readExternalLegacy(final ObjectInput in, final int size) throws IOException {
+ final NormalizedNodeDataInput input = NormalizedNodeDataInput.newDataInputWithoutValidation(in);
+ final ReusableStreamReceiver receiver = ReusableImmutableNormalizedNodeStreamWriter.create();
+ for (int i = 0; i < size; i++) {
+ final byte type = in.readByte();
+ switch (type) {
+ case Modification.WRITE:
+ modifications.add(WriteModification.fromStream(input, getVersion(), receiver));
+ break;
+ case Modification.MERGE:
+ modifications.add(MergeModification.fromStream(input, getVersion(), receiver));
+ break;
+ case Modification.DELETE:
+ modifications.add(DeleteModification.fromStream(input, getVersion()));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ private void writeExternalLegacy(final ObjectOutput out) throws IOException {
+ try (NormalizedNodeDataOutput stream = getStreamVersion().newDataOutput(out)) {
+ for (Modification mod : modifications) {
+ out.writeByte(mod.getType());
+ mod.writeTo(stream);
+ }
+ }
+ }
+
+ private void readExternalModern(final NormalizedNodeDataInput in, final int size) throws IOException {
+ final ReusableStreamReceiver receiver = ReusableImmutableNormalizedNodeStreamWriter.create();
+ for (int i = 0; i < size; i++) {
+ final byte type = in.readByte();
+ switch (type) {
+ case Modification.WRITE:
+ modifications.add(WriteModification.fromStream(in, getVersion(), receiver));
+ break;
+ case Modification.MERGE:
+ modifications.add(MergeModification.fromStream(in, getVersion(), receiver));
+ break;
+ case Modification.DELETE:
+ modifications.add(DeleteModification.fromStream(in, getVersion()));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ private void writeExternalModern(final ObjectOutput out) throws IOException {
+ try (NormalizedNodeDataOutput stream = getStreamVersion().newDataOutput(out)) {
+ for (Modification mod : modifications) {
+ stream.writeByte(mod.getType());
+ mod.writeTo(stream);
}
}
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
/**
* WriteModification stores all the parameters required to write data to the specified path.
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class WriteModification extends AbstractModification {
private static final long serialVersionUID = 1L;
- private NormalizedNode<?, ?> data;
+ private NormalizedNode data;
public WriteModification() {
this(DataStoreVersions.CURRENT_VERSION);
super(version);
}
- WriteModification(final short version, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ WriteModification(final short version, final YangInstanceIdentifier path, final NormalizedNode data) {
super(version, path);
this.data = data;
}
- public WriteModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public WriteModification(final YangInstanceIdentifier path, final NormalizedNode data) {
super(path);
this.data = data;
}
transaction.write(getPath(), data);
}
- public NormalizedNode<?, ?> getData() {
+ public NormalizedNode getData() {
return data;
}
public static WriteModification fromStream(final NormalizedNodeDataInput in, final short version,
final ReusableStreamReceiver receiver) throws IOException {
- final NormalizedNode<?, ?> node = in.readNormalizedNode(receiver);
+ final NormalizedNode node = in.readNormalizedNode(receiver);
final YangInstanceIdentifier path = in.readYangInstanceIdentifier();
return new WriteModification(version, path, node);
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link AbortTransactionPayload}.
+ */
+final class AT implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public AT() {
+ // For Externalizable
+ }
+
+ AT(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new AbortTransactionPayload(identifier, bytes);
+ }
+}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.slf4j.Logger;
* @author Robert Varga
*/
public final class AbortTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
- private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
- return TransactionIdentifier.readFrom(in);
- }
-
- @Override
- protected AbortTransactionPayload createObject(final TransactionIdentifier identifier,
- final byte[] serialized) {
- return new AbortTransactionPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(AbortTransactionPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(AT::new);
AbortTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
super(transactionId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", transactionId, e);
- throw new RuntimeException("Failed to serialized " + transactionId, e);
+ throw new IllegalStateException("Failed to serialized " + transactionId, e);
}
return new AbortTransactionPayload(transactionId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected AT externalizableProxy(final byte[] serialized) {
+ return new AT(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
import static java.util.Objects.requireNonNull;
import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
/**
- * Abstract base class for our internal implementation of {@link DataTreeCandidateNode},
- * which we instantiate from a serialized stream. We do not retain the before-image and
- * do not implement {@link #getModifiedChild(PathArgument)}, as that method is only
- * useful for end users. Instances based on this class should never be leaked outside of
- * this component.
+ * Abstract base class for our internal implementation of {@link DataTreeCandidateNode}, which we instantiate from a
+ * serialized stream. We do not retain the before-image and do not implement {@link #modifiedChild(PathArgument)}, as
+ * that method is only useful for end users. Instances based on this class should never be leaked outside of this
+ * component.
*/
abstract class AbstractDataTreeCandidateNode implements DataTreeCandidateNode {
- private final ModificationType type;
+ private final @NonNull ModificationType type;
protected AbstractDataTreeCandidateNode(final ModificationType type) {
this.type = requireNonNull(type);
}
@Override
- public final Optional<DataTreeCandidateNode> getModifiedChild(final PathArgument identifier) {
+ public final DataTreeCandidateNode modifiedChild(final PathArgument identifier) {
throw new UnsupportedOperationException("Not implemented");
}
@Override
- public final ModificationType getModificationType() {
+ public final ModificationType modificationType() {
return type;
}
@Override
- public final Optional<NormalizedNode<?, ?>> getDataBefore() {
+ public final NormalizedNode dataBefore() {
throw new UnsupportedOperationException("Before-image not available after serialization");
}
static DataTreeCandidateNode createUnmodified() {
return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) {
@Override
- public PathArgument getIdentifier() {
+ public PathArgument name() {
throw new UnsupportedOperationException("Root node does not have an identifier");
}
@Override
- public Optional<NormalizedNode<?, ?>> getDataAfter() {
+ public NormalizedNode dataAfter() {
throw new UnsupportedOperationException("After-image not available after serialization");
}
@Override
- public Collection<DataTreeCandidateNode> getChildNodes() {
+ public Collection<DataTreeCandidateNode> childNodes() {
throw new UnsupportedOperationException("Children not available after serialization");
}
};
*/
package org.opendaylight.controller.cluster.datastore.persisted;
+import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
-import com.google.common.base.Verify;
-import com.google.common.io.ByteStreams;
-import java.io.DataInput;
+import com.google.common.base.MoreObjects;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
-import java.io.Serializable;
+import java.util.function.Function;
+import org.apache.commons.lang3.SerializationUtils;
import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
import org.opendaylight.yangtools.concepts.Identifier;
/**
- * Abstract base class for {@link Payload}s which hold a single {@link Identifier}.
- *
- * @author Robert Varga
+ * Abstract base class for {@link IdentifiablePayload}s which hold a single {@link Identifier}.
*/
-public abstract class AbstractIdentifiablePayload<T extends Identifier> extends IdentifiablePayload<T>
- implements Serializable {
- protected abstract static class AbstractProxy<T extends Identifier> implements Externalizable {
- private static final long serialVersionUID = 1L;
- private byte[] serialized;
- private T identifier;
-
- public AbstractProxy() {
- // For Externalizable
- }
-
- protected AbstractProxy(final byte[] serialized) {
- this.serialized = requireNonNull(serialized);
- }
-
+public abstract class AbstractIdentifiablePayload<T extends Identifier> extends IdentifiablePayload<T> {
+ /**
+ * An {@link Externalizable} with default implementations we expect our implementations to comply with. On-wire
+ * serialization format is defined by {@link #bytes()}.
+ */
+ protected interface SerialForm extends Externalizable {
+ /**
+ * Return the serial form of this object contents, corresponding to
+ * {@link AbstractIdentifiablePayload#serialized}.
+ *
+ * @return Serialized form
+ */
+ byte[] bytes();
+
+ /**
+ * Resolve this proxy to an actual {@link AbstractIdentifiablePayload}.
+ *
+ * @return A payload.
+ */
+ @java.io.Serial
+ Object readResolve();
+
+ /**
+ * Restore state from specified serialized form.
+ *
+ * @param newBytes Serialized form, as returned by {@link #bytes()}
+ * @throws IOException when a deserialization problem occurs
+ */
+ void readExternal(byte[] newBytes) throws IOException;
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>
+ * The default implementation is canonical and should never be overridden.
+ */
@Override
- public final void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(serialized.length);
- out.write(serialized);
+ default void readExternal(final ObjectInput in) throws IOException {
+ final var bytes = new byte[in.readInt()];
+ in.readFully(bytes);
+ readExternal(bytes);
}
+ /**
+ * {@inheritDoc}
+ *
+ * <p>
+ * The default implementation is canonical and should never be overridden.
+ */
@Override
- public final void readExternal(final ObjectInput in) throws IOException {
- final int length = in.readInt();
- serialized = new byte[length];
- in.readFully(serialized);
- identifier = Verify.verifyNotNull(readIdentifier(ByteStreams.newDataInput(serialized)));
+ default void writeExternal(final ObjectOutput out) throws IOException {
+ final var bytes = bytes();
+ out.writeInt(bytes.length);
+ out.write(bytes);
}
-
- protected final Object readResolve() {
- return Verify.verifyNotNull(createObject(identifier, serialized));
- }
-
- protected abstract @NonNull T readIdentifier(@NonNull DataInput in) throws IOException;
-
- @SuppressWarnings("checkstyle:hiddenField")
- protected abstract @NonNull Identifiable<T> createObject(@NonNull T identifier, byte @NonNull[] serialized);
}
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private final byte[] serialized;
- private final T identifier;
+
+ private final byte @NonNull [] serialized;
+ private final @NonNull T identifier;
AbstractIdentifiablePayload(final @NonNull T identifier, final byte @NonNull[] serialized) {
this.identifier = requireNonNull(identifier);
return serialized.length;
}
- protected final Object writeReplace() {
- return Verify.verifyNotNull(externalizableProxy(serialized));
+ protected final byte @NonNull [] serialized() {
+ return serialized;
+ }
+
+ @Override
+ public final int serializedSize() {
+ // TODO: this is not entirely accurate, as the serialization stream has additional overheads:
+ // - 3 bytes for each block of data <256 bytes
+ // - 5 bytes for each block of data >=256 bytes
+ // - each block of data is limited to 1024 bytes as per serialization spec
+ return size() + externalizableProxySize();
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("identifier", identifier).add("size", size()).toString();
}
- @SuppressWarnings("checkstyle:hiddenField")
- protected abstract @NonNull AbstractProxy<T> externalizableProxy(byte @NonNull[] serialized);
+ @Override
+ public final Object writeReplace() {
+ return verifyNotNull(externalizableProxy(serialized));
+ }
+
+ protected abstract @NonNull SerialForm externalizableProxy(byte @NonNull[] serialized);
+
+ protected abstract int externalizableProxySize();
+
+ protected static final int externalizableProxySize(final Function<byte[], ? extends SerialForm> constructor) {
+ return SerializationUtils.serialize(constructor.apply(new byte[0])).length;
+ }
}
*/
package org.opendaylight.controller.cluster.datastore.persisted;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
static @NonNull ShardSnapshotState versionedDeserialize(final ObjectInput in) throws IOException {
final PayloadVersion version = PayloadVersion.readFrom(in);
switch (version) {
- case BORON:
- case NEON_SR2:
- case SODIUM_SR1:
+ case CHLORINE_SR2:
return new ShardSnapshotState(readSnapshot(in), true);
- case MAGNESIUM:
+ case POTASSIUM:
return new ShardSnapshotState(readSnapshot(in), false);
case TEST_FUTURE_VERSION:
case TEST_PAST_VERSION:
// These versions are never returned and this code is effectively dead
default:
// Not included as default in above switch to ensure we get warnings when new versions are added
- throw new IOException("Encountered unhandled version" + version);
+ throw new IOException("Encountered unhandled version " + version);
}
}
}
@Override
- public final Optional<NormalizedNode<?, ?>> getRootNode() {
- return Optional.of(Verify.verifyNotNull(rootNode(), "Snapshot %s returned non-present root node", getClass()));
+ public final Optional<NormalizedNode> getRootNode() {
+ return Optional.of(verifyNotNull(rootNode(), "Snapshot %s returned non-present root node", getClass()));
}
/**
*
* @return The root node.
*/
- abstract @NonNull NormalizedNode<?, ?> rootNode();
+ abstract @NonNull NormalizedNode rootNode();
/**
* Return the snapshot payload version. Implementations of this method should return a constant.
private void versionedSerialize(final ObjectOutput out, final PayloadVersion version) throws IOException {
switch (version) {
- case BORON:
- case NEON_SR2:
- case SODIUM_SR1:
- case MAGNESIUM:
- // Boron, NeonSR2, Sodium and Magnesium snapshots use Java Serialization, but differ in stream format
+ case CHLORINE_SR2:
+ case POTASSIUM:
+ // Sodium onwards snapshots use Java Serialization, but differ in stream format
out.writeObject(this);
return;
case TEST_FUTURE_VERSION:
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CreateLocalHistoryPayload}.
+ */
+final class CH implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistoryIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CH() {
+ // For Externalizable
+ }
+
+ CH(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new CreateLocalHistoryPayload(identifier, bytes);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.StreamCorruptedException;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Chunked;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Simple;
+import org.opendaylight.controller.cluster.io.ChunkedByteArray;
+
+/**
+ * Serialization proxy for {@link CommitTransactionPayload}.
+ */
+final class CT implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private CommitTransactionPayload payload;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public CT() {
+ // For Externalizable
+ }
+
+ CT(final CommitTransactionPayload payload) {
+ this.payload = requireNonNull(payload);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeInt(payload.size());
+ payload.writeBytes(out);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ final int length = in.readInt();
+ if (length < 0) {
+ throw new StreamCorruptedException("Invalid payload length " + length);
+ } else if (length < CommitTransactionPayload.MAX_ARRAY_SIZE) {
+ final byte[] serialized = new byte[length];
+ in.readFully(serialized);
+ payload = new Simple(serialized);
+ } else {
+ payload = new Chunked(ChunkedByteArray.readFrom(in, length, CommitTransactionPayload.MAX_ARRAY_SIZE));
+ }
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(payload);
+ }
+}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.slf4j.Logger;
* @author Robert Varga
*/
public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
- private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-
- @Override
- protected CloseLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
- final byte[] serialized) {
- return new CloseLocalHistoryPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(CloseLocalHistoryPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(CH::new);
CloseLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
super(historyId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", historyId, e);
- throw new RuntimeException("Failed to serialize " + historyId, e);
+ throw new IllegalStateException("Failed to serialize " + historyId, e);
}
return new CloseLocalHistoryPayload(historyId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected DH externalizableProxy(final byte[] serialized) {
+ return new DH(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
*/
package org.opendaylight.controller.cluster.datastore.persisted;
-import static com.google.common.base.Verify.verifyNotNull;
+import static com.google.common.math.IntMath.ceilingPowerOfTwo;
import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.datastore.persisted.ChunkedOutputStream.MAX_ARRAY_SIZE;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
import com.google.common.io.ByteStreams;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
-import java.io.Externalizable;
import java.io.IOException;
-import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
-import java.io.StreamCorruptedException;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.Map.Entry;
+import org.apache.commons.lang3.SerializationUtils;
import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.yangtools.concepts.Variant;
+import org.opendaylight.controller.cluster.io.ChunkedByteArray;
+import org.opendaylight.controller.cluster.io.ChunkedOutputStream;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* @author Robert Varga
*/
@Beta
-public abstract class CommitTransactionPayload extends IdentifiablePayload<TransactionIdentifier>
+public abstract sealed class CommitTransactionPayload extends IdentifiablePayload<TransactionIdentifier>
implements Serializable {
+ @NonNullByDefault
+ public record CandidateTransaction(
+ TransactionIdentifier transactionId,
+ DataTreeCandidate candidate,
+ NormalizedNodeStreamVersion streamVersion) {
+ public CandidateTransaction {
+ requireNonNull(transactionId);
+ requireNonNull(candidate);
+ requireNonNull(streamVersion);
+ }
+ }
+
private static final Logger LOG = LoggerFactory.getLogger(CommitTransactionPayload.class);
private static final long serialVersionUID = 1L;
- private volatile Entry<TransactionIdentifier, DataTreeCandidateWithVersion> candidate = null;
+ static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
+ "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024));
- CommitTransactionPayload() {
+ private volatile CandidateTransaction candidate = null;
+ private CommitTransactionPayload() {
+ // hidden on purpose
}
public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
final DataTreeCandidate candidate, final PayloadVersion version, final int initialSerializedBufferCapacity)
throws IOException {
- final ChunkedOutputStream cos = new ChunkedOutputStream(initialSerializedBufferCapacity);
- try (DataOutputStream dos = new DataOutputStream(cos)) {
+ final var cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE);
+ try (var dos = new DataOutputStream(cos)) {
transactionId.writeTo(dos);
DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, version, candidate);
}
- final Variant<byte[], ChunkedByteArray> source = cos.toVariant();
+ final var source = cos.toVariant();
LOG.debug("Initial buffer capacity {}, actual serialized size {}", initialSerializedBufferCapacity, cos.size());
return source.isFirst() ? new Simple(source.getFirst()) : new Chunked(source.getSecond());
}
return create(transactionId, candidate, PayloadVersion.current());
}
- public @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate() throws IOException {
- Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = candidate;
+ public @NonNull CandidateTransaction getCandidate() throws IOException {
+ var localCandidate = candidate;
if (localCandidate == null) {
synchronized (this) {
localCandidate = candidate;
return localCandidate;
}
- public final @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate(
- final ReusableStreamReceiver receiver) throws IOException {
- final DataInput in = newDataInput();
- return new SimpleImmutableEntry<>(TransactionIdentifier.readFrom(in),
- DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver));
+ public final @NonNull CandidateTransaction getCandidate(final ReusableStreamReceiver receiver) throws IOException {
+ final var in = newDataInput();
+ final var transactionId = TransactionIdentifier.readFrom(in);
+ final var readCandidate = DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver);
+
+ return new CandidateTransaction(transactionId, readCandidate.candidate(), readCandidate.version());
}
@Override
public TransactionIdentifier getIdentifier() {
try {
- return getCandidate().getKey();
+ return getCandidate().transactionId();
} catch (IOException e) {
throw new IllegalStateException("Candidate deserialization failed.", e);
}
}
+ @Override
+ public final int serializedSize() {
+ // TODO: this is not entirely accurate as the the byte[] can be chunked by the serialization stream
+ return ProxySizeHolder.PROXY_SIZE + size();
+ }
+
/**
* The cached candidate needs to be cleared after it is done applying to the DataTree, otherwise it would be keeping
* deserialized in memory which are not needed anymore leading to wasted memory. This lets the payload know that
* this was the last time the candidate was needed ant it is safe to be cleared.
*/
- public Entry<TransactionIdentifier, DataTreeCandidateWithVersion> acquireCandidate() throws IOException {
- final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = getCandidate();
+ public @NonNull CandidateTransaction acquireCandidate() throws IOException {
+ final var localCandidate = getCandidate();
candidate = null;
return localCandidate;
}
+ @Override
+ public final String toString() {
+ final var helper = MoreObjects.toStringHelper(this);
+ final var localCandidate = candidate;
+ if (localCandidate != null) {
+ helper.add("identifier", candidate.transactionId());
+ }
+ return helper.add("size", size()).toString();
+ }
+
abstract void writeBytes(ObjectOutput out) throws IOException;
abstract DataInput newDataInput();
- final Object writeReplace() {
- return new Proxy(this);
+ @Override
+ public final Object writeReplace() {
+ return new CT(this);
}
- private static final class Simple extends CommitTransactionPayload {
+ static final class Simple extends CommitTransactionPayload {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final byte[] serialized;
}
}
- private static final class Chunked extends CommitTransactionPayload {
+ static final class Chunked extends CommitTransactionPayload {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via serialization proxy")
}
}
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private CommitTransactionPayload payload;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final CommitTransactionPayload payload) {
- this.payload = requireNonNull(payload);
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(payload.size());
- payload.writeBytes(out);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- final int length = in.readInt();
- if (length < 0) {
- throw new StreamCorruptedException("Invalid payload length " + length);
- } else if (length < MAX_ARRAY_SIZE) {
- final byte[] serialized = new byte[length];
- in.readFully(serialized);
- payload = new Simple(serialized);
- } else {
- payload = new Chunked(ChunkedByteArray.readFrom(in, length, MAX_ARRAY_SIZE));
- }
- }
+ // Exists to break initialization dependency between CommitTransactionPayload/Simple/Proxy
+ private static final class ProxySizeHolder {
+ static final int PROXY_SIZE = SerializationUtils.serialize(new CT(new Simple(new byte[0]))).length;
- private Object readResolve() {
- return verifyNotNull(payload);
+ private ProxySizeHolder() {
+ // Hidden on purpose
}
}
}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.slf4j.Logger;
* @author Robert Varga
*/
public final class CreateLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
- private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-
- @Override
- protected CreateLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
- final byte[] serialized) {
- return new CreateLocalHistoryPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(CreateLocalHistoryPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(CH::new);
CreateLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
super(historyId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", historyId, e);
- throw new RuntimeException("Failed to serialize " + historyId, e);
+ throw new IllegalStateException("Failed to serialize " + historyId, e);
}
return new CreateLocalHistoryPayload(historyId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected CH externalizableProxy(final byte[] serialized) {
+ return new CH(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CloseLocalHistoryPayload}.
+ */
+final class DH implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistoryIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DH() {
+ // For Externalizable
+ }
+
+ DH(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new CloseLocalHistoryPayload(identifier, bytes);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+
+/**
+ * Serialization proxy for {@link DatastoreSnapshot}.
+ */
+final class DS implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private DatastoreSnapshot datastoreSnapshot;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DS() {
+ // For Externalizable
+ }
+
+ DS(final DatastoreSnapshot datastoreSnapshot) {
+ this.datastoreSnapshot = requireNonNull(datastoreSnapshot);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final var type = (String) in.readObject();
+ final var snapshot = (ShardManagerSnapshot) in.readObject();
+
+ final int size = in.readInt();
+ var localShardSnapshots = new ArrayList<ShardSnapshot>(size);
+ for (int i = 0; i < size; i++) {
+ localShardSnapshots.add((ShardSnapshot) in.readObject());
+ }
+
+ datastoreSnapshot = new DatastoreSnapshot(type, snapshot, localShardSnapshots);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeObject(datastoreSnapshot.getType());
+ out.writeObject(datastoreSnapshot.getShardManagerSnapshot());
+
+ final var shardSnapshots = datastoreSnapshot.getShardSnapshots();
+ out.writeInt(shardSnapshots.size());
+ for (var shardSnapshot : shardSnapshots) {
+ out.writeObject(shardSnapshot);
+ }
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(datastoreSnapshot);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+
+/**
+ * Serialization proxy for {@link ShardDataTreeSnapshot}.
+ */
+final class DSS implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ShardSnapshot shardSnapshot;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DSS() {
+ // For Externalizable
+ }
+
+ DSS(final ShardSnapshot shardSnapshot) {
+ this.shardSnapshot = requireNonNull(shardSnapshot);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeObject(shardSnapshot.getName());
+ out.writeObject(shardSnapshot.getSnapshot());
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ shardSnapshot = new ShardSnapshot((String) in.readObject(), (Snapshot) in.readObject());
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(shardSnapshot);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link DisableTrackingPayload}.
+ */
+final class DT implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ClientIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public DT() {
+ // For Externalizable
+ }
+
+ DT(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(ClientIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new DisableTrackingPayload(identifier, bytes);
+ }
+}
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
-import org.eclipse.jdt.annotation.NonNullByDefault;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility serialization/deserialization for {@link DataTreeCandidate}. Note that this utility does not maintain
* before-image information across serialization.
- *
- * @author Robert Varga
*/
@Beta
public final class DataTreeCandidateInputOutput {
+ public record DataTreeCandidateWithVersion(
+ @NonNull DataTreeCandidate candidate,
+ @NonNull NormalizedNodeStreamVersion version) implements Immutable {
+ public DataTreeCandidateWithVersion {
+ requireNonNull(candidate);
+ requireNonNull(version);
+ }
+ }
+
private static final Logger LOG = LoggerFactory.getLogger(DataTreeCandidateInputOutput.class);
private static final byte DELETE = 0;
private static final byte SUBTREE_MODIFIED = 1;
private static DataTreeCandidateNode readModifiedNode(final ModificationType type, final NormalizedNodeDataInput in,
final ReusableStreamReceiver receiver) throws IOException {
- final PathArgument identifier = in.readPathArgument();
- final Collection<DataTreeCandidateNode> children = readChildren(in, receiver);
+ final var pathArg = in.readPathArgument();
+ final var children = readChildren(in, receiver);
if (children.isEmpty()) {
- LOG.debug("Modified node {} does not have any children, not instantiating it", identifier);
+ LOG.debug("Modified node {} does not have any children, not instantiating it", pathArg);
return null;
}
- return ModifiedDataTreeCandidateNode.create(identifier, type, children);
+ return ModifiedDataTreeCandidateNode.create(pathArg, type, children);
}
- private static Collection<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in,
+ private static List<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in,
final ReusableStreamReceiver receiver) throws IOException {
final int size = in.readInt();
if (size == 0) {
- return ImmutableList.of();
+ return List.of();
}
- final Collection<DataTreeCandidateNode> ret = new ArrayList<>(size);
+ final var ret = new ArrayList<DataTreeCandidateNode>(size);
for (int i = 0; i < size; ++i) {
- final DataTreeCandidateNode child = readNode(in, receiver);
+ final var child = readNode(in, receiver);
if (child != null) {
ret.add(child);
}
private static DataTreeCandidateNode readNode(final NormalizedNodeDataInput in,
final ReusableStreamReceiver receiver) throws IOException {
final byte type = in.readByte();
- switch (type) {
- case APPEARED:
- return readModifiedNode(ModificationType.APPEARED, in, receiver);
- case DELETE:
- return DeletedDataTreeCandidateNode.create(in.readPathArgument());
- case DISAPPEARED:
- return readModifiedNode(ModificationType.DISAPPEARED, in, receiver);
- case SUBTREE_MODIFIED:
- return readModifiedNode(ModificationType.SUBTREE_MODIFIED, in, receiver);
- case UNMODIFIED:
- return null;
- case WRITE:
- return DataTreeCandidateNodes.written(in.readNormalizedNode(receiver));
- default:
- throw new IllegalArgumentException("Unhandled node type " + type);
- }
- }
-
- @NonNullByDefault
- public static final class DataTreeCandidateWithVersion implements Immutable {
- private final DataTreeCandidate candidate;
- private final NormalizedNodeStreamVersion version;
-
- public DataTreeCandidateWithVersion(final DataTreeCandidate candidate,
- final NormalizedNodeStreamVersion version) {
- this.candidate = requireNonNull(candidate);
- this.version = requireNonNull(version);
- }
-
- public DataTreeCandidate getCandidate() {
- return candidate;
- }
-
- public NormalizedNodeStreamVersion getVersion() {
- return version;
- }
+ return switch (type) {
+ case APPEARED -> readModifiedNode(ModificationType.APPEARED, in, receiver);
+ case DELETE -> DeletedDataTreeCandidateNode.create(in.readPathArgument());
+ case DISAPPEARED -> readModifiedNode(ModificationType.DISAPPEARED, in, receiver);
+ case SUBTREE_MODIFIED -> readModifiedNode(ModificationType.SUBTREE_MODIFIED, in, receiver);
+ case UNMODIFIED -> null;
+ case WRITE -> DataTreeCandidateNodes.written(in.readNormalizedNode(receiver));
+ default -> throw new IllegalArgumentException("Unhandled node type " + type);
+ };
}
public static DataTreeCandidateWithVersion readDataTreeCandidate(final DataInput in,
final ReusableStreamReceiver receiver) throws IOException {
- final NormalizedNodeDataInput reader = NormalizedNodeDataInput.newDataInput(in);
- final YangInstanceIdentifier rootPath = reader.readYangInstanceIdentifier();
+ final var reader = NormalizedNodeDataInput.newDataInput(in);
+ final var rootPath = reader.readYangInstanceIdentifier();
final byte type = reader.readByte();
- final DataTreeCandidateNode rootNode;
- switch (type) {
- case APPEARED:
- rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED,
- readChildren(reader, receiver));
- break;
- case DELETE:
- rootNode = DeletedDataTreeCandidateNode.create();
- break;
- case DISAPPEARED:
- rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED,
- readChildren(reader, receiver));
- break;
- case SUBTREE_MODIFIED:
- rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
- readChildren(reader, receiver));
- break;
- case WRITE:
- rootNode = DataTreeCandidateNodes.written(reader.readNormalizedNode(receiver));
- break;
- case UNMODIFIED:
- rootNode = AbstractDataTreeCandidateNode.createUnmodified();
- break;
- default:
- throw new IllegalArgumentException("Unhandled node type " + type);
- }
-
+ final DataTreeCandidateNode rootNode = switch (type) {
+ case APPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED,
+ readChildren(reader, receiver));
+ case DELETE -> DeletedDataTreeCandidateNode.create();
+ case DISAPPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED,
+ readChildren(reader, receiver));
+ case SUBTREE_MODIFIED -> ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
+ readChildren(reader, receiver));
+ case WRITE -> DataTreeCandidateNodes.written(reader.readNormalizedNode(receiver));
+ case UNMODIFIED -> AbstractDataTreeCandidateNode.createUnmodified();
+ default -> throw new IllegalArgumentException("Unhandled node type " + type);
+ };
return new DataTreeCandidateWithVersion(DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode),
reader.getVersion());
}
private static void writeChildren(final NormalizedNodeDataOutput out,
final Collection<DataTreeCandidateNode> children) throws IOException {
out.writeInt(children.size());
- for (DataTreeCandidateNode child : children) {
+ for (var child : children) {
writeNode(out, child);
}
}
private static void writeNode(final NormalizedNodeDataOutput out, final DataTreeCandidateNode node)
throws IOException {
- switch (node.getModificationType()) {
- case APPEARED:
+ switch (node.modificationType()) {
+ case APPEARED -> {
out.writeByte(APPEARED);
- out.writePathArgument(node.getIdentifier());
- writeChildren(out, node.getChildNodes());
- break;
- case DELETE:
+ out.writePathArgument(node.name());
+ writeChildren(out, node.childNodes());
+ }
+ case DELETE -> {
out.writeByte(DELETE);
- out.writePathArgument(node.getIdentifier());
- break;
- case DISAPPEARED:
+ out.writePathArgument(node.name());
+ }
+ case DISAPPEARED -> {
out.writeByte(DISAPPEARED);
- out.writePathArgument(node.getIdentifier());
- writeChildren(out, node.getChildNodes());
- break;
- case SUBTREE_MODIFIED:
+ out.writePathArgument(node.name());
+ writeChildren(out, node.childNodes());
+ }
+ case SUBTREE_MODIFIED -> {
out.writeByte(SUBTREE_MODIFIED);
- out.writePathArgument(node.getIdentifier());
- writeChildren(out, node.getChildNodes());
- break;
- case WRITE:
+ out.writePathArgument(node.name());
+ writeChildren(out, node.childNodes());
+ }
+ case WRITE -> {
out.writeByte(WRITE);
- out.writeNormalizedNode(node.getDataAfter().get());
- break;
- case UNMODIFIED:
- out.writeByte(UNMODIFIED);
- break;
- default:
- throwUnhandledNodeType(node);
+ out.writeNormalizedNode(node.getDataAfter());
+ }
+ case UNMODIFIED -> out.writeByte(UNMODIFIED);
+ default -> throwUnhandledNodeType(node);
}
}
@VisibleForTesting
public static void writeDataTreeCandidate(final DataOutput out, final PayloadVersion version,
final DataTreeCandidate candidate) throws IOException {
- try (NormalizedNodeDataOutput writer = version.getStreamVersion().newDataOutput(out)) {
+ try (var writer = version.getStreamVersion().newDataOutput(out)) {
writer.writeYangInstanceIdentifier(candidate.getRootPath());
- final DataTreeCandidateNode node = candidate.getRootNode();
- switch (node.getModificationType()) {
- case APPEARED:
+ final var node = candidate.getRootNode();
+ switch (node.modificationType()) {
+ case APPEARED -> {
writer.writeByte(APPEARED);
- writeChildren(writer, node.getChildNodes());
- break;
- case DELETE:
- writer.writeByte(DELETE);
- break;
- case DISAPPEARED:
+ writeChildren(writer, node.childNodes());
+ }
+ case DELETE -> writer.writeByte(DELETE);
+ case DISAPPEARED -> {
writer.writeByte(DISAPPEARED);
- writeChildren(writer, node.getChildNodes());
- break;
- case SUBTREE_MODIFIED:
+ writeChildren(writer, node.childNodes());
+ }
+ case SUBTREE_MODIFIED -> {
writer.writeByte(SUBTREE_MODIFIED);
- writeChildren(writer, node.getChildNodes());
- break;
- case UNMODIFIED:
- writer.writeByte(UNMODIFIED);
- break;
- case WRITE:
+ writeChildren(writer, node.childNodes());
+ }
+ case UNMODIFIED -> writer.writeByte(UNMODIFIED);
+ case WRITE -> {
writer.writeByte(WRITE);
- writer.writeNormalizedNode(node.getDataAfter().get());
- break;
- default:
- throwUnhandledNodeType(node);
+ writer.writeNormalizedNode(node.getDataAfter());
+ }
+ default -> throwUnhandledNodeType(node);
}
}
}
}
private static void throwUnhandledNodeType(final DataTreeCandidateNode node) {
- throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+ throw new IllegalArgumentException("Unhandled node type " + node.modificationType());
}
}
import static java.util.Objects.requireNonNull;
import com.google.common.collect.ImmutableList;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
-import java.util.ArrayList;
import java.util.List;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
*
* @author Thomas Pantelis
*/
-public class DatastoreSnapshot implements Serializable {
+public final class DatastoreSnapshot implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private DatastoreSnapshot datastoreSnapshot;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final DatastoreSnapshot datastoreSnapshot) {
- this.datastoreSnapshot = datastoreSnapshot;
- }
-
- @Override
- public void writeExternal(ObjectOutput out) throws IOException {
- out.writeObject(datastoreSnapshot.type);
- out.writeObject(datastoreSnapshot.shardManagerSnapshot);
-
- out.writeInt(datastoreSnapshot.shardSnapshots.size());
- for (ShardSnapshot shardSnapshot: datastoreSnapshot.shardSnapshots) {
- out.writeObject(shardSnapshot);
- }
- }
-
- @Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- String localType = (String)in.readObject();
- ShardManagerSnapshot localShardManagerSnapshot = (ShardManagerSnapshot) in.readObject();
-
- int size = in.readInt();
- List<ShardSnapshot> localShardSnapshots = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- localShardSnapshots.add((ShardSnapshot) in.readObject());
- }
-
- datastoreSnapshot = new DatastoreSnapshot(localType, localShardManagerSnapshot, localShardSnapshots);
- }
-
- private Object readResolve() {
- return datastoreSnapshot;
- }
- }
-
- private final String type;
+ private final @NonNull String type;
private final ShardManagerSnapshot shardManagerSnapshot;
- private final List<ShardSnapshot> shardSnapshots;
+ private final @NonNull ImmutableList<ShardSnapshot> shardSnapshots;
- public DatastoreSnapshot(@NonNull String type, @Nullable ShardManagerSnapshot shardManagerSnapshot,
- @NonNull List<ShardSnapshot> shardSnapshots) {
+ public DatastoreSnapshot(final @NonNull String type, final @Nullable ShardManagerSnapshot shardManagerSnapshot,
+ final @NonNull List<ShardSnapshot> shardSnapshots) {
this.type = requireNonNull(type);
this.shardManagerSnapshot = shardManagerSnapshot;
this.shardSnapshots = ImmutableList.copyOf(shardSnapshots);
return shardSnapshots;
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
+ return new DS(this);
}
- public static class ShardSnapshot implements Serializable {
+ public static final class ShardSnapshot implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private ShardSnapshot shardSnapshot;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ShardSnapshot shardSnapshot) {
- this.shardSnapshot = shardSnapshot;
- }
-
- @Override
- public void writeExternal(ObjectOutput out) throws IOException {
- out.writeObject(shardSnapshot.name);
- out.writeObject(shardSnapshot.snapshot);
- }
-
- @Override
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- shardSnapshot = new ShardSnapshot((String)in.readObject(), (Snapshot) in.readObject());
- }
-
- private Object readResolve() {
- return shardSnapshot;
- }
- }
-
- private final String name;
- private final Snapshot snapshot;
+ private final @NonNull String name;
+ private final @NonNull Snapshot snapshot;
- public ShardSnapshot(@NonNull String name, @NonNull Snapshot snapshot) {
+ public ShardSnapshot(final @NonNull String name, final @NonNull Snapshot snapshot) {
this.name = requireNonNull(name);
this.snapshot = requireNonNull(snapshot);
}
return snapshot;
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
+ return new DSS(this);
}
}
}
package org.opendaylight.controller.cluster.datastore.persisted;
import java.util.Collection;
-import java.util.Optional;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
/**
* A deserialized {@link DataTreeCandidateNode} which represents a deletion.
static DataTreeCandidateNode create() {
return new DeletedDataTreeCandidateNode() {
@Override
- public PathArgument getIdentifier() {
+ public PathArgument name() {
throw new UnsupportedOperationException("Root node does not have an identifier");
}
};
static DataTreeCandidateNode create(final PathArgument identifier) {
return new DeletedDataTreeCandidateNode() {
@Override
- public PathArgument getIdentifier() {
+ public PathArgument name() {
return identifier;
}
};
}
@Override
- public final Optional<NormalizedNode<?, ?>> getDataAfter() {
- return Optional.empty();
+ public final NormalizedNode dataAfter() {
+ return null;
}
@Override
- public final Collection<DataTreeCandidateNode> getChildNodes() {
- // We would require the before-image to reconstruct the list of nodes which
- // were deleted.
+ public final Collection<DataTreeCandidateNode> childNodes() {
+ // We would require the before-image to reconstruct the list of nodes which were deleted.
throw new UnsupportedOperationException("Children not available after serialization");
}
}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class DisableTrackingPayload extends AbstractIdentifiablePayload<ClientIdentifier> {
- private static final class Proxy extends AbstractProxy<ClientIdentifier> {
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected ClientIdentifier readIdentifier(final DataInput in) throws IOException {
- return ClientIdentifier.readFrom(in);
- }
-
- @Override
- protected DisableTrackingPayload createObject(final ClientIdentifier identifier,
- final byte[] serialized) {
- return new DisableTrackingPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(DisableTrackingPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(DT::new);
DisableTrackingPayload(final ClientIdentifier clientId, final byte[] serialized) {
super(clientId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", clientId, e);
- throw new RuntimeException("Failed to serialize " + clientId, e);
+ throw new IllegalStateException("Failed to serialize " + clientId, e);
}
return new DisableTrackingPayload(clientId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected DT externalizableProxy(final byte[] serialized) {
+ return new DT(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.List;
+
+/**
+ * Externalizable proxy for {@link FrontendShardDataTreeSnapshotMetadata}.
+ */
+final class FM implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private List<FrontendClientMetadata> clients;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public FM() {
+ // For Externalizable
+ }
+
+ FM(final FrontendShardDataTreeSnapshotMetadata metadata) {
+ clients = metadata.getClients();
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeInt(clients.size());
+ for (var c : clients) {
+ c.writeTo(out);
+ }
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ final int size = in.readInt();
+ final var builder = ImmutableList.<FrontendClientMetadata>builderWithExpectedSize(size);
+ for (int i = 0; i < size ; ++i) {
+ builder.add(FrontendClientMetadata.readFrom(in));
+ }
+ clients = builder.build();
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return new FrontendShardDataTreeSnapshotMetadata(clients);
+ }
+}
\ No newline at end of file
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.ImmutableRangeSet.Builder;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.primitives.UnsignedLong;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Collection;
-import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
import org.opendaylight.yangtools.concepts.WritableObject;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-public final class FrontendClientMetadata implements Identifiable<ClientIdentifier>, WritableObject {
- private final Collection<FrontendHistoryMetadata> currentHistories;
- private final RangeSet<UnsignedLong> purgedHistories;
- private final ClientIdentifier identifier;
+public final class FrontendClientMetadata implements WritableObject {
+ private final @NonNull ImmutableList<FrontendHistoryMetadata> currentHistories;
+ private final @NonNull ImmutableUnsignedLongSet purgedHistories;
+ private final @NonNull ClientIdentifier clientId;
- public FrontendClientMetadata(final ClientIdentifier identifier, final RangeSet<UnsignedLong> purgedHistories,
+ public FrontendClientMetadata(final ClientIdentifier clientId, final ImmutableUnsignedLongSet purgedHistories,
final Collection<FrontendHistoryMetadata> currentHistories) {
- this.identifier = requireNonNull(identifier);
- this.purgedHistories = ImmutableRangeSet.copyOf(purgedHistories);
+ this.clientId = requireNonNull(clientId);
+ this.purgedHistories = requireNonNull(purgedHistories);
this.currentHistories = ImmutableList.copyOf(currentHistories);
}
- public Collection<FrontendHistoryMetadata> getCurrentHistories() {
- return currentHistories;
+ public ClientIdentifier clientId() {
+ return clientId;
}
- public RangeSet<UnsignedLong> getPurgedHistories() {
- return purgedHistories;
+ public ImmutableList<FrontendHistoryMetadata> getCurrentHistories() {
+ return currentHistories;
}
- @Override
- public ClientIdentifier getIdentifier() {
- return identifier;
+ public ImmutableUnsignedLongSet getPurgedHistories() {
+ return purgedHistories;
}
@Override
public void writeTo(final DataOutput out) throws IOException {
- identifier.writeTo(out);
-
- final Set<Range<UnsignedLong>> ranges = purgedHistories.asRanges();
- out.writeInt(ranges.size());
- for (final Range<UnsignedLong> r : ranges) {
- WritableObjects.writeLongs(out, r.lowerEndpoint().longValue(), r.upperEndpoint().longValue());
- }
+ clientId.writeTo(out);
+ purgedHistories.writeTo(out);
out.writeInt(currentHistories.size());
for (final FrontendHistoryMetadata h : currentHistories) {
}
public static FrontendClientMetadata readFrom(final DataInput in) throws IOException {
- final ClientIdentifier id = ClientIdentifier.readFrom(in);
-
- final int purgedSize = in.readInt();
- final Builder<UnsignedLong> b = ImmutableRangeSet.builder();
- for (int i = 0; i < purgedSize; ++i) {
- final byte header = WritableObjects.readLongHeader(in);
- final UnsignedLong lower = UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, header));
- final UnsignedLong upper = UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, header));
-
- b.add(Range.closed(lower, upper));
- }
+ final var clientId = ClientIdentifier.readFrom(in);
+ final var purgedHistories = ImmutableUnsignedLongSet.readFrom(in);
final int currentSize = in.readInt();
- final Collection<FrontendHistoryMetadata> currentHistories = new ArrayList<>(currentSize);
+ final var currentBuilder = ImmutableList.<FrontendHistoryMetadata>builderWithExpectedSize(currentSize);
for (int i = 0; i < currentSize; ++i) {
- currentHistories.add(FrontendHistoryMetadata.readFrom(in));
+ currentBuilder.add(FrontendHistoryMetadata.readFrom(in));
}
- return new FrontendClientMetadata(id, b.build(), currentHistories);
+ return new FrontendClientMetadata(clientId, purgedHistories, currentBuilder.build());
}
@Override
public String toString() {
- return MoreObjects.toStringHelper(FrontendClientMetadata.class).add("identifer", identifier)
- .add("current", currentHistories).add("purged", purgedHistories).toString();
+ return MoreObjects.toStringHelper(FrontendClientMetadata.class)
+ .add("clientId", clientId).add("current", currentHistories).add("purged", purgedHistories).toString();
}
}
*/
package org.opendaylight.controller.cluster.datastore.persisted;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.base.MoreObjects;
-import com.google.common.base.Verify;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
-import com.google.common.primitives.UnsignedLong;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
import org.opendaylight.yangtools.concepts.WritableObject;
import org.opendaylight.yangtools.concepts.WritableObjects;
public final class FrontendHistoryMetadata implements WritableObject {
- private final RangeSet<UnsignedLong> purgedTransactions;
- private final Map<UnsignedLong, Boolean> closedTransactions;
+ private final @NonNull ImmutableUnsignedLongSet purgedTransactions;
+ private final @NonNull UnsignedLongBitmap closedTransactions;
private final long historyId;
private final long cookie;
private final boolean closed;
public FrontendHistoryMetadata(final long historyId, final long cookie, final boolean closed,
- final Map<UnsignedLong, Boolean> closedTransactions, final RangeSet<UnsignedLong> purgedTransactions) {
+ final UnsignedLongBitmap closedTransactions, final ImmutableUnsignedLongSet purgedTransactions) {
this.historyId = historyId;
this.cookie = cookie;
this.closed = closed;
- this.closedTransactions = ImmutableMap.copyOf(closedTransactions);
- this.purgedTransactions = ImmutableRangeSet.copyOf(purgedTransactions);
+ this.closedTransactions = requireNonNull(closedTransactions);
+ this.purgedTransactions = requireNonNull(purgedTransactions);
}
public long getHistoryId() {
return closed;
}
- public Map<UnsignedLong, Boolean> getClosedTransactions() {
+ public UnsignedLongBitmap getClosedTransactions() {
return closedTransactions;
}
- public RangeSet<UnsignedLong> getPurgedTransactions() {
+ public ImmutableUnsignedLongSet getPurgedTransactions() {
return purgedTransactions;
}
WritableObjects.writeLongs(out, historyId, cookie);
out.writeBoolean(closed);
- final Set<Range<UnsignedLong>> purgedRanges = purgedTransactions.asRanges();
- WritableObjects.writeLongs(out, closedTransactions.size(), purgedRanges.size());
- for (Entry<UnsignedLong, Boolean> e : closedTransactions.entrySet()) {
- WritableObjects.writeLong(out, e.getKey().longValue());
- out.writeBoolean(e.getValue().booleanValue());
- }
- for (Range<UnsignedLong> r : purgedRanges) {
- WritableObjects.writeLongs(out, r.lowerEndpoint().longValue(), r.upperEndpoint().longValue());
- }
+ final int closedSize = closedTransactions.size();
+ final int purgedSize = purgedTransactions.rangeSize();
+ WritableObjects.writeLongs(out, closedSize, purgedSize);
+ closedTransactions.writeEntriesTo(out, closedSize);
+ purgedTransactions.writeRangesTo(out, purgedSize);
}
public static FrontendHistoryMetadata readFrom(final DataInput in) throws IOException {
- byte header = WritableObjects.readLongHeader(in);
- final long historyId = WritableObjects.readFirstLong(in, header);
- final long cookie = WritableObjects.readSecondLong(in, header);
+ final byte firstHdr = WritableObjects.readLongHeader(in);
+ final long historyId = WritableObjects.readFirstLong(in, firstHdr);
+ final long cookie = WritableObjects.readSecondLong(in, firstHdr);
final boolean closed = in.readBoolean();
- header = WritableObjects.readLongHeader(in);
- long ls = WritableObjects.readFirstLong(in, header);
- Verify.verify(ls >= 0 && ls <= Integer.MAX_VALUE);
- final int csize = (int) ls;
-
- ls = WritableObjects.readSecondLong(in, header);
- Verify.verify(ls >= 0 && ls <= Integer.MAX_VALUE);
- final int psize = (int) ls;
-
- final Map<UnsignedLong, Boolean> closedTransactions = new HashMap<>(csize);
- for (int i = 0; i < csize; ++i) {
- final UnsignedLong key = UnsignedLong.fromLongBits(WritableObjects.readLong(in));
- final Boolean value = Boolean.valueOf(in.readBoolean());
- closedTransactions.put(key, value);
- }
- final RangeSet<UnsignedLong> purgedTransactions = TreeRangeSet.create();
- for (int i = 0; i < psize; ++i) {
- final byte h = WritableObjects.readLongHeader(in);
- final UnsignedLong l = UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, h));
- final UnsignedLong u = UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, h));
- purgedTransactions.add(Range.closed(l, u));
- }
+ final byte secondHdr = WritableObjects.readLongHeader(in);
+ final int csize = verifySize(WritableObjects.readFirstLong(in, secondHdr));
+ final int psize = verifySize(WritableObjects.readSecondLong(in, secondHdr));
- return new FrontendHistoryMetadata(historyId, cookie, closed, closedTransactions, purgedTransactions);
+ return new FrontendHistoryMetadata(historyId, cookie, closed,
+ UnsignedLongBitmap.readFrom(in, csize),
+ ImmutableUnsignedLongSet.readFrom(in, psize));
}
@Override
public String toString() {
- return MoreObjects.toStringHelper(FrontendHistoryMetadata.class).add("historyId", historyId)
- .add("cookie", cookie).add("closed", closed).add("closedTransactions", closedTransactions)
- .add("purgedTransactions", purgedTransactions).toString();
+ return MoreObjects.toStringHelper(FrontendHistoryMetadata.class)
+ .add("historyId", historyId)
+ .add("cookie", cookie)
+ .add("closed", closed)
+ .add("closedTransactions", closedTransactions)
+ .add("purgedTransactions", purgedTransactions)
+ .toString();
+ }
+
+ private static int verifySize(final long size) throws IOException {
+ if (size < 0 || size > Integer.MAX_VALUE) {
+ throw new IOException("Invalid size " + size);
+ }
+ return (int) size;
}
}
import com.google.common.collect.ImmutableList;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-public final class FrontendShardDataTreeSnapshotMetadata extends
- ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
-
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private List<FrontendClientMetadata> clients;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final FrontendShardDataTreeSnapshotMetadata metadata) {
- this.clients = metadata.getClients();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(clients.size());
- for (final FrontendClientMetadata c : clients) {
- c.writeTo(out);
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- final int size = in.readInt();
- final List<FrontendClientMetadata> readedClients = new ArrayList<>(size);
- for (int i = 0; i < size ; ++i) {
- readedClients.add(FrontendClientMetadata.readFrom(in));
- }
- this.clients = ImmutableList.copyOf(readedClients);
- }
-
- private Object readResolve() {
- return new FrontendShardDataTreeSnapshotMetadata(clients);
- }
- }
-
+public final class FrontendShardDataTreeSnapshotMetadata
+ extends ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@Override
protected Externalizable externalizableProxy() {
- return new Proxy(this);
+ return new FM(this);
}
@Override
@Override
public String toString() {
- return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class).add("clients", clients)
- .toString();
+ return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class)
+ .add("clients", clients)
+ .toString();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.google.common.collect.ImmutableMap;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Externalizable proxy for {@link MetadataShardDataTreeSnapshot}.
+ */
+final class MS implements Externalizable {
+ private static final Logger LOG = LoggerFactory.getLogger(MS.class);
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
+ private NormalizedNodeStreamVersion version;
+ private NormalizedNode rootNode;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public MS() {
+ // For Externalizable
+ }
+
+ MS(final MetadataShardDataTreeSnapshot snapshot) {
+ rootNode = snapshot.getRootNode().orElseThrow();
+ metadata = snapshot.getMetadata();
+ version = snapshot.version().getStreamVersion();
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeInt(metadata.size());
+ for (var m : metadata.values()) {
+ out.writeObject(m);
+ }
+ try (var stream = version.newDataOutput(out)) {
+ stream.writeNormalizedNode(rootNode);
+ }
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final int metaSize = in.readInt();
+ checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
+
+ // Default pre-allocate is 4, which should be fine
+ final var metaBuilder = ImmutableMap
+ .<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>builder();
+ for (int i = 0; i < metaSize; ++i) {
+ final var m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
+ if (m != null) {
+ metaBuilder.put(m.getType(), m);
+ } else {
+ LOG.warn("Skipping null metadata");
+ }
+ }
+ metadata = metaBuilder.build();
+
+ final var stream = NormalizedNodeDataInput.newDataInput(in);
+ version = stream.getVersion();
+ rootNode = stream.readNormalizedNode();
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return new MetadataShardDataTreeSnapshot(rootNode, metadata);
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.datastore.persisted;
-import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
-import java.io.StreamCorruptedException;
import java.util.Map;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* An {@link AbstractVersionedShardDataTreeSnapshot} which contains additional metadata.
@Beta
public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardDataTreeSnapshot
implements Serializable {
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
- private static final Logger LOG = LoggerFactory.getLogger(MetadataShardDataTreeSnapshot.class);
-
- private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
- private NormalizedNodeStreamVersion version;
- private NormalizedNode<?, ?> rootNode;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final MetadataShardDataTreeSnapshot snapshot) {
- this.rootNode = snapshot.getRootNode().get();
- this.metadata = snapshot.getMetadata();
- this.version = snapshot.version().getStreamVersion();
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(metadata.size());
- for (ShardDataTreeSnapshotMetadata<?> m : metadata.values()) {
- out.writeObject(m);
- }
- out.writeBoolean(true);
- try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
- stream.writeNormalizedNode(rootNode);
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- final int metaSize = in.readInt();
- checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
-
- // Default pre-allocate is 4, which should be fine
- final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>
- metaBuilder = ImmutableMap.builder();
- for (int i = 0; i < metaSize; ++i) {
- final ShardDataTreeSnapshotMetadata<?> m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
- if (m != null) {
- metaBuilder.put(m.getType(), m);
- } else {
- LOG.warn("Skipping null metadata");
- }
- }
-
- metadata = metaBuilder.build();
- final boolean present = in.readBoolean();
- if (!present) {
- throw new StreamCorruptedException("Unexpected missing root node");
- }
-
- final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
- version = stream.getVersion();
- rootNode = stream.readNormalizedNode();
- }
-
- private Object readResolve() {
- return new MetadataShardDataTreeSnapshot(rootNode, metadata);
- }
- }
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
private final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See above justification.")
- private final NormalizedNode<?, ?> rootNode;
+ private final NormalizedNode rootNode;
- public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode) {
+ public MetadataShardDataTreeSnapshot(final NormalizedNode rootNode) {
this(rootNode, ImmutableMap.of());
}
- public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode,
+ public MetadataShardDataTreeSnapshot(final NormalizedNode rootNode,
final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata) {
this.rootNode = requireNonNull(rootNode);
this.metadata = ImmutableMap.copyOf(metadata);
}
@Override
- NormalizedNode<?, ?> rootNode() {
+ NormalizedNode rootNode() {
return rootNode;
}
@Override
PayloadVersion version() {
- return PayloadVersion.MAGNESIUM;
+ return PayloadVersion.POTASSIUM;
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
+ return new MS(this);
}
@Override
import static java.util.Objects.requireNonNull;
import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
/**
* A deserialized {@link DataTreeCandidateNode} which represents a modification in
* one of its children.
*/
abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
- private final Collection<DataTreeCandidateNode> children;
+ private final @NonNull Collection<DataTreeCandidateNode> children;
private ModifiedDataTreeCandidateNode(final ModificationType type,
final Collection<DataTreeCandidateNode> children) {
static DataTreeCandidateNode create(final ModificationType type, final Collection<DataTreeCandidateNode> children) {
return new ModifiedDataTreeCandidateNode(type, children) {
@Override
- public PathArgument getIdentifier() {
+ public PathArgument name() {
throw new UnsupportedOperationException("Root node does not have an identifier");
}
};
final Collection<DataTreeCandidateNode> children) {
return new ModifiedDataTreeCandidateNode(type, children) {
@Override
- public PathArgument getIdentifier() {
+ public PathArgument name() {
return identifier;
}
};
}
@Override
- public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+ public final NormalizedNode dataAfter() {
throw new UnsupportedOperationException("After-image not available after serialization");
}
@Override
- public final Collection<DataTreeCandidateNode> getChildNodes() {
+ public final Collection<DataTreeCandidateNode> childNodes() {
return children;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeLocalHistoryPayload}.
+ */
+final class PH implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private LocalHistoryIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public PH() {
+ // For Externalizable
+ }
+
+ PH(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new PurgeLocalHistoryPayload(identifier, bytes);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeTransactionPayload}.
+ */
+final class PT implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private TransactionIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public PT() {
+ // For Externalizable
+ }
+
+ PT(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+ identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new PurgeTransactionPayload(identifier, bytes);
+ }
+}
* participant instance should oppose RAFT candidates which produce persistence of an unsupported version. If a follower
* encounters an unsupported version it must not become fully-operational, as it does not have an accurate view
* of shard state.
- *
- * @author Robert Varga
*/
@Beta
public enum PayloadVersion implements WritableObject {
},
/**
- * Initial ABI version, as shipped with Boron Simultaneous release.
- */
- // We seed the initial version to be the same as DataStoreVersions.BORON_VERSION for compatibility reasons.
- BORON(5) {
- @Override
- public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.LITHIUM;
- }
- },
-
- /**
- * Revised payload version. Payloads remain the same as {@link #BORON}, but messages bearing QNames in any shape
- * are using {@link NormalizedNodeStreamVersion#NEON_SR2}, which improves encoding.
- */
- NEON_SR2(6) {
- @Override
- public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.NEON_SR2;
- }
- },
-
- /**
- * Revised payload version. Payloads remain the same as {@link #NEON_SR2}, but messages bearing QNames in any shape
- * are using {@link NormalizedNodeStreamVersion#SODIUM_SR1}, which improves encoding.
+ * ABI version shipped enabled {@code 2022.09 Chlorine SR2}. This version revises the serialization format of
+ * payloads proxies to reduce their size. Otherwise this format is equivalent to {@code #MAGNESIUM}.
+ *
+ * @deprecated Use {@link #POTASSIUM} instead.
*/
- SODIUM_SR1(7) {
+ @Deprecated(since = "8.0.0", forRemoval = true)
+ CHLORINE_SR2(9) {
@Override
public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.SODIUM_SR1;
+ return NormalizedNodeStreamVersion.MAGNESIUM;
}
},
/**
- * Revised payload version. Payloads remain the same as {@link #SODIUM_SR1}, but messages bearing QNames in any
- * shape are using {@link NormalizedNodeStreamVersion#MAGNESIUM}, which improves encoding.
+ * ABI version shipped enabled {@code 2023.09 Potassium}. This version removes Augmentation identifier and nodes.
+ * Otherwise this format is equivalent to {@link #CHLORINE_SR2}.
*/
- MAGNESIUM(8) {
+ POTASSIUM(10) {
@Override
public NormalizedNodeStreamVersion getStreamVersion() {
- return NormalizedNodeStreamVersion.MAGNESIUM;
+ return NormalizedNodeStreamVersion.POTASSIUM;
}
},
* @return Current {@link PayloadVersion}
*/
public static @NonNull PayloadVersion current() {
- return MAGNESIUM;
+ return POTASSIUM;
}
/**
*/
public static @NonNull PayloadVersion valueOf(final short version)
throws FutureVersionException, PastVersionException {
- switch (Short.toUnsignedInt(version)) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- throw new PastVersionException(version, BORON);
- case 5:
- return BORON;
- case 6:
- return NEON_SR2;
- case 7:
- return SODIUM_SR1;
- case 8:
- return MAGNESIUM;
- default:
- throw new FutureVersionException(version, MAGNESIUM);
- }
+ return switch (Short.toUnsignedInt(version)) {
+ case 0, 1, 2, 3, 4, 5, 6, 7, 8 -> throw new PastVersionException(version, CHLORINE_SR2);
+ case 9 -> CHLORINE_SR2;
+ case 10 -> POTASSIUM;
+ default -> throw new FutureVersionException(version, CHLORINE_SR2);
+ };
}
@Override
try {
return valueOf(s);
} catch (FutureVersionException | PastVersionException e) {
- throw new IOException("Unsupported version", e);
+ throw new IOException(e);
}
}
}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.slf4j.Logger;
* @author Robert Varga
*/
public final class PurgeLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
- private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
- return LocalHistoryIdentifier.readFrom(in);
- }
-
- @Override
- protected PurgeLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
- final byte[] serialized) {
- return new PurgeLocalHistoryPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(PurgeLocalHistoryPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(PH::new);
PurgeLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
super(historyId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", historyId, e);
- throw new RuntimeException("Failed to serialize " + historyId, e);
+ throw new IllegalStateException("Failed to serialize " + historyId, e);
}
return new PurgeLocalHistoryPayload(historyId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected PH externalizableProxy(final byte[] serialized) {
+ return new PH(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import java.io.DataInput;
import java.io.IOException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.slf4j.Logger;
* @author Robert Varga
*/
public final class PurgeTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
- private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
- private static final long serialVersionUID = 1L;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final byte[] serialized) {
- super(serialized);
- }
-
- @Override
- protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
- return TransactionIdentifier.readFrom(in);
- }
-
- @Override
- protected PurgeTransactionPayload createObject(final TransactionIdentifier identifier,
- final byte[] serialized) {
- return new PurgeTransactionPayload(identifier, serialized);
- }
- }
-
private static final Logger LOG = LoggerFactory.getLogger(PurgeTransactionPayload.class);
+ @java.io.Serial
private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(PT::new);
PurgeTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
super(transactionId, serialized);
} catch (IOException e) {
// This should never happen
LOG.error("Failed to serialize {}", transactionId, e);
- throw new RuntimeException("Failed to serialize " + transactionId, e);
+ throw new IllegalStateException("Failed to serialize " + transactionId, e);
}
return new PurgeTransactionPayload(transactionId, out.toByteArray());
}
@Override
- protected Proxy externalizableProxy(final byte[] serialized) {
- return new Proxy(serialized);
+ protected PT externalizableProxy(final byte[] serialized) {
+ return new PT(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+
+/**
+ * Serialization proxy for {@link ShardManagerSnapshot}.
+ */
+final class SM implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ShardManagerSnapshot snapshot;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public SM() {
+ // For Externalizable
+ }
+
+ SM(final ShardManagerSnapshot snapshot) {
+ this.snapshot = requireNonNull(snapshot);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ final int size = in.readInt();
+ final var shardList = new ArrayList<String>(size);
+ for (int i = 0; i < size; i++) {
+ shardList.add((String) in.readObject());
+ }
+ snapshot = new ShardManagerSnapshot(shardList);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ final var shardList = snapshot.getShardList();
+ out.writeInt(shardList.size());
+ for (var shardName : shardList) {
+ out.writeObject(shardName);
+ }
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(snapshot);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link ShardSnapshotState}.
+ */
+final class SS implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ShardSnapshotState snapshotState;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public SS() {
+ // For Externalizable
+ }
+
+ SS(final ShardSnapshotState snapshotState) {
+ this.snapshotState = requireNonNull(snapshotState);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException {
+ snapshotState = ShardDataTreeSnapshot.deserialize(in);
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ snapshotState.getSnapshot().serialize(out);
+ }
+
+ @java.io.Serial
+ private Object readResolve() {
+ return verifyNotNull(snapshotState);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+
+/**
+ * Serialization proxy for {@link SkipTransactionsPayload}.
+ */
+final class ST implements SerialForm {
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+
+ private ImmutableUnsignedLongSet transactionIds;
+ private LocalHistoryIdentifier identifier;
+ private byte[] bytes;
+
+ @SuppressWarnings("checkstyle:RedundantModifier")
+ public ST() {
+ // For Externalizable
+ }
+
+ ST(final byte[] bytes) {
+ this.bytes = requireNonNull(bytes);
+ }
+
+ @Override
+ public byte[] bytes() {
+ return bytes;
+ }
+
+ @Override
+ public void readExternal(final byte[] newBytes) throws IOException {
+ bytes = requireNonNull(newBytes);
+
+ final var in = ByteStreams.newDataInput(newBytes);
+ identifier = LocalHistoryIdentifier.readFrom(in);
+ transactionIds = verifyNotNull(ImmutableUnsignedLongSet.readFrom(in));
+ }
+
+ @Override
+ public Object readResolve() {
+ return new SkipTransactionsPayload(identifier, bytes, transactionIds);
+ }
+}
*
* @return An optional root node.
*/
- public abstract Optional<NormalizedNode<?, ?>> getRootNode();
+ public abstract Optional<NormalizedNode> getRootNode();
public abstract void serialize(ObjectOutput out) throws IOException;
}
package org.opendaylight.controller.cluster.datastore.persisted;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
/**
* Represents the persisted snapshot state for the ShardManager.
*
* @author Thomas Pantelis
*/
-public class ShardManagerSnapshot implements Serializable {
+public final class ShardManagerSnapshot implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private ShardManagerSnapshot snapshot;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ShardManagerSnapshot snapshot) {
- this.snapshot = snapshot;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- out.writeInt(snapshot.shardList.size());
- for (String shard: snapshot.shardList) {
- out.writeObject(shard);
- }
-
- out.writeInt(snapshot.prefixShardConfiguration.size());
- for (Map.Entry<?, ?> prefixShardConfigEntry : snapshot.prefixShardConfiguration.entrySet()) {
- out.writeObject(prefixShardConfigEntry.getKey());
- out.writeObject(prefixShardConfigEntry.getValue());
- }
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
- int size = in.readInt();
- List<String> localShardList = new ArrayList<>(size);
- for (int i = 0; i < size; i++) {
- localShardList.add((String) in.readObject());
- }
-
- size = in.readInt();
- Map<DOMDataTreeIdentifier, PrefixShardConfiguration> localPrefixShardConfiguration = new HashMap<>(size);
- for (int i = 0; i < size; i++) {
- localPrefixShardConfiguration.put((DOMDataTreeIdentifier) in.readObject(),
- (PrefixShardConfiguration) in.readObject());
- }
-
- snapshot = new ShardManagerSnapshot(localShardList, localPrefixShardConfiguration);
- }
-
- private Object readResolve() {
- return snapshot;
- }
- }
-
private final List<String> shardList;
- private final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixShardConfiguration;
- public ShardManagerSnapshot(final @NonNull List<String> shardList,
- final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixShardConfiguration) {
+ public ShardManagerSnapshot(final @NonNull List<String> shardList) {
this.shardList = ImmutableList.copyOf(shardList);
- this.prefixShardConfiguration = ImmutableMap.copyOf(prefixShardConfiguration);
}
public List<String> getShardList() {
- return this.shardList;
+ return shardList;
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
+ return new SM(this);
}
@Override
import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
*
* @author Thomas Pantelis
*/
-public class ShardSnapshotState implements Snapshot.State {
+public final class ShardSnapshotState implements Snapshot.State {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- private static final class Proxy implements Externalizable {
- private static final long serialVersionUID = 1L;
-
- private ShardSnapshotState snapshotState;
-
- // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
- // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
- @SuppressWarnings("checkstyle:RedundantModifier")
- public Proxy() {
- // For Externalizable
- }
-
- Proxy(final ShardSnapshotState snapshotState) {
- this.snapshotState = snapshotState;
- }
-
- @Override
- public void writeExternal(final ObjectOutput out) throws IOException {
- snapshotState.snapshot.serialize(out);
- }
-
- @Override
- public void readExternal(final ObjectInput in) throws IOException {
- snapshotState = ShardDataTreeSnapshot.deserialize(in);
- }
-
- private Object readResolve() {
- return snapshotState;
- }
- }
-
@SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+ "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+ "aren't serialized. FindBugs does not recognize this.")
return migrated;
}
+ @java.io.Serial
private Object writeReplace() {
- return new Proxy(this);
+ return new SS(this);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Payload persisted when a local history is instructed some transaction identifiers, i.e. the frontend has used them
+ * for other purposes. It contains a {@link LocalHistoryIdentifier} and a list of transaction identifiers within that
+ * local history.
+ */
+public final class SkipTransactionsPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
+ private static final Logger LOG = LoggerFactory.getLogger(SkipTransactionsPayload.class);
+ @java.io.Serial
+ private static final long serialVersionUID = 1L;
+ private static final int PROXY_SIZE = externalizableProxySize(ST::new);
+
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via externalizable proxy")
+ private final @NonNull ImmutableUnsignedLongSet transactionIds;
+
+ SkipTransactionsPayload(final @NonNull LocalHistoryIdentifier historyId,
+ final byte @NonNull [] serialized, final ImmutableUnsignedLongSet transactionIds) {
+ super(historyId, serialized);
+ this.transactionIds = requireNonNull(transactionIds);
+ }
+
+ public static @NonNull SkipTransactionsPayload create(final LocalHistoryIdentifier historyId,
+ final ImmutableUnsignedLongSet transactionIds, final int initialSerializedBufferCapacity) {
+ final var out = ByteStreams.newDataOutput(initialSerializedBufferCapacity);
+ try {
+ historyId.writeTo(out);
+ transactionIds.writeTo(out);
+ } catch (IOException e) {
+ // This should never happen
+ LOG.error("Failed to serialize {} ids {}", historyId, transactionIds, e);
+ throw new IllegalStateException("Failed to serialize " + historyId + " ids " + transactionIds, e);
+ }
+
+ return new SkipTransactionsPayload(historyId, out.toByteArray(), transactionIds);
+ }
+
+ public @NonNull ImmutableUnsignedLongSet getTransactionIds() {
+ return transactionIds;
+ }
+
+ @Override
+ protected ST externalizableProxy(final byte[] serialized) {
+ return new ST(serialized);
+ }
+
+ @Override
+ protected int externalizableProxySize() {
+ return PROXY_SIZE;
+ }
+}
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
+import org.opendaylight.yangtools.yang.common.Empty;
public abstract class AbstractShardManagerCreator<T extends AbstractShardManagerCreator<T>> {
- private SettableFuture<Void> readinessFuture;
+ private SettableFuture<Empty> readinessFuture;
private ClusterWrapper cluster;
private Configuration configuration;
private DatastoreContextFactory datastoreContextFactory;
return self();
}
- SettableFuture<Void> getReadinessFuture() {
+ SettableFuture<Empty> getReadinessFuture() {
return readinessFuture;
}
- public T readinessFuture(final SettableFuture<Void> newReadinessFuture) {
+ public T readinessFuture(final SettableFuture<Empty> newReadinessFuture) {
checkSealed();
this.readinessFuture = newReadinessFuture;
return self();
*/
package org.opendaylight.controller.cluster.datastore.shardmanager;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
import java.util.concurrent.atomic.AtomicReference;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
-final class AtomicShardContextProvider extends AtomicReference<EffectiveModelContext>
- implements EffectiveModelContextProvider {
+final class AtomicShardContextProvider extends AtomicReference<EffectiveModelContext> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
- @Override
- public EffectiveModelContext getEffectiveModelContext() {
- return Verify.verifyNotNull(get());
+ @NonNull EffectiveModelContext modelContext() {
+ return verifyNotNull(get());
}
}
\ No newline at end of file
import java.util.Optional;
import java.util.Set;
import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardInitialized;
import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardReady;
import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Props newProps() {
Props props = requireNonNull(builder).id(shardId).peerAddresses(initialPeerAddresses)
- .datastoreContext(datastoreContext).schemaContextProvider(schemaContextProvider).props();
+ .datastoreContext(datastoreContext).schemaContextProvider(schemaContextProvider::modelContext).props();
builder = null;
return props;
}
return shardId;
}
- void setLocalDataTree(final Optional<ReadOnlyDataTree> dataTree) {
- this.localShardDataTree = dataTree;
+ void setLocalDataTree(final ReadOnlyDataTree dataTree) {
+ localShardDataTree = Optional.ofNullable(dataTree);
}
Optional<ReadOnlyDataTree> getLocalShardDataTree() {
}
void setDatastoreContext(final DatastoreContext newDatastoreContext, final ActorRef sender) {
- this.datastoreContext = newDatastoreContext;
+ datastoreContext = newDatastoreContext;
if (actor != null) {
LOG.debug("Sending new DatastoreContext to {}", shardId);
- actor.tell(this.datastoreContext, sender);
+ actor.tell(datastoreContext, sender);
}
}
notifyOnShardInitializedCallbacks();
}
- void peerDown(final MemberName memberName, final String peerId, final ActorRef sender) {
- if (actor != null) {
- actor.tell(new PeerDown(memberName, peerId), sender);
- }
- }
-
- void peerUp(final MemberName memberName, final String peerId, final ActorRef sender) {
- if (actor != null) {
- actor.tell(new PeerUp(memberName, peerId), sender);
- }
- }
-
boolean isShardReady() {
return !RaftState.Candidate.name().equals(role) && !Strings.isNullOrEmpty(role);
}
}
String getSerializedLeaderActor() {
- if (isLeader()) {
- return Serialization.serializedActorPath(getActor());
- } else {
- return addressResolver.resolve(leaderId);
- }
+ return isLeader() ? Serialization.serializedActorPath(getActor()) : addressResolver.resolve(leaderId);
}
void setActorInitialized() {
LOG.debug("Shard {} is initialized", shardId);
- this.actorInitialized = true;
+ actorInitialized = true;
notifyOnShardInitializedCallbacks();
}
return;
}
- boolean ready = isShardReadyWithLeaderId();
-
- LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
- ready ? "ready" : "initialized", onShardInitializedSet.size());
+ final boolean ready = isShardReadyWithLeaderId();
+ final String readyStr = ready ? "ready" : "initialized";
+ LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId, readyStr,
+ onShardInitializedSet.size());
Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
while (iter.hasNext()) {
}
void setRole(final String newRole) {
- this.role = newRole;
+ role = newRole;
notifyOnShardInitializedCallbacks();
}
}
void setFollowerSyncStatus(final boolean syncStatus) {
- this.followerSyncStatus = syncStatus;
+ followerSyncStatus = syncStatus;
}
boolean isInSync() {
- if (RaftState.Follower.name().equals(this.role)) {
+ if (RaftState.Follower.name().equals(role)) {
return followerSyncStatus;
- } else if (RaftState.Leader.name().equals(this.role)) {
+ } else if (RaftState.Leader.name().equals(role)) {
return true;
}
}
boolean setLeaderId(final String newLeaderId) {
- final boolean changed = !Objects.equals(this.leaderId, newLeaderId);
- this.leaderId = newLeaderId;
+ final boolean changed = !Objects.equals(leaderId, newLeaderId);
+ leaderId = newLeaderId;
if (newLeaderId != null) {
- this.leaderAvailable = true;
+ leaderAvailable = true;
}
notifyOnShardInitializedCallbacks();
}
void setActiveMember(final boolean isActiveMember) {
- this.activeMember = isActiveMember;
+ activeMember = isActiveMember;
}
EffectiveModelContext getSchemaContext() {
- return schemaContextProvider.getEffectiveModelContext();
+ return schemaContextProvider.modelContext();
}
void setSchemaContext(final EffectiveModelContext schemaContext) {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.shardmanager;
-import static akka.pattern.Patterns.ask;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
import org.opendaylight.controller.cluster.datastore.exceptions.AlreadyExistsException;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
-import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
-import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.datastore.utils.CompositeOnComplete;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.sharding.PrefixedShardConfigUpdateHandler;
-import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private DatastoreContextFactory datastoreContextFactory;
- private final SettableFuture<Void> readinessFuture;
+ private final SettableFuture<Empty> readinessFuture;
private final PrimaryShardInfoFutureCache primaryShardInfoCache;
@VisibleForTesting
final ShardPeerAddressResolver peerAddressResolver;
- private EffectiveModelContext schemaContext;
+ private EffectiveModelContext modelContext;
private DatastoreSnapshot restoreFromSnapshot;
private final Set<Consumer<String>> shardAvailabilityCallbacks = new HashSet<>();
private final String persistenceId;
- private final AbstractDataStore dataStore;
-
- private PrefixedShardConfigUpdateHandler configUpdateHandler;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
ShardManager(final AbstractShardManagerCreator<?> builder) {
- this.cluster = builder.getCluster();
- this.configuration = builder.getConfiguration();
- this.datastoreContextFactory = builder.getDatastoreContextFactory();
- this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
- this.shardDispatcherPath =
- new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
- this.readinessFuture = builder.getReadinessFuture();
- this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
- this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
+ cluster = builder.getCluster();
+ configuration = builder.getConfiguration();
+ datastoreContextFactory = builder.getDatastoreContextFactory();
+ type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
+ shardDispatcherPath = new Dispatchers(context().system().dispatchers())
+ .getDispatcherPath(Dispatchers.DispatcherType.Shard);
+ readinessFuture = builder.getReadinessFuture();
+ primaryShardInfoCache = builder.getPrimaryShardInfoCache();
+ restoreFromSnapshot = builder.getRestoreFromSnapshot();
String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
cluster.subscribeToMemberEvents(getSelf());
shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
- "shard-manager-" + this.type,
+ "shard-manager-" + type,
datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
shardManagerMBean.registerMBean();
-
- dataStore = builder.getDistributedDataStore();
}
@Override
@Override
public void handleCommand(final Object message) throws Exception {
- if (message instanceof FindPrimary) {
- findPrimary((FindPrimary)message);
- } else if (message instanceof FindLocalShard) {
- findLocalShard((FindLocalShard) message);
- } else if (message instanceof UpdateSchemaContext) {
- updateSchemaContext(message);
- } else if (message instanceof ActorInitialized) {
- onActorInitialized(message);
- } else if (message instanceof ClusterEvent.MemberUp) {
- memberUp((ClusterEvent.MemberUp) message);
- } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
- memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
- } else if (message instanceof ClusterEvent.MemberExited) {
- memberExited((ClusterEvent.MemberExited) message);
- } else if (message instanceof ClusterEvent.MemberRemoved) {
- memberRemoved((ClusterEvent.MemberRemoved) message);
- } else if (message instanceof ClusterEvent.UnreachableMember) {
- memberUnreachable((ClusterEvent.UnreachableMember) message);
- } else if (message instanceof ClusterEvent.ReachableMember) {
- memberReachable((ClusterEvent.ReachableMember) message);
- } else if (message instanceof DatastoreContextFactory) {
- onDatastoreContextFactory((DatastoreContextFactory) message);
- } else if (message instanceof RoleChangeNotification) {
- onRoleChangeNotification((RoleChangeNotification) message);
- } else if (message instanceof FollowerInitialSyncUpStatus) {
- onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
- } else if (message instanceof ShardNotInitializedTimeout) {
- onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
- } else if (message instanceof ShardLeaderStateChanged) {
- onLeaderStateChanged((ShardLeaderStateChanged) message);
- } else if (message instanceof SwitchShardBehavior) {
- onSwitchShardBehavior((SwitchShardBehavior) message);
- } else if (message instanceof CreateShard) {
- onCreateShard((CreateShard)message);
- } else if (message instanceof AddShardReplica) {
- onAddShardReplica((AddShardReplica) message);
- } else if (message instanceof AddPrefixShardReplica) {
- onAddPrefixShardReplica((AddPrefixShardReplica) message);
- } else if (message instanceof PrefixShardCreated) {
- onPrefixShardCreated((PrefixShardCreated) message);
- } else if (message instanceof PrefixShardRemoved) {
- onPrefixShardRemoved((PrefixShardRemoved) message);
- } else if (message instanceof InitConfigListener) {
- onInitConfigListener();
- } else if (message instanceof ForwardedAddServerReply) {
- ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
- onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
- msg.removeShardOnFailure);
- } else if (message instanceof ForwardedAddServerFailure) {
- ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
+ if (message instanceof FindPrimary msg) {
+ findPrimary(msg);
+ } else if (message instanceof FindLocalShard msg) {
+ findLocalShard(msg);
+ } else if (message instanceof UpdateSchemaContext msg) {
+ updateSchemaContext(msg);
+ } else if (message instanceof ActorInitialized msg) {
+ onActorInitialized(msg);
+ } else if (message instanceof ClusterEvent.MemberUp msg) {
+ memberUp(msg);
+ } else if (message instanceof ClusterEvent.MemberWeaklyUp msg) {
+ memberWeaklyUp(msg);
+ } else if (message instanceof ClusterEvent.MemberExited msg) {
+ memberExited(msg);
+ } else if (message instanceof ClusterEvent.MemberRemoved msg) {
+ memberRemoved(msg);
+ } else if (message instanceof ClusterEvent.UnreachableMember msg) {
+ memberUnreachable(msg);
+ } else if (message instanceof ClusterEvent.ReachableMember msg) {
+ memberReachable(msg);
+ } else if (message instanceof DatastoreContextFactory msg) {
+ onDatastoreContextFactory(msg);
+ } else if (message instanceof RoleChangeNotification msg) {
+ onRoleChangeNotification(msg);
+ } else if (message instanceof FollowerInitialSyncUpStatus msg) {
+ onFollowerInitialSyncStatus(msg);
+ } else if (message instanceof ShardNotInitializedTimeout msg) {
+ onShardNotInitializedTimeout(msg);
+ } else if (message instanceof ShardLeaderStateChanged msg) {
+ onLeaderStateChanged(msg);
+ } else if (message instanceof SwitchShardBehavior msg) {
+ onSwitchShardBehavior(msg);
+ } else if (message instanceof CreateShard msg) {
+ onCreateShard(msg);
+ } else if (message instanceof AddShardReplica msg) {
+ onAddShardReplica(msg);
+ } else if (message instanceof ForwardedAddServerReply msg) {
+ onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath, msg.removeShardOnFailure);
+ } else if (message instanceof ForwardedAddServerFailure msg) {
onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
- } else if (message instanceof RemoveShardReplica) {
- onRemoveShardReplica((RemoveShardReplica) message);
- } else if (message instanceof RemovePrefixShardReplica) {
- onRemovePrefixShardReplica((RemovePrefixShardReplica) message);
- } else if (message instanceof WrappedShardResponse) {
- onWrappedShardResponse((WrappedShardResponse) message);
- } else if (message instanceof GetSnapshot) {
- onGetSnapshot((GetSnapshot) message);
- } else if (message instanceof ServerRemoved) {
- onShardReplicaRemoved((ServerRemoved) message);
- } else if (message instanceof ChangeShardMembersVotingStatus) {
- onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
- } else if (message instanceof FlipShardMembersVotingStatus) {
- onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
- } else if (message instanceof SaveSnapshotSuccess) {
- onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
- } else if (message instanceof SaveSnapshotFailure) {
- LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
- ((SaveSnapshotFailure) message).cause());
+ } else if (message instanceof RemoveShardReplica msg) {
+ onRemoveShardReplica(msg);
+ } else if (message instanceof WrappedShardResponse msg) {
+ onWrappedShardResponse(msg);
+ } else if (message instanceof GetSnapshot msg) {
+ onGetSnapshot(msg);
+ } else if (message instanceof ServerRemoved msg) {
+ onShardReplicaRemoved(msg);
+ } else if (message instanceof ChangeShardMembersVotingStatus msg) {
+ onChangeShardServersVotingStatus(msg);
+ } else if (message instanceof FlipShardMembersVotingStatus msg) {
+ onFlipShardMembersVotingStatus(msg);
+ } else if (message instanceof SaveSnapshotSuccess msg) {
+ onSaveSnapshotSuccess(msg);
+ } else if (message instanceof SaveSnapshotFailure msg) {
+ LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(), msg.cause());
} else if (message instanceof Shutdown) {
onShutDown();
} else if (message instanceof GetLocalShardIds) {
onGetLocalShardIds();
- } else if (message instanceof GetShardRole) {
- onGetShardRole((GetShardRole) message);
- } else if (message instanceof RunnableMessage) {
- ((RunnableMessage)message).run();
- } else if (message instanceof RegisterForShardAvailabilityChanges) {
- onRegisterForShardAvailabilityChanges((RegisterForShardAvailabilityChanges)message);
- } else if (message instanceof DeleteSnapshotsFailure) {
- LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
- ((DeleteSnapshotsFailure) message).cause());
+ } else if (message instanceof GetShardRole msg) {
+ onGetShardRole(msg);
+ } else if (message instanceof RunnableMessage msg) {
+ msg.run();
+ } else if (message instanceof RegisterForShardAvailabilityChanges msg) {
+ onRegisterForShardAvailabilityChanges(msg);
+ } else if (message instanceof DeleteSnapshotsFailure msg) {
+ LOG.warn("{}: Failed to delete prior snapshots", persistenceId(), msg.cause());
} else if (message instanceof DeleteSnapshotsSuccess) {
LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
} else if (message instanceof RegisterRoleChangeListenerReply) {
LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
- } else if (message instanceof ClusterEvent.MemberEvent) {
- LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), message);
+ } else if (message instanceof ClusterEvent.MemberEvent msg) {
+ LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), msg);
} else {
unknownMessage(message);
}
getSender().tell(new GetShardRoleReply(shardInformation.getRole()), ActorRef.noSender());
}
- private void onInitConfigListener() {
- LOG.debug("{}: Initializing config listener on {}", persistenceId(), cluster.getCurrentMemberName());
-
- final org.opendaylight.mdsal.common.api.LogicalDatastoreType datastoreType =
- org.opendaylight.mdsal.common.api.LogicalDatastoreType
- .valueOf(datastoreContextFactory.getBaseDatastoreContext().getLogicalStoreType().name());
-
- if (configUpdateHandler != null) {
- configUpdateHandler.close();
- }
-
- configUpdateHandler = new PrefixedShardConfigUpdateHandler(self(), cluster.getCurrentMemberName());
- configUpdateHandler.initListener(dataStore, datastoreType);
- }
-
void onShutDown() {
List<Future<Boolean>> stopFutures = new ArrayList<>(localShards.size());
for (ShardInformation info : localShards.values()) {
}
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void removePrefixShardReplica(final RemovePrefixShardReplica contextMessage, final String shardName,
- final String primaryPath, final ActorRef sender) {
- if (isShardReplicaOperationInProgress(shardName, sender)) {
- return;
- }
-
- shardReplicaOperationsInProgress.add(shardName);
-
- final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
-
- final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
-
- //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
- LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
- primaryPath, shardId);
-
- Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
- Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
- new RemoveServer(shardId.toString()), removeServerTimeout);
-
- futureObj.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object response) {
- if (failure != null) {
- shardReplicaOperationsInProgress.remove(shardName);
-
- LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
- shardName, failure);
-
- // FAILURE
- sender.tell(new Status.Failure(new RuntimeException(
- String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
- failure)), self());
- } else {
- // SUCCESS
- self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
- }
- }
- }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void removeShardReplica(final RemoveShardReplica contextMessage, final String shardName,
final String primaryPath, final ActorRef sender) {
if (isShardReplicaOperationInProgress(shardName, sender)) {
primaryPath, shardId);
Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
- Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
+ Future<Object> futureObj = Patterns.ask(getContext().actorSelection(primaryPath),
new RemoveServer(shardId.toString()), removeServerTimeout);
futureObj.onComplete(new OnComplete<>() {
}
}
- private void onPrefixShardCreated(final PrefixShardCreated message) {
- LOG.debug("{}: onPrefixShardCreated: {}", persistenceId(), message);
-
- final PrefixShardConfiguration config = message.getConfiguration();
- final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
- ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier()));
- final String shardName = shardId.getShardName();
-
- if (isPreviousShardActorStopInProgress(shardName, message)) {
- return;
- }
-
- if (localShards.containsKey(shardName)) {
- LOG.debug("{}: Received create for an already existing shard {}", persistenceId(), shardName);
- final PrefixShardConfiguration existing =
- configuration.getAllPrefixShardConfigurations().get(config.getPrefix());
-
- if (existing != null && existing.equals(config)) {
- // we don't have to do nothing here
- return;
- }
- }
-
- doCreatePrefixShard(config, shardId, shardName);
- }
-
private boolean isPreviousShardActorStopInProgress(final String shardName, final Object messageToDefer) {
final CompositeOnComplete<Boolean> stopOnComplete = shardActorsStopping.get(shardName);
if (stopOnComplete == null) {
return true;
}
- private void doCreatePrefixShard(final PrefixShardConfiguration config, final ShardIdentifier shardId,
- final String shardName) {
- configuration.addPrefixShardConfiguration(config);
-
- final Builder builder = newShardDatastoreContextBuilder(shardName);
- builder.logicalStoreType(config.getPrefix().getDatastoreType())
- .storeRoot(config.getPrefix().getRootIdentifier());
- DatastoreContext shardDatastoreContext = builder.build();
-
- final Map<String, String> peerAddresses = getPeerAddresses(shardName);
- final boolean isActiveMember = true;
-
- LOG.debug("{} doCreatePrefixShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
- persistenceId(), shardId, config.getShardMemberNames(), peerAddresses, isActiveMember);
-
- final ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
- shardDatastoreContext, Shard.builder(), peerAddressResolver);
- info.setActiveMember(isActiveMember);
- localShards.put(info.getShardName(), info);
-
- if (schemaContext != null) {
- info.setSchemaContext(schemaContext);
- info.setActor(newShardActor(info));
- }
- }
-
- private void onPrefixShardRemoved(final PrefixShardRemoved message) {
- LOG.debug("{}: onPrefixShardRemoved : {}", persistenceId(), message);
-
- final DOMDataTreeIdentifier prefix = message.getPrefix();
- final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
- ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
- configuration.removePrefixShardConfiguration(prefix);
- removeShard(shardId);
- }
-
private void doCreateShard(final CreateShard createShard) {
final ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
final String shardName = moduleShardConfig.getShardName();
// the shard with no peers and with elections disabled so it stays as follower. A
// subsequent AddServer request will be needed to make it an active member.
isActiveMember = false;
- peerAddresses = Collections.emptyMap();
+ peerAddresses = Map.of();
shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
.customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
}
info.setActiveMember(isActiveMember);
localShards.put(info.getShardName(), info);
- if (schemaContext != null) {
- info.setSchemaContext(schemaContext);
+ if (modelContext != null) {
+ info.setSchemaContext(modelContext);
info.setActor(newShardActor(info));
}
}
private void checkReady() {
if (isReadyWithLeaderId()) {
LOG.info("{}: All Shards are ready - data store {} is ready", persistenceId(), type);
- readinessFuture.set(null);
+ readinessFuture.set(Empty.value());
}
}
ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
if (shardInformation != null) {
- shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
+ shardInformation.setLocalDataTree(leaderStateChanged.localShardDataTree());
shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
primaryShardInfoCache.remove(shardInformation.getShardName());
return true;
}
- private void onActorInitialized(final Object message) {
- final ActorRef sender = getSender();
-
- if (sender == null) {
- // why is a non-actor sending this message? Just ignore.
- return;
- }
+ private void onActorInitialized(final ActorInitialized message) {
+ final var sender = message.actorRef();
String actorName = sender.path().name();
//find shard name from actor name; actor name is stringified shardId
protected void handleRecover(final Object message) throws Exception {
if (message instanceof RecoveryCompleted) {
onRecoveryCompleted();
- } else if (message instanceof SnapshotOffer) {
- applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
+ } else if (message instanceof SnapshotOffer msg) {
+ applyShardManagerSnapshot((ShardManagerSnapshot) msg.snapshot());
}
}
message.member().address());
peerAddressResolver.removePeerAddress(memberName);
-
- for (ShardInformation info : localShards.values()) {
- info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
- }
}
private void memberExited(final ClusterEvent.MemberExited message) {
message.member().address());
peerAddressResolver.removePeerAddress(memberName);
-
- for (ShardInformation info : localShards.values()) {
- info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
- }
}
private void memberUp(final ClusterEvent.MemberUp message) {
String shardName = info.getShardName();
String peerId = getShardIdentifier(memberName, shardName).toString();
info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
-
- info.peerUp(memberName, peerId, getSelf());
}
}
notifyShardAvailabilityCallbacks(info);
}
-
- info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
}
}
LOG.debug("Marking Leader {} as available.", leaderId);
info.setLeaderAvailable(true);
}
-
- info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
}
}
*
* @param message the message to send
*/
- private void updateSchemaContext(final Object message) {
- schemaContext = ((UpdateSchemaContext) message).getEffectiveModelContext();
+ private void updateSchemaContext(final UpdateSchemaContext message) {
+ modelContext = message.modelContext();
- LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getModules().size());
+ LOG.debug("Got updated SchemaContext: # of modules {}", modelContext.getModules().size());
for (ShardInformation info : localShards.values()) {
- info.setSchemaContext(schemaContext);
+ info.setSchemaContext(modelContext);
if (info.getActor() == null) {
LOG.debug("Creating Shard {}", info.getShardId());
String peerId = getShardIdentifier(memberName, shardName).toString() ;
String peerAddress = peerAddressResolver.getShardActorAddress(shardName, memberName);
info.updatePeerAddress(peerId, peerAddress, getSelf());
- info.peerUp(memberName, peerId, getSelf());
LOG.debug("{}: updated peer {} on member {} with address {} on shard {} whose actor address is {}",
persistenceId(), peerId, memberName, peerAddress, info.getShardId(), info.getActor());
}
sendResponse(info, message.isWaitUntilReady(), true, () -> {
String primaryPath = info.getSerializedLeaderActor();
Object found = canReturnLocalShardState && info.isLeader()
- ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
+ ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().orElseThrow()) :
new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
.getShardInitializationTimeout().duration().$times(2));
- Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
+ Future<Object> futureObj = Patterns.ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
futureObj.onComplete(new OnComplete<>() {
@Override
public void onComplete(final Throwable failure, final Object response) {
if (failure != null) {
handler.onFailure(failure);
+ } else if (response instanceof RemotePrimaryShardFound msg) {
+ handler.onRemotePrimaryShardFound(msg);
+ } else if (response instanceof LocalPrimaryShardFound msg) {
+ handler.onLocalPrimaryFound(msg);
} else {
- if (response instanceof RemotePrimaryShardFound) {
- handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
- } else if (response instanceof LocalPrimaryShardFound) {
- handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
- } else {
- handler.onUnknownResponse(response);
- }
+ handler.onUnknownResponse(response);
}
}
}, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
* Create shards that are local to the member on which the ShardManager runs.
*/
private void createLocalShards() {
- MemberName memberName = this.cluster.getCurrentMemberName();
- Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
+ MemberName memberName = cluster.getCurrentMemberName();
+ Collection<String> memberShardNames = configuration.getMemberShardNames(memberName);
Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
if (restoreFromSnapshot != null) {
private Map<String, String> getPeerAddresses(final String shardName, final Collection<MemberName> members) {
Map<String, String> peerAddresses = new HashMap<>();
- MemberName currentMemberName = this.cluster.getCurrentMemberName();
+ MemberName currentMemberName = cluster.getCurrentMemberName();
for (MemberName memberName : members) {
if (!currentMemberName.equals(memberName)) {
return false;
}
- private void onAddPrefixShardReplica(final AddPrefixShardReplica message) {
- LOG.debug("{}: onAddPrefixShardReplica: {}", persistenceId(), message);
-
- final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
- ClusterUtils.getCleanShardName(message.getShardPrefix()));
- final String shardName = shardId.getShardName();
-
- // Create the localShard
- if (schemaContext == null) {
- LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
- persistenceId(), shardName);
- getSender().tell(new Status.Failure(new IllegalStateException(
- "No SchemaContext is available in order to create a local shard instance for " + shardName)),
- getSelf());
- return;
- }
-
- findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
- getSelf()) {
- @Override
- public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
- final RunnableMessage runnable = (RunnableMessage) () -> addPrefixShard(getShardName(),
- message.getShardPrefix(), response, getSender());
- if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
- getSelf().tell(runnable, getTargetActor());
- }
- }
-
- @Override
- public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
- sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
- }
- });
- }
-
private void onAddShardReplica(final AddShardReplica shardReplicaMsg) {
final String shardName = shardReplicaMsg.getShardName();
LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);
// verify the shard with the specified name is present in the cluster configuration
- if (!this.configuration.isShardConfigured(shardName)) {
+ if (!configuration.isShardConfigured(shardName)) {
LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
getSender().tell(new Status.Failure(new IllegalArgumentException(
"No module configuration exists for shard " + shardName)), getSelf());
}
// Create the localShard
- if (schemaContext == null) {
+ if (modelContext == null) {
LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
persistenceId(), shardName);
getSender().tell(new Status.Failure(new IllegalStateException(
});
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
LOG.debug("{}: Local shard {} already exists", persistenceId(), shardName);
sender.tell(new Status.Failure(new AlreadyExistsException(
String.format("Local shard %s already exists", shardName))), getSelf());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
- private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
- final RemotePrimaryShardFound response, final ActorRef sender) {
- if (isShardReplicaOperationInProgress(shardName, sender)) {
- return;
- }
-
- shardReplicaOperationsInProgress.add(shardName);
-
- final ShardInformation shardInfo;
- final boolean removeShardOnFailure;
- ShardInformation existingShardInfo = localShards.get(shardName);
- if (existingShardInfo == null) {
- removeShardOnFailure = true;
- ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
-
- final Builder builder = newShardDatastoreContextBuilder(shardName);
- builder.storeRoot(shardPrefix).customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
- DatastoreContext datastoreContext = builder.build();
-
- shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
- Shard.builder(), peerAddressResolver);
- shardInfo.setActiveMember(false);
- shardInfo.setSchemaContext(schemaContext);
- localShards.put(shardName, shardInfo);
- shardInfo.setActor(newShardActor(shardInfo));
- } else {
- removeShardOnFailure = false;
- shardInfo = existingShardInfo;
- }
-
- execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
- }
-
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
if (isShardReplicaOperationInProgress(shardName, sender)) {
return;
shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
Shard.builder(), peerAddressResolver);
shardInfo.setActiveMember(false);
- shardInfo.setSchemaContext(schemaContext);
+ shardInfo.setSchemaContext(modelContext);
localShards.put(shardName, shardInfo);
shardInfo.setActor(newShardActor(shardInfo));
} else {
final Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
.getShardLeaderElectionTimeout().duration());
- final Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
+ final Future<Object> futureObj = Patterns.ask(getContext().actorSelection(response.getPrimaryPath()),
new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);
futureObj.onComplete(new OnComplete<>() {
private static Exception getServerChangeException(final Class<?> serverChange,
final ServerChangeStatus serverChangeStatus, final String leaderPath, final ShardIdentifier shardId) {
- switch (serverChangeStatus) {
- case TIMEOUT:
- return new TimeoutException(String.format(
- "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
- + "Possible causes - there was a problem replicating the data or shard leadership changed "
- + "while replicating the shard data", leaderPath, shardId.getShardName()));
- case NO_LEADER:
- return new NoShardLeaderException(shardId);
- case NOT_SUPPORTED:
- return new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
- serverChange.getSimpleName(), shardId.getShardName()));
- default :
- return new RuntimeException(String.format("%s request to leader %s for shard %s failed with status %s",
- serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
- }
+ return switch (serverChangeStatus) {
+ case TIMEOUT -> new TimeoutException("""
+ The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible \
+ causes - there was a problem replicating the data or shard leadership changed while replicating the \
+ shard data""".formatted(leaderPath, shardId.getShardName()));
+ case NO_LEADER -> new NoShardLeaderException(shardId);
+ case NOT_SUPPORTED -> new UnsupportedOperationException(
+ "%s request is not supported for shard %s".formatted(
+ serverChange.getSimpleName(), shardId.getShardName()));
+ default -> new RuntimeException("%s request to leader %s for shard %s failed with status %s".formatted(
+ serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
+ };
}
private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
});
}
- private void onRemovePrefixShardReplica(final RemovePrefixShardReplica message) {
- LOG.debug("{}: onRemovePrefixShardReplica: {}", persistenceId(), message);
-
- final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
- ClusterUtils.getCleanShardName(message.getShardPrefix()));
- final String shardName = shardId.getShardName();
-
- findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(),
- shardName, persistenceId(), getSelf()) {
- @Override
- public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
- doRemoveShardReplicaAsync(response.getPrimaryPath());
- }
-
- @Override
- public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
- doRemoveShardReplicaAsync(response.getPrimaryPath());
- }
-
- private void doRemoveShardReplicaAsync(final String primaryPath) {
- getSelf().tell((RunnableMessage) () -> removePrefixShardReplica(message, getShardName(),
- primaryPath, getSender()), getTargetActor());
- }
- });
- }
-
private void persistShardList() {
List<String> shardList = new ArrayList<>(localShards.keySet());
for (ShardInformation shardInfo : localShards.values()) {
}
}
LOG.debug("{}: persisting the shard list {}", persistenceId(), shardList);
- saveSnapshot(updateShardManagerSnapshot(shardList, configuration.getAllPrefixShardConfigurations()));
+ saveSnapshot(updateShardManagerSnapshot(shardList));
}
- private ShardManagerSnapshot updateShardManagerSnapshot(
- final List<String> shardList,
- final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> allPrefixShardConfigurations) {
- currentSnapshot = new ShardManagerSnapshot(shardList, allPrefixShardConfigurations);
+ private ShardManagerSnapshot updateShardManagerSnapshot(final List<String> shardList) {
+ currentSnapshot = new ShardManagerSnapshot(shardList);
return currentSnapshot;
}
ActorRef sender = getSender();
final String shardName = flipMembersVotingStatus.getShardName();
findLocalShard(shardName, sender, localShardFound -> {
- Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
+ Future<Object> future = Patterns.ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
Timeout.apply(30, TimeUnit.SECONDS));
future.onComplete(new OnComplete<>() {
Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
.getShardInitializationTimeout().duration().$times(2));
- Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
+ Future<Object> futureObj = Patterns.ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
futureObj.onComplete(new OnComplete<>() {
@Override
public void onComplete(final Throwable failure, final Object response) {
if (failure != null) {
LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
- failure);
+ failure);
sender.tell(new Status.Failure(new RuntimeException(
- String.format("Failed to find local shard %s", shardName), failure)), self());
+ String.format("Failed to find local shard %s", shardName), failure)), self());
+ } if (response instanceof LocalShardFound msg) {
+ getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept(msg), sender);
+ } else if (response instanceof LocalShardNotFound) {
+ LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
+ sender.tell(new Status.Failure(new IllegalArgumentException(
+ String.format("Local shard %s does not exist", shardName))), self());
} else {
- if (response instanceof LocalShardFound) {
- getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
- sender);
- } else if (response instanceof LocalShardNotFound) {
- LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
- sender.tell(new Status.Failure(new IllegalArgumentException(
- String.format("Local shard %s does not exist", shardName))), self());
- } else {
- LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
- response);
- sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
- : new RuntimeException(
- String.format("Failed to find local shard %s: received response: %s", shardName,
- response))), self());
- }
+ LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
+ response);
+ sender.tell(new Status.Failure(response instanceof Throwable throwable ? throwable
+ : new RuntimeException(String.format("Failed to find local shard %s: received response: %s",
+ shardName, response))), self());
}
}
}, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
changeServersVotingStatus, shardActorRef.path());
Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
- Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
+ Future<Object> futureObj = Patterns.ask(shardActorRef, changeServersVotingStatus, timeout);
futureObj.onComplete(new OnComplete<>() {
@Override
import akka.actor.ActorRef;
import akka.pattern.Patterns;
+import com.google.common.base.Throwables;
import java.util.List;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
try {
return (List<String>) Await.result(
Patterns.ask(shardManager, GetLocalShardIds.INSTANCE, ASK_TIMEOUT_MILLIS), Duration.Inf());
- } catch (RuntimeException e) {
- throw e;
} catch (Exception e) {
- throw new RuntimeException(e);
+ Throwables.throwIfUnchecked(e);
+ throw new IllegalStateException(e);
}
}
try {
Await.result(Patterns.ask(shardManager, new SwitchShardBehavior(shardId, state, term),
ASK_TIMEOUT_MILLIS), Duration.Inf());
- } catch (RuntimeException e) {
- throw e;
} catch (Exception e) {
- throw new RuntimeException(e);
+ Throwables.throwIfUnchecked(e);
+ throw new IllegalStateException(e);
}
break;
case Candidate:
+++ /dev/null
-/*
- * Copyright (c) 2015 Dell Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.shardmanager;
-
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collections;
-import java.util.List;
-import org.eclipse.jdt.annotation.NonNull;
-
-
-/**
- * Persisted data of the ShardManager.
- *
- * @deprecated Use {@link org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot} instead.
- */
-@Deprecated(forRemoval = true)
-public final class ShardManagerSnapshot implements Serializable {
- private static final long serialVersionUID = 1L;
- private final List<String> shardList;
-
- ShardManagerSnapshot(final @NonNull List<String> shardList) {
- this.shardList = ImmutableList.copyOf(shardList);
- }
-
- public List<String> getShardList() {
- return this.shardList;
- }
-
- /**
- * Creates a ShardManagerSnapshot.
- *
- * @deprecated This method is for migration only and should me removed once
- * org.opendaylight.controller.cluster.datastore.ShardManagerSnapshot is removed.
- */
- @Deprecated
- public static ShardManagerSnapshot forShardList(final @NonNull List<String> shardList) {
- return new ShardManagerSnapshot(shardList);
- }
-
- private Object readResolve() {
- return new org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot(shardList,
- Collections.emptyMap());
- }
-
- @Override
- public String toString() {
- return "ShardManagerSnapshot [ShardList = " + shardList + " ]";
- }
-}
public String findShard(YangInstanceIdentifier path) {
return DEFAULT_SHARD;
}
-
- @Override
- public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
- return YangInstanceIdentifier.empty();
- }
}
String shardName = configuration.getShardNameForModule(moduleName);
return shardName != null ? shardName : DefaultShardStrategy.DEFAULT_SHARD;
}
-
- @Override
- public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
- return YangInstanceIdentifier.empty();
- }
-
-
}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.shardstrategy;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Shard Strategy that resolves a path to a prefix shard name.
- */
-public class PrefixShardStrategy implements ShardStrategy {
-
- public static final String NAME = "prefix";
-
- private final String shardName;
- private final YangInstanceIdentifier prefix;
-
- public PrefixShardStrategy(final String shardName,
- final YangInstanceIdentifier prefix) {
- this.shardName = shardName != null ? shardName : DefaultShardStrategy.DEFAULT_SHARD;
- this.prefix = prefix;
- }
-
- @Override
- public String findShard(final YangInstanceIdentifier path) {
- return shardName;
- }
-
- @Override
- public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
- return prefix;
- }
-}
* @return the corresponding shard name.
*/
String findShard(YangInstanceIdentifier path);
-
- /**
- * Get the prefix of the shard that contains the data pointed to by the specified path.
- * @param path the location of the data in the logical tree.
- * @return the corresponding shards prefix.
- */
- YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path);
}
import static java.util.Objects.requireNonNull;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class ShardStrategyFactory {
private static final String UNKNOWN_MODULE_NAME = "unknown";
private final Configuration configuration;
- private final LogicalDatastoreType logicalStoreType;
- public ShardStrategyFactory(final Configuration configuration, final LogicalDatastoreType logicalStoreType) {
+ public ShardStrategyFactory(final Configuration configuration) {
checkState(configuration != null, "configuration should not be missing");
this.configuration = configuration;
- this.logicalStoreType = requireNonNull(logicalStoreType);
}
public ShardStrategy getStrategy(final YangInstanceIdentifier path) {
- // try with the legacy module based shard mapping
final String moduleName = getModuleName(requireNonNull(path, "path should not be null"));
final ShardStrategy shardStrategy = configuration.getStrategyForModule(moduleName);
if (shardStrategy == null) {
- // retry with prefix based sharding
- final ShardStrategy strategyForPrefix =
- configuration.getStrategyForPrefix(new DOMDataTreeIdentifier(logicalStoreType, path));
- if (strategyForPrefix == null) {
- return DefaultShardStrategy.getInstance();
- }
- return strategyForPrefix;
+ return DefaultShardStrategy.getInstance();
}
return shardStrategy;
return UNKNOWN_MODULE_NAME;
}
- String namespace = path.getPathArguments().get(0).getNodeType().getNamespace().toASCIIString();
+ String namespace = path.getPathArguments().get(0).getNodeType().getNamespace().toString();
String moduleName = configuration.getModuleNameFromNameSpace(namespace);
return moduleName != null ? moduleName : UNKNOWN_MODULE_NAME;
}
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class AbstractBatchedModificationsCursor extends AbstractDataTreeModificationCursor {
-
protected abstract BatchedModifications getModifications();
@Override
}
@Override
- public final void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public final void merge(final PathArgument child, final NormalizedNode data) {
getModifications().addModification(new MergeModification(current().node(child), data));
}
@Override
- public final void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public final void write(final PathArgument child, final NormalizedNode data) {
getModifications().addModification(new WriteModification(current().node(child), data));
}
}
*/
package org.opendaylight.controller.cluster.datastore.utils;
-import static akka.pattern.Patterns.ask;
-
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
-import akka.actor.Address;
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import akka.pattern.AskTimeoutException;
import com.codahale.metrics.Timer;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.lang.invoke.VarHandle;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.LongAdder;
import java.util.function.Function;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
import org.opendaylight.controller.cluster.reporting.MetricsReporter;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* not be passed to actors especially remote actors.
*/
public class ActorUtils {
+ private static final class AskTimeoutCounter extends OnComplete<Object> implements ExecutionContext {
+ private LongAdder ateExceptions = new LongAdder();
+
+ @Override
+ public void onComplete(final Throwable failure, final Object success) throws Throwable {
+ if (failure instanceof AskTimeoutException) {
+ ateExceptions.increment();
+ }
+ }
+
+ void reset() {
+ ateExceptions = new LongAdder();
+ }
+
+ long sum() {
+ return ateExceptions.sum();
+ }
+
+ @Override
+ public void execute(final Runnable runnable) {
+ // Yes, we are this ugly, but then we are just doing a check + an increment
+ runnable.run();
+ }
+
+ @Override
+ public void reportFailure(final Throwable cause) {
+ LOG.warn("Unexpected failure updating counters", cause);
+ }
+ }
+
private static final Logger LOG = LoggerFactory.getLogger(ActorUtils.class);
private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
private static final String METRIC_RATE = "rate";
- private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
- new Mapper<>() {
+ private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER = new Mapper<>() {
@Override
public Throwable apply(final Throwable failure) {
- Throwable actualFailure = failure;
if (failure instanceof AskTimeoutException) {
// A timeout exception most likely means the shard isn't initialized.
- actualFailure = new NotInitializedException(
+ return new NotInitializedException(
"Timed out trying to find the primary shard. Most likely cause is the "
+ "shard is not initialized yet.");
}
-
- return actualFailure;
+ return failure;
}
};
public static final String BOUNDED_MAILBOX = "bounded-mailbox";
public static final String COMMIT = "commit";
+ private final AskTimeoutCounter askTimeoutCounter = new AskTimeoutCounter();
private final ActorSystem actorSystem;
private final ActorRef shardManager;
private final ClusterWrapper clusterWrapper;
private final Configuration configuration;
+ private final String selfAddressHostPort;
+ private final Dispatchers dispatchers;
+
private DatastoreContext datastoreContext;
private FiniteDuration operationDuration;
private Timeout operationTimeout;
- private final String selfAddressHostPort;
private TransactionRateLimiter txRateLimiter;
private Timeout transactionCommitOperationTimeout;
private Timeout shardInitializationTimeout;
- private final Dispatchers dispatchers;
private volatile EffectiveModelContext schemaContext;
- // Used as a write memory barrier.
- @SuppressWarnings("unused")
- private volatile boolean updated;
-
private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN)
.getMetricsRegistry();
this.clusterWrapper = clusterWrapper;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
- this.dispatchers = new Dispatchers(actorSystem.dispatchers());
+ dispatchers = new Dispatchers(actorSystem.dispatchers());
this.primaryShardInfoCache = primaryShardInfoCache;
-
- final LogicalDatastoreType convertedType =
- LogicalDatastoreType.valueOf(datastoreContext.getLogicalStoreType().name());
- this.shardStrategyFactory = new ShardStrategyFactory(configuration, convertedType);
+ shardStrategyFactory = new ShardStrategyFactory(configuration);
setCachedProperties();
- Address selfAddress = clusterWrapper.getSelfAddress();
+ final var selfAddress = clusterWrapper.getSelfAddress();
if (selfAddress != null && !selfAddress.host().isEmpty()) {
selfAddressHostPort = selfAddress.host().get() + ":" + selfAddress.port().get();
} else {
selfAddressHostPort = null;
}
-
}
private void setCachedProperties() {
TimeUnit.MILLISECONDS);
operationTimeout = new Timeout(operationDuration);
- transactionCommitOperationTimeout = new Timeout(FiniteDuration.create(
+ transactionCommitOperationTimeout = new Timeout(FiniteDuration.create(
datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS));
shardInitializationTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
}
public void setDatastoreContext(final DatastoreContextFactory contextFactory) {
- this.datastoreContext = contextFactory.getBaseDatastoreContext();
+ datastoreContext = contextFactory.getBaseDatastoreContext();
setCachedProperties();
- // We write the 'updated' volatile to trigger a write memory barrier so that the writes above
- // will be published immediately even though they may not be immediately visible to other
- // threads due to unsynchronized reads. That's OK though - we're going for eventual
- // consistency here as immediately visible updates to these members aren't critical. These
- // members could've been made volatile but wanted to avoid volatile reads as these are
- // accessed often and updates will be infrequent.
-
- updated = true;
+ // Trigger a write memory barrier so that the writes above will be published immediately even though they may
+ // not be immediately visible to other threads due to unsynchronized reads. That is OK though - we are going for
+ // eventual consistency here as immediately visible updates to these members are not critical. These members
+ // could have been made volatile but wanted to avoid volatile reads as these are accessed often and updates will
+ // be infrequent.
+ VarHandle.fullFence();
if (shardManager != null) {
shardManager.tell(contextFactory, ActorRef.noSender());
}
public Future<PrimaryShardInfo> findPrimaryShardAsync(final String shardName) {
- Future<PrimaryShardInfo> ret = primaryShardInfoCache.getIfPresent(shardName);
+ final var ret = primaryShardInfoCache.getIfPresent(shardName);
if (ret != null) {
return ret;
}
- Future<Object> future = executeOperationAsync(shardManager,
- new FindPrimary(shardName, true), shardInitializationTimeout);
-
- return future.transform(new Mapper<Object, PrimaryShardInfo>() {
- @Override
- public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
- if (response instanceof RemotePrimaryShardFound) {
- LOG.debug("findPrimaryShardAsync received: {}", response);
- RemotePrimaryShardFound found = (RemotePrimaryShardFound)response;
- return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
- } else if (response instanceof LocalPrimaryShardFound) {
- LOG.debug("findPrimaryShardAsync received: {}", response);
- LocalPrimaryShardFound found = (LocalPrimaryShardFound)response;
- return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
+
+ return executeOperationAsync(shardManager, new FindPrimary(shardName, true), shardInitializationTimeout)
+ .transform(new Mapper<>() {
+ @Override
+ public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
+ if (response instanceof RemotePrimaryShardFound found) {
+ LOG.debug("findPrimaryShardAsync received: {}", found);
+ return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
+ } else if (response instanceof LocalPrimaryShardFound found) {
+ LOG.debug("findPrimaryShardAsync received: {}", found);
+ return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
found.getLocalShardDataTree());
- } else if (response instanceof NotInitializedException) {
- throw (NotInitializedException)response;
- } else if (response instanceof PrimaryNotFoundException) {
- throw (PrimaryNotFoundException)response;
- } else if (response instanceof NoShardLeaderException) {
- throw (NoShardLeaderException)response;
- }
+ } else if (response instanceof NotInitializedException notInitialized) {
+ throw notInitialized;
+ } else if (response instanceof PrimaryNotFoundException primaryNotFound) {
+ throw primaryNotFound;
+ } else if (response instanceof NoShardLeaderException noShardLeader) {
+ throw noShardLeader;
+ }
- throw new UnknownMessageException(String.format(
+ throw new UnknownMessageException(String.format(
"FindPrimary returned unkown response: %s", response));
- }
- }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
+ }
+ }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private PrimaryShardInfo onPrimaryShardFound(final String shardName, final String primaryActorPath,
final short primaryVersion, final ReadOnlyDataTree localShardDataTree) {
- ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
- PrimaryShardInfo info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
+ final var actorSelection = actorSystem.actorSelection(primaryActorPath);
+ final var info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
new PrimaryShardInfo(actorSelection, primaryVersion, localShardDataTree);
primaryShardInfoCache.putSuccessful(shardName, info);
return info;
* specified by the shardName
*/
public Optional<ActorRef> findLocalShard(final String shardName) {
- Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
-
- if (result instanceof LocalShardFound) {
- LocalShardFound found = (LocalShardFound) result;
+ final var result = executeOperation(shardManager, new FindLocalShard(shardName, false));
+ if (result instanceof LocalShardFound found) {
LOG.debug("Local shard found {}", found.getPath());
return Optional.of(found.getPath());
}
* @param shardName the name of the local shard that needs to be found
*/
public Future<ActorRef> findLocalShardAsync(final String shardName) {
- Future<Object> future = executeOperationAsync(shardManager,
- new FindLocalShard(shardName, true), shardInitializationTimeout);
-
- return future.map(new Mapper<Object, ActorRef>() {
- @Override
- public ActorRef checkedApply(final Object response) throws Throwable {
- if (response instanceof LocalShardFound) {
- LocalShardFound found = (LocalShardFound)response;
- LOG.debug("Local shard found {}", found.getPath());
- return found.getPath();
- } else if (response instanceof NotInitializedException) {
- throw (NotInitializedException)response;
- } else if (response instanceof LocalShardNotFound) {
- throw new LocalShardNotFoundException(
+ return executeOperationAsync(shardManager, new FindLocalShard(shardName, true), shardInitializationTimeout)
+ .map(new Mapper<>() {
+ @Override
+ public ActorRef checkedApply(final Object response) throws Throwable {
+ if (response instanceof LocalShardFound found) {
+ LOG.debug("Local shard found {}", found.getPath());
+ return found.getPath();
+ } else if (response instanceof NotInitializedException) {
+ throw (NotInitializedException)response;
+ } else if (response instanceof LocalShardNotFound) {
+ throw new LocalShardNotFoundException(
String.format("Local shard for %s does not exist.", shardName));
- }
+ }
- throw new UnknownMessageException(String.format(
- "FindLocalShard returned unkown response: %s", response));
- }
- }, getClientDispatcher());
+ throw new UnknownMessageException("FindLocalShard returned unkown response: " + response);
+ }
+ }, getClientDispatcher());
}
/**
@SuppressWarnings("checkstyle:IllegalCatch")
public void shutdown() {
- FiniteDuration duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
+ final var duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
try {
Await.ready(Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE), duration);
} catch (Exception e) {
public void broadcast(final Function<Short, Object> messageSupplier, final Class<?> messageClass) {
for (final String shardName : configuration.getAllShardNames()) {
- Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
- primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
+ final var primaryFuture = findPrimaryShardAsync(shardName);
+ primaryFuture.onComplete(new OnComplete<>() {
@Override
public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
if (failure != null) {
LOG.warn("broadcast failed to send message {} to shard {}", messageClass.getSimpleName(),
shardName, failure);
} else {
- Object message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
+ final var message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
}
}
return false;
}
- String hostPort = path.substring(pathAtIndex + 1, slashIndex);
+ final var hostPort = path.substring(pathAtIndex + 1, slashIndex);
return hostPort.equals(selfAddressHostPort);
} else {
}
public Timer getOperationTimer(final String dataStoreType, final String operationName) {
- final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
- operationName, METRIC_RATE);
+ final var rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType, operationName,
+ METRIC_RATE);
return metricRegistry.timer(rate);
}
return txRateLimiter.getTxCreationLimit();
}
+ public long getAskTimeoutExceptionCount() {
+ return askTimeoutCounter.sum();
+ }
+
+ public void resetAskTimeoutExceptionCount() {
+ askTimeoutCounter.reset();
+ }
+
/**
* Try to acquire a transaction creation permit. Will block if no permits are available.
*/
* @return the dispatcher
*/
public ExecutionContext getClientDispatcher() {
- return this.dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+ return dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
}
public String getNotificationDispatcherPath() {
- return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+ return dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
}
public Configuration getConfiguration() {
}
protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
- return ask(actorRef, message, timeout);
+ return Patterns.ask(actorRef, message, timeout);
}
protected Future<Object> doAsk(final ActorSelection actorRef, final Object message, final Timeout timeout) {
- return ask(actorRef, message, timeout);
+ final var ret = Patterns.ask(actorRef, message, timeout);
+ ret.onComplete(askTimeoutCounter, askTimeoutCounter);
+ return ret;
}
public PrimaryShardInfoFutureCache getPrimaryShardInfoCache() {
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.utils;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utils for encoding prefix shard name.
- */
-public final class ClusterUtils {
- private static final Logger LOG = LoggerFactory.getLogger(ClusterUtils.class);
-
- // id for the shard used to store prefix configuration
- public static final String PREFIX_CONFIG_SHARD_ID = "prefix-configuration-shard";
-
- public static final QName PREFIX_SHARDS_QNAME =
- QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:prefix-shard-configuration",
- "2017-01-10", "prefix-shards").intern();
- public static final QName SHARD_LIST_QNAME =
- QName.create(PREFIX_SHARDS_QNAME, "shard").intern();
- public static final QName SHARD_PREFIX_QNAME =
- QName.create(PREFIX_SHARDS_QNAME, "prefix").intern();
- public static final QName SHARD_REPLICAS_QNAME =
- QName.create(PREFIX_SHARDS_QNAME, "replicas").intern();
- public static final QName SHARD_REPLICA_QNAME =
- QName.create(PREFIX_SHARDS_QNAME, "replica").intern();
-
- public static final YangInstanceIdentifier PREFIX_SHARDS_PATH =
- YangInstanceIdentifier.of(PREFIX_SHARDS_QNAME).toOptimized();
- public static final YangInstanceIdentifier SHARD_LIST_PATH =
- PREFIX_SHARDS_PATH.node(SHARD_LIST_QNAME).toOptimized();
-
- private ClusterUtils() {
- }
-
- public static ShardIdentifier getShardIdentifier(final MemberName memberName, final DOMDataTreeIdentifier prefix) {
- final String type;
- switch (prefix.getDatastoreType()) {
- case OPERATIONAL:
- type = "operational";
- break;
- case CONFIGURATION:
- type = "config";
- break;
- default:
- type = prefix.getDatastoreType().name();
- LOG.warn("Unknown data store type {}", type);
- }
-
- return ShardIdentifier.create(getCleanShardName(prefix.getRootIdentifier()), memberName, type);
- }
-
- /**
- * Returns an encoded shard name based on the provided path that should doesn't contain characters that cannot be
- * present in akka actor paths.
- *
- * @param path Path on which to base the shard name
- * @return encoded name that doesn't contain characters that cannot be in actor path.
- */
- public static String getCleanShardName(final YangInstanceIdentifier path) {
- if (path.isEmpty()) {
- return "default";
- }
-
- final StringBuilder builder = new StringBuilder();
- // TODO need a better mapping that includes namespace, but we'll need to cleanup the string beforehand
- // we have to fight both javax and akka url path restrictions..
- path.getPathArguments().forEach(p -> {
- builder.append(p.getNodeType().getLocalName());
- if (p instanceof NodeIdentifierWithPredicates) {
- builder.append("-key_");
- ((NodeIdentifierWithPredicates) p).entrySet().forEach(entry -> {
- builder.append(entry.getKey().getLocalName()).append(entry.getValue()).append('-');
- });
- builder.append('_');
- }
- builder.append('!');
- });
- return builder.toString();
- }
-}
import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
output.writeByte('\n');
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
@Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void merge(final PathArgument child, final NormalizedNode data) {
outputPathAndNode("MERGE", child, data);
}
@Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void write(final PathArgument child, final NormalizedNode data) {
outputPathAndNode("WRITE", child, data);
}
- private void outputPathAndNode(final String name, final PathArgument child, final NormalizedNode<?, ?> data) {
+ private void outputPathAndNode(final String name, final PathArgument child, final NormalizedNode data) {
try {
output.writeByte('\n');
output.write(name.getBytes(StandardCharsets.UTF_8));
NormalizedNodeXMLOutput.toStream(output, data);
output.writeByte('\n');
} catch (IOException | XMLStreamException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException(e);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.ImmutableSortedSet;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObject;
+
+@Beta
+public final class ImmutableUnsignedLongSet extends UnsignedLongSet implements Immutable, WritableObject {
+ // Do not all
+ private static final int ARRAY_MAX_ELEMENTS = 4096;
+
+ private static final @NonNull ImmutableUnsignedLongSet EMPTY =
+ new ImmutableUnsignedLongSet(ImmutableSortedSet.of());
+
+ private ImmutableUnsignedLongSet(final NavigableSet<Entry> ranges) {
+ super(ranges);
+ }
+
+ static @NonNull ImmutableUnsignedLongSet copyOf(final MutableUnsignedLongSet mutable) {
+ if (mutable.isEmpty()) {
+ return of();
+ }
+ if (mutable.rangeSize() <= ARRAY_MAX_ELEMENTS) {
+ return new ImmutableUnsignedLongSet(ImmutableSortedSet.copyOfSorted(mutable.trustedRanges()));
+ }
+ return new ImmutableUnsignedLongSet(new TreeSet<>(mutable.trustedRanges()));
+ }
+
+ public static @NonNull ImmutableUnsignedLongSet of() {
+ return EMPTY;
+ }
+
+ @Override
+ public ImmutableUnsignedLongSet immutableCopy() {
+ return this;
+ }
+
+ public static @NonNull ImmutableUnsignedLongSet readFrom(final DataInput in) throws IOException {
+ return readFrom(in, in.readInt());
+ }
+
+ public static @NonNull ImmutableUnsignedLongSet readFrom(final DataInput in, final int size) throws IOException {
+ if (size == 0) {
+ return EMPTY;
+ }
+
+ final NavigableSet<Entry> ranges;
+ if (size <= ARRAY_MAX_ELEMENTS) {
+ final var entries = new ArrayList<Entry>(size);
+ for (int i = 0; i < size; ++i) {
+ entries.add(Entry.readUnsigned(in));
+ }
+ ranges = ImmutableSortedSet.copyOf(entries);
+ } else {
+ ranges = new TreeSet<>();
+ for (int i = 0; i < size; ++i) {
+ ranges.add(Entry.readUnsigned(in));
+ }
+ }
+ return new ImmutableUnsignedLongSet(ranges);
+ }
+
+ @Override
+ public void writeTo(final DataOutput out) throws IOException {
+ out.writeInt(rangeSize());
+ writeRanges(out);
+ }
+
+ public void writeRangesTo(final @NonNull DataOutput out, final int size) throws IOException {
+ final int rangeSize = rangeSize();
+ if (size != rangeSize) {
+ throw new IOException("Mismatched size: expected " + rangeSize + ", got " + size);
+ }
+ writeRanges(out);
+ }
+
+ private void writeRanges(final @NonNull DataOutput out) throws IOException {
+ for (var range : trustedRanges()) {
+ range.writeUnsigned(out);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableRangeSet;
+import com.google.common.collect.Range;
+import com.google.common.primitives.UnsignedLong;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Mutable;
+
+@Beta
+public final class MutableUnsignedLongSet extends UnsignedLongSet implements Mutable {
+ MutableUnsignedLongSet(final TreeSet<Entry> ranges) {
+ super(ranges);
+ }
+
+ public static @NonNull MutableUnsignedLongSet of() {
+ return new MutableUnsignedLongSet(new TreeSet<>());
+ }
+
+ public static @NonNull MutableUnsignedLongSet of(final long... ulongs) {
+ final var ret = MutableUnsignedLongSet.of();
+ for (long longBits : ulongs) {
+ ret.add(longBits);
+ }
+ return ret;
+ }
+
+ @Override
+ public ImmutableUnsignedLongSet immutableCopy() {
+ return ImmutableUnsignedLongSet.copyOf(this);
+ }
+
+ public void add(final long longBits) {
+ addOne(trustedRanges(), Entry.of(longBits));
+ }
+
+ public void addAll(final UnsignedLongSet other) {
+ final var ranges = trustedRanges();
+ for (var range : other.trustedRanges()) {
+ if (range.lowerBits == range.upperBits) {
+ addOne(ranges, range);
+ } else {
+ addRange(ranges, range);
+ }
+ }
+ }
+
+ private static void addOne(final NavigableSet<Entry> ranges, final Entry range) {
+ final long longBits = range.lowerBits;
+
+ // We need Iterator.remove() to perform efficient merge below
+ final var headIt = ranges.headSet(range, true).descendingIterator();
+ if (headIt.hasNext()) {
+ final var head = headIt.next();
+ if (Long.compareUnsigned(head.upperBits, longBits) >= 0) {
+ // Already contained, this is a no-op
+ return;
+ }
+
+ // Merge into head entry if possible
+ if (head.upperBits + 1 == longBits) {
+ // We will be replacing head
+ headIt.remove();
+
+ // Potentially merge head entry and tail entry
+ final var tailIt = ranges.tailSet(range, false).iterator();
+ if (tailIt.hasNext()) {
+ final var tail = tailIt.next();
+ if (tail.lowerBits - 1 == longBits) {
+ // Update tail.lowerBits to include contents of head
+ tailIt.remove();
+ ranges.add(tail.withLower(head.lowerBits));
+ return;
+ }
+ }
+
+ // Update head.upperBits
+ ranges.add(head.withUpper(longBits));
+ return;
+ }
+ }
+
+ final var tailIt = ranges.tailSet(range, false).iterator();
+ if (tailIt.hasNext()) {
+ final var tail = tailIt.next();
+ // Merge into tail entry if possible
+ if (tail.lowerBits - 1 == longBits) {
+ // Update tail.lowerBits
+ tailIt.remove();
+ ranges.add(tail.withLower(longBits));
+ return;
+ }
+ }
+
+ // No luck, store a new entry
+ ranges.add(range);
+ }
+
+ private static void addRange(final NavigableSet<Entry> ranges, final Entry range) {
+ // If the start of the range is already covered by an existing range, we can expand that
+ final var headIt = ranges.headSet(range, true).descendingIterator();
+ final boolean hasFloor = headIt.hasNext();
+ if (hasFloor) {
+ final var floor = headIt.next();
+ if (Long.compareUnsigned(floor.upperBits, range.upperBits) < 0
+ && Long.compareUnsigned(floor.upperBits + 1, range.lowerBits) >= 0) {
+ headIt.remove();
+ ranges.add(expandFloor(ranges, floor, range.upperBits));
+ return;
+ }
+ }
+
+ // If the end of the range is already covered by an existing range, we can expand that
+ final var tailIt = ranges.headSet(Entry.of(range.upperBits), true).descendingIterator();
+ if (tailIt.hasNext()) {
+ final var upper = tailIt.next();
+ tailIt.remove();
+
+ // Quick check: if we did not find a lower range at all, we might be expanding the entire span, in which
+ // case upper needs to become the first entry
+ if (!hasFloor) {
+ ranges.headSet(upper, false).clear();
+ }
+
+ ranges.add(expandCeiling(ranges, upper, range.lowerBits, range.upperBits));
+ return;
+ }
+
+ // No luck, insert
+ ranges.add(range);
+ }
+
+ private static @NonNull Entry expandFloor(final NavigableSet<Entry> ranges, final Entry floor,
+ final long upperBits) {
+ // Acquire any ranges after floor and clean them up
+ final var tailIt = ranges.tailSet(floor, false).iterator();
+ final long nextLower = upperBits + 1;
+ while (tailIt.hasNext()) {
+ final var tail = tailIt.next();
+ if (Long.compareUnsigned(tail.lowerBits, nextLower) > 0) {
+ // There is gap, nothing more to cleanup
+ break;
+ }
+
+ // We can merge this entry into floor...
+ tailIt.remove();
+
+ if (Long.compareUnsigned(tail.upperBits, nextLower) >= 0) {
+ // ... but we need to expand floor accordingly and after that we are done
+ return floor.withUpper(tail.upperBits);
+ }
+ }
+
+ // Expand floor to include this range and we are done
+ return floor.withUpper(upperBits);
+ }
+
+ private static @NonNull Entry expandCeiling(final NavigableSet<Entry> ranges, final Entry ceiling,
+ final long lowerBits, final long upperBits) {
+ if (Long.compareUnsigned(ceiling.upperBits, upperBits) >= 0) {
+ // Upper end is already covered
+ return ceiling.withLower(lowerBits);
+ }
+
+ // We are expanding the entry's upper boundary, we need to check if we need to coalesce following entries
+ long newUpper = upperBits;
+ final var tailIt = ranges.tailSet(ceiling, false).iterator();
+ if (tailIt.hasNext()) {
+ final var tail = tailIt.next();
+ if (Long.compareUnsigned(tail.lowerBits, newUpper + 1) <= 0) {
+ tailIt.remove();
+ newUpper = tail.upperBits;
+ }
+ }
+
+ return Entry.of(lowerBits, newUpper);
+ }
+
+ // Provides compatibility with RangeSet<UnsignedLong> using [lower, upper + 1)
+ public ImmutableRangeSet<UnsignedLong> toRangeSet() {
+ return ImmutableRangeSet.copyOf(Collections2.transform(trustedRanges(), entry -> Range.closedOpen(
+ UnsignedLong.fromLongBits(entry.lowerBits), UnsignedLong.fromLongBits(entry.upperBits + 1))));
+ }
+}
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
public final class NormalizedNodeAggregator {
private final YangInstanceIdentifier rootIdentifier;
- private final List<Optional<NormalizedNode<?, ?>>> nodes;
+ private final List<Optional<NormalizedNode>> nodes;
private final DataTree dataTree;
private NormalizedNodeAggregator(final YangInstanceIdentifier rootIdentifier,
- final List<Optional<NormalizedNode<?, ?>>> nodes, final EffectiveModelContext schemaContext,
+ final List<Optional<NormalizedNode>> nodes, final EffectiveModelContext schemaContext,
final LogicalDatastoreType logicalDatastoreType) {
this.rootIdentifier = rootIdentifier;
this.nodes = nodes;
- this.dataTree = new InMemoryDataTreeFactory().create(
- logicalDatastoreType == LogicalDatastoreType.CONFIGURATION ? DataTreeConfiguration.DEFAULT_CONFIGURATION
- : DataTreeConfiguration.DEFAULT_OPERATIONAL);
- this.dataTree.setEffectiveModelContext(schemaContext);
+ dataTree = new InMemoryDataTreeFactory().create(logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+ ? DataTreeConfiguration.DEFAULT_CONFIGURATION : DataTreeConfiguration.DEFAULT_OPERATIONAL);
+ dataTree.setEffectiveModelContext(schemaContext);
}
/**
* Combine data from all the nodes in the list into a tree with root as rootIdentifier.
*/
- public static Optional<NormalizedNode<?,?>> aggregate(final YangInstanceIdentifier rootIdentifier,
- final List<Optional<NormalizedNode<?, ?>>> nodes, final EffectiveModelContext schemaContext,
+ public static Optional<NormalizedNode> aggregate(final YangInstanceIdentifier rootIdentifier,
+ final List<Optional<NormalizedNode>> nodes, final EffectiveModelContext schemaContext,
final LogicalDatastoreType logicalDatastoreType) throws DataValidationFailedException {
return new NormalizedNodeAggregator(rootIdentifier, nodes, schemaContext, logicalDatastoreType).aggregate();
}
- private Optional<NormalizedNode<?,?>> aggregate() throws DataValidationFailedException {
- return combine().getRootNode();
- }
-
- private NormalizedNodeAggregator combine() throws DataValidationFailedException {
+ private Optional<NormalizedNode> aggregate() throws DataValidationFailedException {
final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+ boolean nodePresent = false;
- for (final Optional<NormalizedNode<?,?>> node : nodes) {
+ for (final Optional<NormalizedNode> node : nodes) {
if (node.isPresent()) {
- mod.merge(rootIdentifier, node.get());
+ mod.merge(rootIdentifier, node.orElseThrow());
+ nodePresent = true;
}
}
+
+ if (!nodePresent) {
+ return Optional.empty();
+ }
+
+
mod.ready();
dataTree.validate(mod);
final DataTreeCandidate candidate = dataTree.prepare(mod);
dataTree.commit(candidate);
- return this;
- }
-
- private Optional<NormalizedNode<?, ?>> getRootNode() {
return dataTree.takeSnapshot().readNode(rootIdentifier);
}
}
private NormalizedNodeXMLOutput() {
}
- public static void toStream(OutputStream outStream, NormalizedNode<?, ?> node)
+ public static void toStream(final OutputStream outStream, final NormalizedNode node)
throws XMLStreamException, IOException {
XMLStreamWriter xmlWriter = XOF.createXMLStreamWriter(outStream);
}
}
- public static void toFile(File file, NormalizedNode<?, ?> node) {
+ public static void toFile(final File file, final NormalizedNode node) {
try (FileOutputStream outStream = new FileOutputStream(file)) {
toStream(outStream, node);
} catch (IOException | XMLStreamException e) {
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
@Override
- public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
pruneAndMergeNode(path, data);
}
@Override
- public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
pruneAndWriteNode(path, data);
}
}
@Override
- public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
if (path.isEmpty()) {
pruneAndMergeNode(path, data);
return;
}
@Override
- public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
if (path.isEmpty()) {
pruneAndWriteNode(path, data);
return;
}
@Override
- public final SchemaContext getSchemaContext() {
- return delegate.getSchemaContext();
+ public final EffectiveModelContext modelContext() {
+ return delegate.modelContext();
}
@Override
}
}
- final void pruneAndMergeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- final NormalizedNode<?, ?> pruned = pruneNormalizedNode(path, data);
+ final void pruneAndMergeNode(final YangInstanceIdentifier path, final NormalizedNode data) {
+ final NormalizedNode pruned = pruneNormalizedNode(path, data);
if (pruned != null) {
delegate.merge(path, pruned);
}
}
- final void pruneAndWriteNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- final NormalizedNode<?, ?> pruned = pruneNormalizedNode(path, data);
+ final void pruneAndWriteNode(final YangInstanceIdentifier path, final NormalizedNode data) {
+ final NormalizedNode pruned = pruneNormalizedNode(path, data);
if (pruned != null) {
delegate.write(path, pruned);
}
}
@Override
- public final Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier yangInstanceIdentifier) {
+ public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier yangInstanceIdentifier) {
return delegate.readNode(yangInstanceIdentifier);
}
}
@VisibleForTesting
- final NormalizedNode<?, ?> pruneNormalizedNode(final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> input) {
+ final NormalizedNode pruneNormalizedNode(final YangInstanceIdentifier path, final NormalizedNode input) {
pruner.initializeForPath(path);
try {
NormalizedNodeWriter.forStreamWriter(pruner).write(input);
}
@Override
- public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void write(final PathArgument child, final NormalizedNode data) {
final YangInstanceIdentifier path = current().node(child);
- final NormalizedNode<?, ?> prunedNode = pruningModification.pruneNormalizedNode(path, data);
+ final NormalizedNode prunedNode = pruningModification.pruneNormalizedNode(path, data);
if (prunedNode != null) {
toModification.write(path, prunedNode);
}
}
@Override
- public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+ public void merge(final PathArgument child, final NormalizedNode data) {
final YangInstanceIdentifier path = current().node(child);
- final NormalizedNode<?, ?> prunedNode = pruningModification.pruneNormalizedNode(path, data);
+ final NormalizedNode prunedNode = pruningModification.pruneNormalizedNode(path, data);
if (prunedNode != null) {
toModification.merge(path, prunedNode);
}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+
+/**
+ * Utility methods for dealing with datastore root {@link ContainerNode} with respect to module shards.
+ */
+public final class RootScatterGather {
+ @NonNullByDefault
+ public record ShardContainer<T>(T shard, ContainerNode container) {
+ public ShardContainer {
+ requireNonNull(shard);
+ requireNonNull(container);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("shard", shard).toString();
+ }
+ }
+
+ private RootScatterGather() {
+ // Hidden on purpose
+ }
+
+ /**
+ * Check whether a {@link NormalizedNode} represents a root container and return it cast to {@link ContainerNode}.
+ *
+ * @param node a normalized node
+ * @return {@code node} cast to ContainerNode
+ * @throws NullPointerException if {@code node} is null
+ * @throws IllegalArgumentException if {@code node} is not a {@link ContainerNode}
+ */
+ public static @NonNull ContainerNode castRootNode(final NormalizedNode node) {
+ final var nonnull = requireNonNull(node);
+ checkArgument(nonnull instanceof ContainerNode, "Invalid root data %s", nonnull);
+ return (ContainerNode) nonnull;
+ }
+
+ /**
+ * Reconstruct root container from a set of constituents.
+ *
+ * @param actorUtils {@link ActorUtils} reference
+ * @param readFutures Consitutent read futures
+ * @return A composite future
+ */
+ public static @NonNull FluentFuture<Optional<NormalizedNode>> gather(final ActorUtils actorUtils,
+ final Stream<FluentFuture<Optional<NormalizedNode>>> readFutures) {
+ return FluentFuture.from(Futures.transform(
+ Futures.allAsList(readFutures.collect(ImmutableList.toImmutableList())), input -> {
+ try {
+ return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(), input,
+ actorUtils.getSchemaContext(), actorUtils.getDatastoreContext().getLogicalStoreType());
+ } catch (DataValidationFailedException e) {
+ throw new IllegalArgumentException("Failed to aggregate", e);
+ }
+ }, MoreExecutors.directExecutor()));
+ }
+
+ public static <T> @NonNull Stream<ShardContainer<T>> scatterAll(final ContainerNode rootNode,
+ final Function<PathArgument, T> childToShard, final Stream<T> allShards) {
+ final var builders = allShards
+ .collect(Collectors.toUnmodifiableMap(Function.identity(), unused -> ImmutableNodes.newContainerBuilder()));
+ for (var child : rootNode.body()) {
+ final var shard = childToShard.apply(child.name());
+ verifyNotNull(builders.get(shard), "Failed to find builder for %s", shard).addChild(child);
+ }
+ return streamContainers(rootNode.name(), builders);
+ }
+
+ /**
+ * Split root container into per-shard root containers.
+ *
+ * @param <T> Shard reference type
+ * @param rootNode Root container to be split up
+ * @param childToShard Mapping function from child {@link PathArgument} to shard reference
+ * @return Stream of {@link ShardContainer}s, one for each touched shard
+ */
+ public static <T> @NonNull Stream<ShardContainer<T>> scatterTouched(final ContainerNode rootNode,
+ final Function<PathArgument, T> childToShard) {
+ final var builders = new HashMap<T, ContainerNode.Builder>();
+ for (var child : rootNode.body()) {
+ builders.computeIfAbsent(childToShard.apply(child.name()), unused -> ImmutableNodes.newContainerBuilder())
+ .addChild(child);
+ }
+ return streamContainers(rootNode.name(), builders);
+ }
+
+ private static <T> @NonNull Stream<ShardContainer<T>> streamContainers(final NodeIdentifier rootId,
+ final Map<T, ContainerNode.Builder> builders) {
+ return builders.entrySet().stream()
+ .map(entry -> new ShardContainer<>(entry.getKey(), entry.getValue().withNodeIdentifier(rootId).build()));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import com.google.common.primitives.UnsignedLong;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * A more efficient equivalent of {@code ImmutableMap<UnsignedLong, Boolean>}.
+ */
+@Beta
+public abstract class UnsignedLongBitmap implements Immutable {
+ @VisibleForTesting
+ static final class Regular extends UnsignedLongBitmap {
+ private final long[] keys;
+ private final boolean[] values;
+
+ Regular(final long[] keys, final boolean[] values) {
+ this.keys = requireNonNull(keys);
+ this.values = requireNonNull(values);
+ verify(keys.length == values.length);
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return keys.length == 0;
+ }
+
+ @Override
+ public int size() {
+ return keys.length;
+ }
+
+ @Override
+ void writeEntriesTo(final DataOutput out) throws IOException {
+ for (int i = 0; i < keys.length; ++i) {
+ writeEntry(out, keys[i], values[i]);
+ }
+ }
+
+ @Override
+ StringBuilder appendEntries(final StringBuilder sb) {
+ final int last = keys.length - 1;
+ for (int i = 0; i < last; ++i) {
+ appendEntry(sb, keys[i], values[i]).append(", ");
+ }
+ return appendEntry(sb, keys[last], values[last]);
+ }
+
+ @Override
+ void putEntries(final HashMap<UnsignedLong, Boolean> ret) {
+ for (int i = 0; i < keys.length; ++i) {
+ ret.put(UnsignedLong.fromLongBits(keys[i]), values[i]);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(keys) ^ Arrays.hashCode(values);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof Regular)) {
+ return false;
+ }
+ final var other = (Regular) obj;
+ return Arrays.equals(keys, other.keys) && Arrays.equals(values, other.values);
+ }
+ }
+
+ private static final class Singleton extends UnsignedLongBitmap {
+ private final long key;
+ private final boolean value;
+
+ Singleton(final long key, final boolean value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return false;
+ }
+
+ @Override
+ public int size() {
+ return 1;
+ }
+
+ @Override
+ void writeEntriesTo(final DataOutput out) throws IOException {
+ writeEntry(out, key, value);
+ }
+
+ @Override
+ StringBuilder appendEntries(final StringBuilder sb) {
+ return sb.append(Long.toUnsignedString(key)).append('=').append(value);
+ }
+
+ @Override
+ void putEntries(final HashMap<UnsignedLong, Boolean> ret) {
+ ret.put(UnsignedLong.fromLongBits(key), value);
+ }
+
+ @Override
+ public int hashCode() {
+ return Long.hashCode(key) ^ Boolean.hashCode(value);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof Singleton)) {
+ return false;
+ }
+ final var other = (Singleton) obj;
+ return key == other.key && value == other.value;
+ }
+ }
+
+ private static final @NonNull UnsignedLongBitmap EMPTY = new Regular(new long[0], new boolean[0]);
+
+ private UnsignedLongBitmap() {
+ // Hidden on purpose
+ }
+
+ public static @NonNull UnsignedLongBitmap of() {
+ return EMPTY;
+ }
+
+ public static @NonNull UnsignedLongBitmap of(final long keyBits, final boolean value) {
+ return new Singleton(keyBits, value);
+ }
+
+ public static @NonNull UnsignedLongBitmap copyOf(final Map<UnsignedLong, Boolean> map) {
+ final int size = map.size();
+ switch (size) {
+ case 0:
+ return of();
+ case 1:
+ final var entry = map.entrySet().iterator().next();
+ return of(entry.getKey().longValue(), entry.getValue());
+ default:
+ final var entries = new ArrayList<>(map.entrySet());
+ entries.sort(Comparator.comparing(Entry::getKey));
+
+ final var keys = new long[size];
+ final var values = new boolean[size];
+
+ int idx = 0;
+ for (var e : entries) {
+ keys[idx] = e.getKey().longValue();
+ values[idx] = e.getValue();
+ ++idx;
+ }
+
+ return new Regular(keys, values);
+ }
+ }
+
+ public abstract boolean isEmpty();
+
+ public abstract int size();
+
+ public final @NonNull HashMap<UnsignedLong, Boolean> mutableCopy() {
+ final int size = size();
+ switch (size) {
+ case 0:
+ return new HashMap<>();
+ default:
+ final var ret = Maps.<UnsignedLong, Boolean>newHashMapWithExpectedSize(size);
+ putEntries(ret);
+ return ret;
+ }
+ }
+
+ public static @NonNull UnsignedLongBitmap readFrom(final @NonNull DataInput in, final int size) throws IOException {
+ switch (size) {
+ case 0:
+ return of();
+ case 1:
+ return new Singleton(WritableObjects.readLong(in), in.readBoolean());
+ default:
+ final var keys = new long[size];
+ final var values = new boolean[size];
+ for (int i = 0; i < size; ++i) {
+ keys[i] = WritableObjects.readLong(in);
+ values[i] = in.readBoolean();
+ }
+
+ // There should be no duplicates and the IDs need to be increasing
+ long prevKey = keys[0];
+ for (int i = 1; i < size; ++i) {
+ final long key = keys[i];
+ if (Long.compareUnsigned(prevKey, key) >= 0) {
+ throw new IOException("Key " + Long.toUnsignedString(key) + " may not be used after key "
+ + Long.toUnsignedString(prevKey));
+ }
+ prevKey = key;
+ }
+
+ return new Regular(keys, values);
+ }
+ }
+
+ public void writeEntriesTo(final @NonNull DataOutput out, final int size) throws IOException {
+ if (size != size()) {
+ throw new IOException("Mismatched size: expected " + size() + ", got " + size);
+ }
+ writeEntriesTo(out);
+ }
+
+ abstract void writeEntriesTo(@NonNull DataOutput out) throws IOException;
+
+ abstract StringBuilder appendEntries(StringBuilder sb);
+
+ abstract void putEntries(HashMap<UnsignedLong, Boolean> ret);
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>
+ * Implementations of this method return a deterministic value.
+ */
+ @Override
+ public abstract int hashCode();
+
+ @Override
+ public abstract boolean equals(Object obj);
+
+ @Override
+ public final String toString() {
+ return isEmpty() ? "{}" : appendEntries(new StringBuilder().append('{')).append('}').toString();
+ }
+
+ private static StringBuilder appendEntry(final StringBuilder sb, final long key, final boolean value) {
+ return sb.append(Long.toUnsignedString(key)).append('=').append(value);
+ }
+
+ private static void writeEntry(final @NonNull DataOutput out, final long key, final boolean value)
+ throws IOException {
+ // FIXME: This serialization format is what we inherited. We could do better by storing the boolean in
+ // writeLong()'s flags. On the other had, we could also be writing longs by twos, which might be
+ // benefitial.
+ WritableObjects.writeLong(out, key);
+ out.writeBoolean(value);
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.utils;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.MoreObjects;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
-import com.google.common.primitives.UnsignedLong;
-import org.opendaylight.yangtools.concepts.Mutable;
-
-/**
- * Utility {@link RangeSet}-like class, specialized for holding {@link UnsignedLong}. It does not directly implement
- * the {@link RangeSet} interface, but allows converting to and from it. Internal implementation takes advantage of
- * knowing that {@link UnsignedLong} is a discrete type and that it can be stored in a long.
- *
- * @author Robert Varga
- */
-@Beta
-public final class UnsignedLongRangeSet implements Mutable {
- // FIXME: this is just to get us started
- private final RangeSet<UnsignedLong> rangeset;
-
- private UnsignedLongRangeSet(final RangeSet<UnsignedLong> rangeset) {
- this.rangeset = requireNonNull(rangeset);
- }
-
- public static UnsignedLongRangeSet create() {
- return new UnsignedLongRangeSet(TreeRangeSet.create());
- }
-
- public static UnsignedLongRangeSet create(final RangeSet<UnsignedLong> input) {
- return new UnsignedLongRangeSet(TreeRangeSet.create(input));
- }
-
- public RangeSet<UnsignedLong> toImmutable() {
- return ImmutableRangeSet.copyOf(rangeset);
- }
-
- public void add(final long longBits) {
- add(UnsignedLong.fromLongBits(longBits));
- }
-
- public void add(final UnsignedLong value) {
- rangeset.add(Range.closedOpen(value, UnsignedLong.ONE.plus(value)));
- }
-
- public boolean contains(final UnsignedLong value) {
- return rangeset.contains(value);
- }
-
- public boolean contains(final long longBits) {
- return contains(UnsignedLong.fromLongBits(longBits));
- }
-
- public UnsignedLongRangeSet copy() {
- return new UnsignedLongRangeSet(TreeRangeSet.create(rangeset));
- }
-
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this)
- .omitNullValues()
- .add("span", rangeset.isEmpty() ? null : rangeset.span())
- .add("rangeSize", rangeset.asRanges().size())
- .toString();
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.RangeSet;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * A class holding an equivalent of {@code Set<UnsignedLong>}. It is geared towards efficiently tracking ranges of
+ * objects, similar to what a {@link RangeSet} would do.
+ *
+ * <p>
+ * Unlike a {@code RangeSet}, though, this class takes advantage of knowing that an unsigned long is a discrete unit
+ * and can be stored in a simple {@code long}.
+ *
+ * @author Robert Varga
+ */
+abstract class UnsignedLongSet {
+ @Beta
+ @VisibleForTesting
+ public static final class Entry implements Comparable<Entry>, Immutable {
+ public final long lowerBits;
+ public final long upperBits;
+
+ private Entry(final long lowerBits, final long upperBits) {
+ this.lowerBits = lowerBits;
+ this.upperBits = upperBits;
+ }
+
+ static @NonNull Entry of(final long longBits) {
+ return of(longBits, longBits);
+ }
+
+ static @NonNull Entry of(final long lowerBits, final long upperBits) {
+ return new Entry(lowerBits, upperBits);
+ }
+
+ @NonNull Entry withLower(final long newLowerBits) {
+ return of(newLowerBits, upperBits);
+ }
+
+ @NonNull Entry withUpper(final long newUpperBits) {
+ return of(lowerBits, newUpperBits);
+ }
+
+ // These two methods provide the same serialization format as the one we've used to serialize
+ // Range<UnsignedLong>
+ static @NonNull Entry readUnsigned(final DataInput in) throws IOException {
+ final byte hdr = WritableObjects.readLongHeader(in);
+ final long first = WritableObjects.readFirstLong(in, hdr);
+ final long second = WritableObjects.readSecondLong(in, hdr) - 1;
+ if (Long.compareUnsigned(first, second) > 0) {
+ throw new IOException("Lower endpoint " + Long.toUnsignedString(first) + " is greater than upper "
+ + "endpoint " + Long.toUnsignedString(second));
+ }
+
+ return new Entry(first, second);
+ }
+
+ void writeUnsigned(final @NonNull DataOutput out) throws IOException {
+ WritableObjects.writeLongs(out, lowerBits, upperBits + 1);
+ }
+
+ @Override
+ @SuppressWarnings("checkstyle:parameterName")
+ public int compareTo(final Entry o) {
+ return Long.compareUnsigned(lowerBits, o.lowerBits);
+ }
+
+ @Override
+ public int hashCode() {
+ return Long.hashCode(lowerBits) * 31 + Long.hashCode(upperBits);
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof Entry)) {
+ return false;
+ }
+ final var other = (Entry) obj;
+ return lowerBits == other.lowerBits && upperBits == other.upperBits;
+ }
+
+ @Override
+ public String toString() {
+ return "[" + Long.toUnsignedString(lowerBits) + ".." + Long.toUnsignedString(upperBits) + "]";
+ }
+ }
+
+ // The idea is rather simple, we track a NavigableSet of range entries, ordered by their lower bound. This means
+ // that for a contains() operation we just need the first headSet() entry. For insert operations we just update
+ // either the lower bound or the upper bound of an existing entry. When we do, we also look at prev/next entry and
+ // if they are contiguous with the updated entry, we adjust the entry once more and remove the prev/next entry.
+ private final @NonNull NavigableSet<Entry> ranges;
+
+ UnsignedLongSet(final NavigableSet<Entry> ranges) {
+ this.ranges = requireNonNull(ranges);
+ }
+
+ public final boolean contains(final long longBits) {
+ final var head = ranges.floor(Entry.of(longBits));
+ return head != null
+ && Long.compareUnsigned(head.lowerBits, longBits) <= 0
+ && Long.compareUnsigned(head.upperBits, longBits) >= 0;
+ }
+
+ public final boolean isEmpty() {
+ return ranges.isEmpty();
+ }
+
+ public final int rangeSize() {
+ return ranges.size();
+ }
+
+ public abstract @NonNull ImmutableUnsignedLongSet immutableCopy();
+
+ public final @NonNull MutableUnsignedLongSet mutableCopy() {
+ return new MutableUnsignedLongSet(new TreeSet<>(ranges));
+ }
+
+ public final @NonNull NavigableSet<Entry> ranges() {
+ return Collections.unmodifiableNavigableSet(ranges);
+ }
+
+ final @NonNull NavigableSet<Entry> trustedRanges() {
+ return ranges;
+ }
+
+ @Override
+ public final int hashCode() {
+ return ranges.hashCode();
+ }
+
+ @Override
+ public final boolean equals(final Object obj) {
+ return obj == this || obj instanceof UnsignedLongSet && ranges.equals(((UnsignedLongSet) obj).ranges);
+ }
+
+ @Override
+ public final String toString() {
+ final var helper = MoreObjects.toStringHelper(this);
+
+ final int size = ranges.size();
+ switch (size) {
+ case 0:
+ break;
+ case 1:
+ helper.add("span", ranges.first());
+ break;
+ default:
+ helper.add("span", Entry.of(ranges.first().lowerBits, ranges.last().upperBits));
+ }
+
+ return helper.add("size", size).toString();
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.dispatch.Futures;
-import akka.dispatch.Mapper;
-import akka.dispatch.OnComplete;
-import akka.util.Timeout;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
-import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListenerRegistration;
-import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-
-/**
- * Default {@link CDSShardAccess} implementation. Listens on leader location
- * change events and distributes them to registered listeners. Also updates
- * current information about leader location accordingly.
- *
- * <p>
- * Sends {@link MakeLeaderLocal} message to local shards and translates its result
- * on behalf users {@link #makeLeaderLocal()} calls.
- *
- * <p>
- * {@link org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer} that
- * creates instances of this class has to call {@link #close()} once it is no
- * longer valid.
- */
-final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener, AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(CDSShardAccessImpl.class);
-
- private final Collection<LeaderLocationListener> listeners = ConcurrentHashMap.newKeySet();
- private final DOMDataTreeIdentifier prefix;
- private final ActorUtils actorUtils;
- private final Timeout makeLeaderLocalTimeout;
-
- private ActorRef roleChangeListenerActor;
-
- private volatile LeaderLocation currentLeader = LeaderLocation.UNKNOWN;
- private volatile boolean closed = false;
-
- CDSShardAccessImpl(final DOMDataTreeIdentifier prefix, final ActorUtils actorUtils) {
- this.prefix = requireNonNull(prefix);
- this.actorUtils = requireNonNull(actorUtils);
- this.makeLeaderLocalTimeout =
- new Timeout(actorUtils.getDatastoreContext().getShardLeaderElectionTimeout().duration().$times(2));
-
- // register RoleChangeListenerActor
- // TODO Maybe we should do this in async
- final Optional<ActorRef> localShardReply =
- actorUtils.findLocalShard(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
- checkState(localShardReply.isPresent(),
- "Local shard for {} not present. Cannot register RoleChangeListenerActor", prefix);
- roleChangeListenerActor =
- actorUtils.getActorSystem().actorOf(RoleChangeListenerActor.props(localShardReply.get(), this));
- }
-
- private void checkNotClosed() {
- checkState(!closed, "CDSDataTreeProducer, that this CDSShardAccess is associated with, is no longer valid");
- }
-
- @Override
- public DOMDataTreeIdentifier getShardIdentifier() {
- checkNotClosed();
- return prefix;
- }
-
- @Override
- public LeaderLocation getLeaderLocation() {
- checkNotClosed();
- // TODO before getting first notification from roleChangeListenerActor
- // we will always return UNKNOWN
- return currentLeader;
- }
-
- @Override
- public CompletionStage<Void> makeLeaderLocal() {
- // TODO when we have running make leader local operation
- // we should just return the same completion stage
- checkNotClosed();
-
- // TODO can we cache local shard actorRef?
- final Future<ActorRef> localShardReply =
- actorUtils.findLocalShardAsync(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
- // we have to tell local shard to make leader local
- final scala.concurrent.Promise<Object> makeLeaderLocalAsk = Futures.promise();
- localShardReply.onComplete(new OnComplete<ActorRef>() {
- @Override
- public void onComplete(final Throwable failure, final ActorRef actorRef) {
- if (failure instanceof LocalShardNotFoundException) {
- LOG.debug("No local shard found for {} - Cannot request leadership transfer to local shard.",
- getShardIdentifier(), failure);
- makeLeaderLocalAsk.failure(failure);
- } else if (failure != null) {
- // TODO should this be WARN?
- LOG.debug("Failed to find local shard for {} - Cannot request leadership transfer to local shard.",
- getShardIdentifier(), failure);
- makeLeaderLocalAsk.failure(failure);
- } else {
- makeLeaderLocalAsk
- .completeWith(actorUtils
- .executeOperationAsync(actorRef, MakeLeaderLocal.INSTANCE, makeLeaderLocalTimeout));
- }
- }
- }, actorUtils.getClientDispatcher());
-
- // we have to transform make leader local request result
- Future<Void> makeLeaderLocalFuture = makeLeaderLocalAsk.future()
- .transform(new Mapper<Object, Void>() {
- @Override
- public Void apply(final Object parameter) {
- return null;
- }
- }, new Mapper<Throwable, Throwable>() {
- @Override
- public Throwable apply(final Throwable parameter) {
- if (parameter instanceof LeadershipTransferFailedException) {
- // do nothing with exception and just pass it as it is
- return parameter;
- }
- // wrap exception in LeadershipTransferFailedEx
- return new LeadershipTransferFailedException("Leadership transfer failed", parameter);
- }
- }, actorUtils.getClientDispatcher());
-
- return FutureConverters.toJava(makeLeaderLocalFuture);
- }
-
- @Override
- public <L extends LeaderLocationListener> LeaderLocationListenerRegistration<L>
- registerLeaderLocationListener(final L listener) {
- checkNotClosed();
- requireNonNull(listener);
- checkArgument(!listeners.contains(listener), "Listener %s is already registered with ShardAccess %s", listener,
- this);
-
- LOG.debug("Registering LeaderLocationListener {}", listener);
-
- listeners.add(listener);
-
- return new LeaderLocationListenerRegistration<L>() {
- @Override
- public L getInstance() {
- return listener;
- }
-
- @Override
- public void close() {
- listeners.remove(listener);
- }
- };
- }
-
- @Override
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void onLeaderLocationChanged(final LeaderLocation location) {
- if (closed) {
- // we are closed already. Do not dispatch any new leader location
- // change events.
- return;
- }
-
- LOG.debug("Received leader location change notification. New leader location: {}", location);
- currentLeader = location;
- listeners.forEach(listener -> {
- try {
- listener.onLeaderLocationChanged(location);
- } catch (Exception e) {
- LOG.warn("Ignoring uncaught exception thrown be LeaderLocationListener {} "
- + "during processing leader location change {}", listener, location, e);
- }
- });
- }
-
- @Override
- public void close() {
- // TODO should we also remove all listeners?
- LOG.debug("Closing {} ShardAccess", prefix);
- closed = true;
-
- if (roleChangeListenerActor != null) {
- // stop RoleChangeListenerActor
- roleChangeListenerActor.tell(PoisonPill.getInstance(), noSender());
- roleChangeListenerActor = null;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * Exception thrown when there was a at any point during the creation of a shard via {@link DistributedShardFactory}.
- */
-@Beta
-public class DOMDataTreeShardCreationFailedException extends Exception {
- private static final long serialVersionUID = 1L;
-
- public DOMDataTreeShardCreationFailedException(final @NonNull String message) {
- super(message);
- }
-
- public DOMDataTreeShardCreationFailedException(final @NonNull String message, final @NonNull Throwable cause) {
- super(message, cause);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
-import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.shard.ChildShardContext;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardChangePublisher
- extends AbstractRegistrationTree<AbstractDOMDataTreeChangeListenerRegistration<?>>
- implements DOMStoreTreeChangePublisher {
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedShardChangePublisher.class);
-
- private final DistributedDataStoreInterface distributedDataStore;
- private final YangInstanceIdentifier shardPath;
-
- private final Map<DOMDataTreeIdentifier, ChildShardContext> childShards;
-
- @GuardedBy("this")
- private final DataTree dataTree;
-
- public DistributedShardChangePublisher(final DataStoreClient client,
- final DistributedDataStoreInterface distributedDataStore,
- final DOMDataTreeIdentifier prefix,
- final Map<DOMDataTreeIdentifier, ChildShardContext> childShards) {
- this.distributedDataStore = distributedDataStore;
- // TODO keeping the whole dataTree thats contained in subshards doesn't seem like a good idea
- // maybe the whole listener logic would be better in the backend shards where we have direct access to the
- // dataTree and wont have to cache it redundantly.
-
- final DataTreeConfiguration baseConfig;
- switch (prefix.getDatastoreType()) {
- case CONFIGURATION:
- baseConfig = DataTreeConfiguration.DEFAULT_CONFIGURATION;
- break;
- case OPERATIONAL:
- baseConfig = DataTreeConfiguration.DEFAULT_OPERATIONAL;
- break;
- default:
- throw new UnsupportedOperationException("Unknown prefix type " + prefix.getDatastoreType());
- }
-
- this.dataTree = new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
- .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
- .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
- .setRootPath(prefix.getRootIdentifier())
- .build());
-
- // XXX: can we guarantee that the root is present in the schemacontext?
- this.dataTree.setEffectiveModelContext(distributedDataStore.getActorUtils().getSchemaContext());
- this.shardPath = prefix.getRootIdentifier();
- this.childShards = childShards;
- }
-
- protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
- LOG.debug("Closing registration {}", registration);
- }
-
- @Override
- public <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
- registerTreeChangeListener(final YangInstanceIdentifier path, final L listener) {
- takeLock();
- try {
- return setupListenerContext(path, listener);
- } finally {
- releaseLock();
- }
- }
-
- private <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
- setupListenerContext(final YangInstanceIdentifier listenerPath, final L listener) {
- // we need to register the listener registration path based on the shards root
- // we have to strip the shard path from the listener path and then register
- YangInstanceIdentifier strippedIdentifier = listenerPath;
- if (!shardPath.isEmpty()) {
- strippedIdentifier = YangInstanceIdentifier.create(stripShardPath(shardPath, listenerPath));
- }
-
- final DOMDataTreeListenerWithSubshards subshardListener =
- new DOMDataTreeListenerWithSubshards(strippedIdentifier, listener);
- final AbstractDOMDataTreeChangeListenerRegistration<L> reg =
- setupContextWithoutSubshards(listenerPath, strippedIdentifier, subshardListener);
-
- for (final ChildShardContext maybeAffected : childShards.values()) {
- if (listenerPath.contains(maybeAffected.getPrefix().getRootIdentifier())) {
- // consumer has initialDataChangeEvent subshard somewhere on lower level
- // register to the notification manager with snapshot and forward child notifications to parent
- LOG.debug("Adding new subshard{{}} to listener at {}", maybeAffected.getPrefix(), listenerPath);
- subshardListener.addSubshard(maybeAffected);
- } else if (maybeAffected.getPrefix().getRootIdentifier().contains(listenerPath)) {
- // bind path is inside subshard
- // TODO can this happen? seems like in ShardedDOMDataTree we are
- // already registering to the lowest shard possible
- throw new UnsupportedOperationException("Listener should be registered directly "
- + "into initialDataChangeEvent subshard");
- }
- }
-
- return reg;
- }
-
- private <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
- setupContextWithoutSubshards(final YangInstanceIdentifier shardLookup,
- final YangInstanceIdentifier listenerPath,
- final DOMDataTreeListenerWithSubshards listener) {
-
- LOG.debug("Registering root listener full path: {}, path inside shard: {}", shardLookup, listenerPath);
-
- // register in the shard tree
- final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> node =
- findNodeFor(listenerPath.getPathArguments());
-
- // register listener in CDS
- ListenerRegistration<DOMDataTreeChangeListener> listenerReg = distributedDataStore
- .registerProxyListener(shardLookup, listenerPath, listener);
-
- @SuppressWarnings("unchecked")
- final AbstractDOMDataTreeChangeListenerRegistration<L> registration =
- new AbstractDOMDataTreeChangeListenerRegistration<>((L) listener) {
- @Override
- protected void removeRegistration() {
- listener.close();
- DistributedShardChangePublisher.this.removeRegistration(node, this);
- registrationRemoved(this);
- listenerReg.close();
- }
- };
- addRegistration(node, registration);
-
- return registration;
- }
-
- private static Iterable<PathArgument> stripShardPath(final YangInstanceIdentifier shardPath,
- final YangInstanceIdentifier listenerPath) {
- if (shardPath.isEmpty()) {
- return listenerPath.getPathArguments();
- }
-
- final List<PathArgument> listenerPathArgs = new ArrayList<>(listenerPath.getPathArguments());
- final Iterator<PathArgument> shardIter = shardPath.getPathArguments().iterator();
- final Iterator<PathArgument> listenerIter = listenerPathArgs.iterator();
-
- while (shardIter.hasNext()) {
- if (shardIter.next().equals(listenerIter.next())) {
- listenerIter.remove();
- } else {
- break;
- }
- }
-
- return listenerPathArgs;
- }
-
- synchronized DataTreeCandidate applyChanges(final YangInstanceIdentifier listenerPath,
- final Collection<DataTreeCandidate> changes) throws DataValidationFailedException {
- final DataTreeModification modification = dataTree.takeSnapshot().newModification();
- for (final DataTreeCandidate change : changes) {
- try {
- DataTreeCandidates.applyToModification(modification, change);
- } catch (SchemaValidationFailedException e) {
- LOG.error("Validation failed", e);
- }
- }
-
- modification.ready();
-
- final DataTreeCandidate candidate;
-
- dataTree.validate(modification);
-
- // strip nodes we dont need since this listener doesn't have to be registered at the root of the DataTree
- candidate = dataTree.prepare(modification);
- dataTree.commit(candidate);
-
-
- DataTreeCandidateNode modifiedChild = candidate.getRootNode();
-
- for (final PathArgument pathArgument : listenerPath.getPathArguments()) {
- modifiedChild = modifiedChild.getModifiedChild(pathArgument).orElse(null);
- }
-
-
- if (modifiedChild == null) {
- modifiedChild = DataTreeCandidateNodes.empty(dataTree.getRootPath().getLastPathArgument());
- }
-
- return DataTreeCandidates.newDataTreeCandidate(dataTree.getRootPath(), modifiedChild);
- }
-
-
- private final class DOMDataTreeListenerWithSubshards implements DOMDataTreeChangeListener {
-
- private final YangInstanceIdentifier listenerPath;
- private final DOMDataTreeChangeListener delegate;
- private final Map<YangInstanceIdentifier, ListenerRegistration<DOMDataTreeChangeListener>> registrations =
- new ConcurrentHashMap<>();
-
- @GuardedBy("this")
- private final Collection<DataTreeCandidate> stashedDataTreeCandidates = new LinkedList<>();
-
- DOMDataTreeListenerWithSubshards(final YangInstanceIdentifier listenerPath,
- final DOMDataTreeChangeListener delegate) {
- this.listenerPath = requireNonNull(listenerPath);
- this.delegate = requireNonNull(delegate);
- }
-
- @Override
- public synchronized void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
- LOG.debug("Received data changed {}", changes);
-
- if (!stashedDataTreeCandidates.isEmpty()) {
- LOG.debug("Adding stashed subshards' changes {}", stashedDataTreeCandidates);
- changes.addAll(stashedDataTreeCandidates);
- stashedDataTreeCandidates.clear();
- }
-
- try {
- applyChanges(listenerPath, changes);
- } catch (final DataValidationFailedException e) {
- // TODO should we fail here? What if stashed changes
- // (changes from subshards) got ahead more than one generation
- // from current shard. Than we can fail to apply this changes
- // upon current data tree, but once we get respective changes
- // from current shard, we can apply also changes from
- // subshards.
- //
- // However, we can loose ability to notice and report some
- // errors then. For example, we cannot detect potential lost
- // changes from current shard.
- LOG.error("Validation failed for modification built from changes {}, current data tree: {}",
- changes, dataTree, e);
- throw new RuntimeException("Notification validation failed", e);
- }
-
- delegate.onDataTreeChanged(changes);
- }
-
- synchronized void onDataTreeChanged(final YangInstanceIdentifier pathFromRoot,
- final Collection<DataTreeCandidate> changes) {
- final YangInstanceIdentifier changeId =
- YangInstanceIdentifier.create(stripShardPath(dataTree.getRootPath(), pathFromRoot));
-
- final List<DataTreeCandidate> newCandidates = changes.stream()
- .map(candidate -> DataTreeCandidates.newDataTreeCandidate(changeId, candidate.getRootNode()))
- .collect(Collectors.toList());
-
- try {
- delegate.onDataTreeChanged(Collections.singleton(applyChanges(listenerPath, newCandidates)));
- } catch (final DataValidationFailedException e) {
- // We cannot apply changes from subshard to current data tree.
- // Maybe changes from current shard haven't been applied to
- // data tree yet. Postpone processing of these changes till we
- // receive changes from current shard.
- LOG.debug("Validation for modification built from subshard {} changes {} failed, current data tree {}.",
- pathFromRoot, changes, dataTree, e);
- stashedDataTreeCandidates.addAll(newCandidates);
- }
- }
-
- void addSubshard(final ChildShardContext context) {
- checkState(context.getShard() instanceof DOMStoreTreeChangePublisher,
- "All subshards that are initialDataChangeEvent part of ListenerContext need to be listenable");
-
- final DOMStoreTreeChangePublisher listenableShard = (DOMStoreTreeChangePublisher) context.getShard();
- // since this is going into subshard we want to listen for ALL changes in the subshard
- registrations.put(context.getPrefix().getRootIdentifier(),
- listenableShard.registerTreeChangeListener(
- context.getPrefix().getRootIdentifier(), changes -> onDataTreeChanged(
- context.getPrefix().getRootIdentifier(), changes)));
- }
-
- void close() {
- for (final ListenerRegistration<DOMDataTreeChangeListener> registration : registrations.values()) {
- registration.close();
- }
- registrations.clear();
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import java.util.concurrent.CompletionStage;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-
-/**
- * A factory that handles addition of new clustered shard's based on a prefix. This factory is a QoL class that handles
- * all the boilerplate that comes with registration of a new clustered shard into the system and creating the backend
- * shard/replicas that come along with it.
- */
-@Beta
-public interface DistributedShardFactory {
- /**
- * Register a new shard that is rooted at the desired prefix with replicas on the provided members.
- * Note to register a shard without replicas you still need to provide at least one Member for the shard.
- *
- * @param prefix Shard root
- * @param replicaMembers Members that this shard is replicated on, has to have at least one Member even if the shard
- * should not be replicated.
- * @return A future that will be completed with a DistributedShardRegistration once the backend and frontend shards
- * are spawned.
- * @throws DOMDataTreeShardingConflictException If the initial check for a conflict on the local node fails, the
- * sharding configuration won't be updated if this exception is thrown.
- */
- CompletionStage<DistributedShardRegistration>
- createDistributedShard(DOMDataTreeIdentifier prefix, Collection<MemberName> replicaMembers)
- throws DOMDataTreeShardingConflictException;
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.spi.shard.ChildShardContext;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardProducer;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.ReadableWriteableDOMDataTreeShard;
-import org.opendaylight.mdsal.dom.spi.shard.SubshardProducerSpecification;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableDOMDataTreeShard;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Proxy implementation of a shard that creates forwarding producers to the backend shard.
- */
-class DistributedShardFrontend implements ReadableWriteableDOMDataTreeShard {
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedShardFrontend.class);
-
- private final DataStoreClient client;
- private final DOMDataTreeIdentifier shardRoot;
- @GuardedBy("this")
- private final Map<DOMDataTreeIdentifier, ChildShardContext> childShards = new HashMap<>();
- @GuardedBy("this")
- private final List<ShardProxyProducer> producers = new ArrayList<>();
-
- private final DistributedShardChangePublisher publisher;
-
- DistributedShardFrontend(final DistributedDataStoreInterface distributedDataStore,
- final DataStoreClient client,
- final DOMDataTreeIdentifier shardRoot) {
- this.client = requireNonNull(client);
- this.shardRoot = requireNonNull(shardRoot);
-
- publisher = new DistributedShardChangePublisher(client, requireNonNull(distributedDataStore), shardRoot,
- childShards);
- }
-
- @Override
- public synchronized DOMDataTreeShardProducer createProducer(final Collection<DOMDataTreeIdentifier> paths) {
- for (final DOMDataTreeIdentifier prodPrefix : paths) {
- checkArgument(shardRoot.contains(prodPrefix), "Prefix %s is not contained under shard root", prodPrefix,
- paths);
- }
-
- final ShardProxyProducer ret =
- new ShardProxyProducer(shardRoot, paths, client, createModificationFactory(paths));
- producers.add(ret);
- return ret;
- }
-
- @Override
- public synchronized void onChildAttached(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
- LOG.debug("{} : Child shard attached at {}", shardRoot, prefix);
- checkArgument(child != this, "Attempted to attach child %s onto self", this);
- addChildShard(prefix, child);
- updateProducers();
- }
-
- @Override
- public synchronized void onChildDetached(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
- LOG.debug("{} : Child shard detached at {}", shardRoot, prefix);
- childShards.remove(prefix);
- updateProducers();
- // TODO we should grab the dataTreeSnapshot that's in the shard and apply it to this shard
- }
-
- private void addChildShard(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
- checkArgument(child instanceof WriteableDOMDataTreeShard);
- childShards.put(prefix, new ChildShardContext(prefix, (WriteableDOMDataTreeShard) child));
- }
-
- DistributedShardModificationFactory createModificationFactory(final Collection<DOMDataTreeIdentifier> prefixes) {
- // TODO this could be abstract
- final Map<DOMDataTreeIdentifier, SubshardProducerSpecification> affectedSubshards = new HashMap<>();
-
- for (final DOMDataTreeIdentifier producerPrefix : prefixes) {
- for (final ChildShardContext maybeAffected : childShards.values()) {
- final DOMDataTreeIdentifier bindPath;
- if (producerPrefix.contains(maybeAffected.getPrefix())) {
- bindPath = maybeAffected.getPrefix();
- } else if (maybeAffected.getPrefix().contains(producerPrefix)) {
- // Bound path is inside subshard
- bindPath = producerPrefix;
- } else {
- continue;
- }
-
- SubshardProducerSpecification spec = affectedSubshards.computeIfAbsent(maybeAffected.getPrefix(),
- k -> new SubshardProducerSpecification(maybeAffected));
- spec.addPrefix(bindPath);
- }
- }
-
- final DistributedShardModificationFactoryBuilder builder =
- new DistributedShardModificationFactoryBuilder(shardRoot);
- for (final SubshardProducerSpecification spec : affectedSubshards.values()) {
- final ForeignShardModificationContext foreignContext =
- new ForeignShardModificationContext(spec.getPrefix(), spec.createProducer());
- builder.addSubshard(foreignContext);
- builder.addSubshard(spec.getPrefix(), foreignContext);
- }
-
- return builder.build();
- }
-
- private void updateProducers() {
- for (final ShardProxyProducer producer : producers) {
- producer.setModificationFactory(createModificationFactory(producer.getPrefixes()));
- }
- }
-
- @Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
- final YangInstanceIdentifier treeId, final L listener) {
- return publisher.registerTreeChangeListener(treeId, listener);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static java.util.Objects.requireNonNull;
-
-import java.util.Map;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.WritableNodeOperation;
-import org.opendaylight.mdsal.dom.spi.shard.WriteCursorStrategy;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableModificationNode;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableNodeWithSubshard;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-/**
- * Shard modification that consists of the whole shard context, provides cursors which correctly delegate to subshards
- * if any are present.
- */
-public class DistributedShardModification extends WriteableNodeWithSubshard {
-
- private final DistributedShardModificationContext context;
- private final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards;
-
- public DistributedShardModification(final DistributedShardModificationContext context,
- final Map<PathArgument, WriteableModificationNode> subshards,
- final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards) {
- super(subshards);
- this.context = requireNonNull(context);
- this.childShards = requireNonNull(childShards);
- }
-
- @Override
- public PathArgument getIdentifier() {
- return context.getIdentifier().getRootIdentifier().getLastPathArgument();
- }
-
- @Override
- public WriteCursorStrategy createOperation(final DOMDataTreeWriteCursor parentCursor) {
- return new WritableNodeOperation(this, context.cursor()) {
- @Override
- public void exit() {
- throw new IllegalStateException("Can not exit data tree root");
- }
- };
- }
-
- void cursorClosed() {
- context.closeCursor();
- }
-
- DOMStoreThreePhaseCommitCohort seal() {
- childShards.values().stream().filter(ForeignShardModificationContext::isModified)
- .forEach(ForeignShardModificationContext::ready);
-
- return context.ready();
- }
-
- DOMDataTreeIdentifier getPrefix() {
- return context.getIdentifier();
- }
-
- Map<DOMDataTreeIdentifier, ForeignShardModificationContext> getChildShards() {
- return childShards;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-
-/**
- * The context for a single shards modification, keeps a ClientTransaction so it can route requests correctly.
- */
-public class DistributedShardModificationContext {
-
- private ClientTransaction transaction;
- private DOMDataTreeIdentifier identifier;
- private DOMDataTreeWriteCursor cursor;
-
- public DistributedShardModificationContext(final ClientTransaction transaction,
- final DOMDataTreeIdentifier identifier) {
- this.transaction = transaction;
- this.identifier = identifier;
- }
-
- public DOMDataTreeIdentifier getIdentifier() {
- return identifier;
- }
-
- DOMDataTreeWriteCursor cursor() {
- if (cursor == null) {
- cursor = transaction.openCursor();
- }
-
- return cursor;
- }
-
- DOMStoreThreePhaseCommitCohort ready() {
- if (cursor != null) {
- cursor.close();
- cursor = null;
- }
-
- return transaction.ready();
- }
-
- void closeCursor() {
- if (cursor != null) {
- cursor.close();
- cursor = null;
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.mdsal.dom.spi.shard.AbstractDataModificationCursor;
-import org.opendaylight.mdsal.dom.spi.shard.WriteCursorStrategy;
-
-/**
- * Internal cursor implementation consisting of WriteCursorStrategies which forwards writes to foreign modifications
- * if any.
- */
-public class DistributedShardModificationCursor extends AbstractDataModificationCursor<DistributedShardModification> {
-
- private ShardProxyTransaction parent;
-
- public DistributedShardModificationCursor(final DistributedShardModification root,
- final ShardProxyTransaction parent) {
- super(root);
- this.parent = parent;
- }
-
- @Override
- protected WriteCursorStrategy getRootOperation(final DistributedShardModification root) {
- return root.createOperation(null);
- }
-
- @Override
- public void close() {
- parent.cursorClosed();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableModificationNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-/**
- * Factory for {@link DistributedShardModification}.
- */
-public final class DistributedShardModificationFactory {
- private final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards;
- private final Map<PathArgument, WriteableModificationNode> children;
- private final DOMDataTreeIdentifier root;
-
- DistributedShardModificationFactory(final DOMDataTreeIdentifier root,
- final Map<PathArgument, WriteableModificationNode> children,
- final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards) {
- this.root = requireNonNull(root);
- this.children = ImmutableMap.copyOf(children);
- this.childShards = ImmutableMap.copyOf(childShards);
- }
-
- @VisibleForTesting
- Map<PathArgument, WriteableModificationNode> getChildren() {
- return children;
- }
-
- @VisibleForTesting
- Map<DOMDataTreeIdentifier, ForeignShardModificationContext> getChildShards() {
- return childShards;
- }
-
- DistributedShardModification createModification(final ClientTransaction transaction) {
- return new DistributedShardModification(
- new DistributedShardModificationContext(transaction, root), children, childShards);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.shard.AbstractShardModificationFactoryBuilder;
-
-/**
- * Builder for {@link DistributedShardModificationFactory}.
- */
-public class DistributedShardModificationFactoryBuilder
- extends AbstractShardModificationFactoryBuilder<DistributedShardModificationFactory> {
-
-
- public DistributedShardModificationFactoryBuilder(final DOMDataTreeIdentifier root) {
- super(root);
- }
-
- @Override
- public DistributedShardModificationFactory build() {
- return new DistributedShardModificationFactory(root, buildChildren(), childShards);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.Beta;
-import java.util.concurrent.CompletionStage;
-
-/**
- * Registration of the CDS shard that allows you to remove the shard from the system by closing the registration.
- * This removal is done asynchronously.
- */
-@Beta
-public interface DistributedShardRegistration {
-
- /**
- * Removes the shard from the system, this removal is done asynchronously, the future completes once the
- * backend shard is no longer present.
- */
- CompletionStage<Void> close();
-}
+++ /dev/null
-/*
- * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.dispatch.Mapper;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
-import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
-import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeServiceExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
-import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
-import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.prefix.shard.configuration.rev170110.PrefixShards;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
- * {@link ShardedDataTreeActor}. Also provides QoL method for addition of prefix based clustered shard into the system.
- */
-public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService,
- DistributedShardFactory {
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
-
- private static final int MAX_ACTOR_CREATION_RETRIES = 100;
- private static final int ACTOR_RETRY_DELAY = 100;
- private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
- private static final int LOOKUP_TASK_MAX_RETRIES = 100;
- static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION =
- new FiniteDuration(LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3, TimeUnit.SECONDS);
- static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
-
- static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
-
- private final ShardedDOMDataTree shardedDOMDataTree;
- private final ActorSystem actorSystem;
- private final DistributedDataStoreInterface distributedOperDatastore;
- private final DistributedDataStoreInterface distributedConfigDatastore;
-
- private final ActorRef shardedDataTreeActor;
- private final MemberName memberName;
-
- @GuardedBy("shards")
- private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
- DOMDataTreePrefixTable.create();
-
- private final EnumMap<LogicalDatastoreType, Entry<DataStoreClient, ActorRef>> configurationShardMap =
- new EnumMap<>(LogicalDatastoreType.class);
-
- private final EnumMap<LogicalDatastoreType, PrefixedShardConfigWriter> writerMap =
- new EnumMap<>(LogicalDatastoreType.class);
-
- private final PrefixedShardConfigUpdateHandler updateHandler;
-
- public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
- final DistributedDataStoreInterface distributedOperDatastore,
- final DistributedDataStoreInterface distributedConfigDatastore) {
- this.actorSystem = requireNonNull(actorSystemProvider).getActorSystem();
- this.distributedOperDatastore = requireNonNull(distributedOperDatastore);
- this.distributedConfigDatastore = requireNonNull(distributedConfigDatastore);
- shardedDOMDataTree = new ShardedDOMDataTree();
-
- shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
- new ShardedDataTreeActorCreator()
- .setShardingService(this)
- .setActorSystem(actorSystem)
- .setClusterWrapper(distributedConfigDatastore.getActorUtils().getClusterWrapper())
- .setDistributedConfigDatastore(distributedConfigDatastore)
- .setDistributedOperDatastore(distributedOperDatastore)
- .setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
- ACTOR_ID);
-
- this.memberName = distributedConfigDatastore.getActorUtils().getCurrentMemberName();
-
- updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
- distributedConfigDatastore.getActorUtils().getCurrentMemberName());
-
- LOG.debug("{} - Starting prefix configuration shards", memberName);
- createPrefixConfigShard(distributedConfigDatastore);
- createPrefixConfigShard(distributedOperDatastore);
- }
-
- private static void createPrefixConfigShard(final DistributedDataStoreInterface dataStore) {
- Configuration configuration = dataStore.getActorUtils().getConfiguration();
- Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
- CreateShard createShardMessage =
- new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
- "prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
- memberNames),
- Shard.builder(), dataStore.getActorUtils().getDatastoreContext());
-
- dataStore.getActorUtils().getShardManager().tell(createShardMessage, noSender());
- }
-
- /**
- * This will try to initialize prefix configuration shards upon their
- * successful start. We need to create writers to these shards, so we can
- * satisfy future {@link #createDistributedShard} and
- * {@link #resolveShardAdditions} requests and update prefix configuration
- * shards accordingly.
- *
- * <p>
- * We also need to initialize listeners on these shards, so we can react
- * on changes made on them by other cluster members or even by ourselves.
- *
- * <p>
- * Finally, we need to be sure that default shards for both operational and
- * configuration data stores are up and running and we have distributed
- * shards frontend created for them.
- *
- * <p>
- * This is intended to be invoked by blueprint as initialization method.
- */
- public void init() {
- // create our writers to the configuration
- try {
- LOG.debug("{} - starting config shard lookup.", memberName);
-
- // We have to wait for prefix config shards to be up and running
- // so we can create datastore clients for them
- handleConfigShardLookup().get(SHARD_FUTURE_TIMEOUT_DURATION.length(), SHARD_FUTURE_TIMEOUT_DURATION.unit());
- } catch (InterruptedException | ExecutionException | TimeoutException e) {
- throw new IllegalStateException("Prefix config shards not found", e);
- }
-
- try {
- LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
- configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
- createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
- distributedConfigDatastore.getActorUtils()));
- } catch (final DOMDataTreeShardCreationFailedException e) {
- throw new IllegalStateException(
- "Unable to create datastoreClient for config DS prefix configuration shard.", e);
- }
-
- try {
- configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
- createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
- distributedOperDatastore.getActorUtils()));
-
- } catch (final DOMDataTreeShardCreationFailedException e) {
- throw new IllegalStateException(
- "Unable to create datastoreClient for oper DS prefix configuration shard.", e);
- }
-
- writerMap.put(LogicalDatastoreType.CONFIGURATION, new PrefixedShardConfigWriter(
- configurationShardMap.get(LogicalDatastoreType.CONFIGURATION).getKey()));
-
- writerMap.put(LogicalDatastoreType.OPERATIONAL, new PrefixedShardConfigWriter(
- configurationShardMap.get(LogicalDatastoreType.OPERATIONAL).getKey()));
-
- updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
- updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
-
- distributedConfigDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
- distributedOperDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
-
-
- //create shard registration for DEFAULT_SHARD
- initDefaultShard(LogicalDatastoreType.CONFIGURATION);
- initDefaultShard(LogicalDatastoreType.OPERATIONAL);
- }
-
- private ListenableFuture<List<Void>> handleConfigShardLookup() {
-
- final ListenableFuture<Void> configFuture = lookupConfigShard(LogicalDatastoreType.CONFIGURATION);
- final ListenableFuture<Void> operFuture = lookupConfigShard(LogicalDatastoreType.OPERATIONAL);
-
- return Futures.allAsList(configFuture, operFuture);
- }
-
- private ListenableFuture<Void> lookupConfigShard(final LogicalDatastoreType type) {
- final SettableFuture<Void> future = SettableFuture.create();
-
- final Future<Object> ask =
- Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
-
- ask.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable throwable, final Object result) {
- if (throwable != null) {
- future.setException(throwable);
- } else {
- future.set(null);
- }
- }
- }, actorSystem.dispatcher());
-
- return future;
- }
-
- @Override
- public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
- final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
- final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers)
- throws DOMDataTreeLoopException {
- return shardedDOMDataTree.registerListener(listener, subtrees, allowRxMerges, producers);
- }
-
- @Override
- public ClassToInstanceMap<DOMDataTreeServiceExtension> getExtensions() {
- return ImmutableClassToInstanceMap.of();
- }
-
- @Override
- public DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
- LOG.debug("{} - Creating producer for {}", memberName, subtrees);
- final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
-
- final Object response = distributedConfigDatastore.getActorUtils()
- .executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
- if (response == null) {
- LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
- return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
- distributedConfigDatastore.getActorUtils(), shards);
- }
-
- closeProducer(producer);
-
- if (response instanceof Throwable) {
- Throwables.throwIfUnchecked((Throwable) response);
- throw new RuntimeException((Throwable) response);
- }
- throw new RuntimeException("Unexpected response to create producer received." + response);
- }
-
- @Override
- public CompletionStage<DistributedShardRegistration> createDistributedShard(
- final DOMDataTreeIdentifier prefix, final Collection<MemberName> replicaMembers)
- throws DOMDataTreeShardingConflictException {
-
- synchronized (shards) {
- final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
- shards.lookup(prefix);
- if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
- throw new DOMDataTreeShardingConflictException(
- "Prefix " + prefix + " is already occupied by another shard.");
- }
- }
-
- final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
-
- final ListenableFuture<Void> writeFuture =
- writer.writeConfig(prefix.getRootIdentifier(), replicaMembers);
-
- final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
- Futures.addCallback(writeFuture, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void result) {
-
- final Future<Object> ask =
- Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
-
- shardRegistrationPromise.completeWith(ask.transform(
- new Mapper<Object, DistributedShardRegistration>() {
- @Override
- public DistributedShardRegistration apply(final Object parameter) {
- return new DistributedShardRegistrationImpl(
- prefix, shardedDataTreeActor, DistributedShardedDOMDataTree.this);
- }
- },
- new Mapper<Throwable, Throwable>() {
- @Override
- public Throwable apply(final Throwable throwable) {
- return new DOMDataTreeShardCreationFailedException(
- "Unable to create a cds shard.", throwable);
- }
- }, actorSystem.dispatcher()));
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- shardRegistrationPromise.failure(
- new DOMDataTreeShardCreationFailedException("Unable to create a cds shard.", throwable));
- }
- }, MoreExecutors.directExecutor());
-
- return FutureConverters.toJava(shardRegistrationPromise.future());
- }
-
- void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
- LOG.debug("{}: Resolving additions : {}", memberName, additions);
- // we need to register the shards from top to bottom, so we need to atleast make sure the ordering reflects that
- additions
- .stream()
- .sorted(Comparator.comparingInt(o -> o.getRootIdentifier().getPathArguments().size()))
- .forEachOrdered(this::createShardFrontend);
- }
-
- void resolveShardRemovals(final Set<DOMDataTreeIdentifier> removals) {
- LOG.debug("{}: Resolving removals : {}", memberName, removals);
-
- // do we need to go from bottom to top?
- removals.forEach(this::despawnShardFrontend);
- }
-
- private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
- LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
- final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
- final DistributedDataStoreInterface distributedDataStore =
- prefix.getDatastoreType().equals(LogicalDatastoreType.CONFIGURATION)
- ? distributedConfigDatastore : distributedOperDatastore;
-
- try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
- final Entry<DataStoreClient, ActorRef> entry =
- createDatastoreClient(shardName, distributedDataStore.getActorUtils());
-
- final DistributedShardFrontend shard =
- new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
-
- final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
- shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
-
- synchronized (shards) {
- shards.store(prefix, reg);
- }
-
- } catch (final DOMDataTreeShardingConflictException e) {
- LOG.error("{}: Prefix {} is already occupied by another shard",
- distributedConfigDatastore.getActorUtils().getClusterWrapper().getCurrentMemberName(), prefix, e);
- } catch (DOMDataTreeProducerException e) {
- LOG.error("Unable to close producer", e);
- } catch (DOMDataTreeShardCreationFailedException e) {
- LOG.error("Unable to create datastore client for shard {}", prefix, e);
- }
- }
-
- private void despawnShardFrontend(final DOMDataTreeIdentifier prefix) {
- LOG.debug("{}: Removing CDS shard for prefix: {}", memberName, prefix);
- final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup;
- synchronized (shards) {
- lookup = shards.lookup(prefix);
- }
-
- if (lookup == null || !lookup.getValue().getPrefix().equals(prefix)) {
- LOG.debug("{}: Received despawn for non-existing CDS shard frontend, prefix: {}, ignoring..",
- memberName, prefix);
- return;
- }
-
- lookup.getValue().close();
- // need to remove from our local table thats used for tracking
- synchronized (shards) {
- shards.remove(prefix);
- }
-
- final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
- final ListenableFuture<Void> future = writer.removeConfig(prefix.getRootIdentifier());
-
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void result) {
- LOG.debug("{} - Succesfuly removed shard for {}", memberName, prefix);
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.error("Removal of shard {} from configuration failed.", prefix, throwable);
- }
- }, MoreExecutors.directExecutor());
- }
-
- DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookupShardFrontend(
- final DOMDataTreeIdentifier prefix) {
- synchronized (shards) {
- return shards.lookup(prefix);
- }
- }
-
- DOMDataTreeProducer localCreateProducer(final Collection<DOMDataTreeIdentifier> prefix) {
- return shardedDOMDataTree.createProducer(prefix);
- }
-
- @Override
- public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(
- final DOMDataTreeIdentifier prefix, final T shard, final DOMDataTreeProducer producer)
- throws DOMDataTreeShardingConflictException {
-
- LOG.debug("Registering shard[{}] at prefix: {}", shard, prefix);
-
- if (producer instanceof ProxyProducer) {
- return shardedDOMDataTree.registerDataTreeShard(prefix, shard, ((ProxyProducer) producer).delegate());
- }
-
- return shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private Entry<DataStoreClient, ActorRef> createDatastoreClient(final String shardName, final ActorUtils actorUtils)
- throws DOMDataTreeShardCreationFailedException {
-
- LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
- final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorUtils, shardName);
-
- final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
- try {
- return new SimpleEntry<>(SimpleDataStoreClientActor
- .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS), clientActor);
- } catch (final Exception e) {
- LOG.error("{}: Failed to get actor for {}", distributedDataStoreClientProps, memberName, e);
- clientActor.tell(PoisonPill.getInstance(), noSender());
- throw new DOMDataTreeShardCreationFailedException(
- "Unable to create datastore client for shard{" + shardName + "}", e);
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private void initDefaultShard(final LogicalDatastoreType logicalDatastoreType) {
-
- final PrefixedShardConfigWriter writer = writerMap.get(logicalDatastoreType);
-
- if (writer.checkDefaultIsPresent()) {
- LOG.debug("{}: Default shard for {} is already present in the config. Possibly saved in snapshot.",
- memberName, logicalDatastoreType);
- } else {
- try {
- // Currently the default shard configuration is present in the out-of-box modules.conf and is
- // expected to be present. So look up the local default shard here and create the frontend.
-
- // TODO we don't have to do it for config and operational default shard separately. Just one of them
- // should be enough
- final ActorUtils actorUtils = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
- ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
- final Optional<ActorRef> defaultLocalShardOptional =
- actorUtils.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.empty()));
-
- if (defaultLocalShardOptional.isPresent()) {
- LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
- logicalDatastoreType);
- createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType,
- YangInstanceIdentifier.empty()));
- }
-
- // The local shard isn't present - we assume that means the local member isn't in the replica list
- // and will be dynamically created later via an explicit add-shard-replica request. This is the
- // bootstrapping mechanism to add a new node into an existing cluster. The following code to create
- // the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
- // the default shard is a module-based shard by default, it makes sense to always treat it as such,
- // ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
-// final Collection<MemberName> names = distributedConfigDatastore.getActorUtils().getConfiguration()
-// .getUniqueMemberNamesForAllShards();
-// Await.result(FutureConverters.toScala(createDistributedShard(
-// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.empty()), names)),
-// SHARD_FUTURE_TIMEOUT_DURATION);
-// } catch (DOMDataTreeShardingConflictException e) {
-// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
-// memberName, logicalDatastoreType);
- } catch (Exception e) {
- LOG.error("{}: Default shard initialization for {} failed", memberName, logicalDatastoreType, e);
- throw new RuntimeException(e);
- }
- }
- }
-
- private static void closeProducer(final DOMDataTreeProducer producer) {
- try {
- producer.close();
- } catch (final DOMDataTreeProducerException e) {
- LOG.error("Unable to close producer", e);
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private static ActorRef createShardedDataTreeActor(final ActorSystem actorSystem,
- final ShardedDataTreeActorCreator creator,
- final String shardDataTreeActorId) {
- Exception lastException = null;
-
- for (int i = 0; i < MAX_ACTOR_CREATION_RETRIES; i++) {
- try {
- return actorSystem.actorOf(creator.props(), shardDataTreeActorId);
- } catch (final Exception e) {
- lastException = e;
- Uninterruptibles.sleepUninterruptibly(ACTOR_RETRY_DELAY, ACTOR_RETRY_TIME_UNIT);
- LOG.debug("Could not create actor {} because of {} -"
- + " waiting for sometime before retrying (retry count = {})",
- shardDataTreeActorId, e.getMessage(), i);
- }
- }
-
- throw new IllegalStateException("Failed to create actor for ShardedDOMDataTree", lastException);
- }
-
- private class DistributedShardRegistrationImpl implements DistributedShardRegistration {
-
- private final DOMDataTreeIdentifier prefix;
- private final ActorRef shardedDataTreeActor;
- private final DistributedShardedDOMDataTree distributedShardedDOMDataTree;
-
- DistributedShardRegistrationImpl(final DOMDataTreeIdentifier prefix,
- final ActorRef shardedDataTreeActor,
- final DistributedShardedDOMDataTree distributedShardedDOMDataTree) {
- this.prefix = prefix;
- this.shardedDataTreeActor = shardedDataTreeActor;
- this.distributedShardedDOMDataTree = distributedShardedDOMDataTree;
- }
-
- @Override
- public CompletionStage<Void> close() {
- // first despawn on the local node
- distributedShardedDOMDataTree.despawnShardFrontend(prefix);
- // update the config so the remote nodes are updated
- final Future<Object> ask =
- Patterns.ask(shardedDataTreeActor, new PrefixShardRemovalLookup(prefix), SHARD_FUTURE_TIMEOUT);
-
- final Future<Void> closeFuture = ask.transform(
- new Mapper<Object, Void>() {
- @Override
- public Void apply(final Object parameter) {
- return null;
- }
- },
- new Mapper<Throwable, Throwable>() {
- @Override
- public Throwable apply(final Throwable throwable) {
- return throwable;
- }
- }, actorSystem.dispatcher());
-
- return FutureConverters.toJava(closeFuture);
- }
- }
-
- // TODO what about producers created by this producer?
- // They should also be CDSProducers
- private static final class ProxyProducer extends ForwardingObject implements CDSDataTreeProducer {
-
- private final DOMDataTreeProducer delegate;
- private final Collection<DOMDataTreeIdentifier> subtrees;
- private final ActorRef shardDataTreeActor;
- private final ActorUtils actorUtils;
- @GuardedBy("shardAccessMap")
- private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
-
- // We don't have to guard access to shardTable in ProxyProducer.
- // ShardTable's entries relevant to this ProxyProducer shouldn't
- // change during producer's lifetime.
- private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardTable;
-
- ProxyProducer(final DOMDataTreeProducer delegate,
- final Collection<DOMDataTreeIdentifier> subtrees,
- final ActorRef shardDataTreeActor,
- final ActorUtils actorUtils,
- final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardLayout) {
- this.delegate = requireNonNull(delegate);
- this.subtrees = requireNonNull(subtrees);
- this.shardDataTreeActor = requireNonNull(shardDataTreeActor);
- this.actorUtils = requireNonNull(actorUtils);
- this.shardTable = requireNonNull(shardLayout);
- }
-
- @Override
- public DOMDataTreeCursorAwareTransaction createTransaction(final boolean isolated) {
- return delegate.createTransaction(isolated);
- }
-
- @Override
- @SuppressWarnings("checkstyle:hiddenField")
- public DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
- // TODO we probably don't need to distribute this on the remote nodes since once we have this producer
- // open we surely have the rights to all the subtrees.
- return delegate.createProducer(subtrees);
- }
-
- @Override
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void close() throws DOMDataTreeProducerException {
- delegate.close();
-
- synchronized (shardAccessMap) {
- shardAccessMap.values().forEach(CDSShardAccessImpl::close);
- }
-
- final Object o = actorUtils.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
- if (o instanceof DOMDataTreeProducerException) {
- throw (DOMDataTreeProducerException) o;
- } else if (o instanceof Throwable) {
- throw new DOMDataTreeProducerException("Unable to close producer", (Throwable) o);
- }
- }
-
- @Override
- protected DOMDataTreeProducer delegate() {
- return delegate;
- }
-
- @Override
- public CDSShardAccess getShardAccess(final DOMDataTreeIdentifier subtree) {
- checkArgument(subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
- "Subtree %s is not controlled by this producer %s", subtree, this);
-
- final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
- shardTable.lookup(subtree);
- checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
-
- final DOMDataTreeIdentifier lookupId = lookup.getValue().getPrefix();
-
- synchronized (shardAccessMap) {
- if (shardAccessMap.get(lookupId) != null) {
- return shardAccessMap.get(lookupId);
- }
-
- // TODO Maybe we can have static factory method and return the same instance
- // for same subtrees. But maybe it is not needed since there can be only one
- // producer attached to some subtree at a time. And also how we can close ShardAccess
- // then
- final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(lookupId, actorUtils);
- shardAccessMap.put(lookupId, shardAccess);
- return shardAccess;
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-
-import akka.actor.ActorRef;
-import akka.actor.Status;
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Base class for lookup tasks. Lookup tasks are supposed to run repeatedly until successful lookup or maximum retries
- * are hit. This class is NOT thread-safe.
- */
-abstract class LookupTask implements Runnable {
- private final int maxRetries;
- private final ActorRef replyTo;
- private int retried = 0;
-
- LookupTask(final ActorRef replyTo, final int maxRetries) {
- this.replyTo = replyTo;
- this.maxRetries = maxRetries;
- }
-
- abstract void reschedule(int retries);
-
- void tryReschedule(final @Nullable Throwable throwable) {
- if (retried <= maxRetries) {
- retried++;
- reschedule(retried);
- } else {
- fail(throwable);
- }
- }
-
- void fail(final @Nullable Throwable throwable) {
- if (throwable == null) {
- replyTo.tell(new Status.Failure(
- new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
- + "Failing..")), noSender());
- } else {
- replyTo.tell(new Status.Failure(
- new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
- + "Failing..", throwable)), noSender());
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.collect.ClassToInstanceMap;
-import java.util.Collection;
-import java.util.concurrent.CompletionStage;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeServiceExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.component.annotations.Reference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Component(immediate = true, property = "type=default")
-public final class OSGiDistributedShardedDOMDataTree
- implements DOMDataTreeService, DOMDataTreeShardingService, DistributedShardFactory {
- private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedShardedDOMDataTree.class);
-
- @Reference
- ActorSystemProvider actorSystemProvider = null;
- @Reference(target = "(type=distributed-config)")
- DistributedDataStoreInterface configDatastore = null;
- @Reference(target = "(type=distributed-operational)")
- DistributedDataStoreInterface operDatastore = null;
-
- private DistributedShardedDOMDataTree delegate;
-
- @Override
- public DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
- return delegate.createProducer(subtrees);
- }
-
- @Override
- public ClassToInstanceMap<DOMDataTreeServiceExtension> getExtensions() {
- return delegate.getExtensions();
- }
-
- @Override
- public CompletionStage<DistributedShardRegistration> createDistributedShard(final DOMDataTreeIdentifier prefix,
- final Collection<MemberName> replicaMembers) throws DOMDataTreeShardingConflictException {
- return delegate.createDistributedShard(prefix, replicaMembers);
- }
-
- @Override
- public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(
- final DOMDataTreeIdentifier prefix, final T shard, final DOMDataTreeProducer producer)
- throws DOMDataTreeShardingConflictException {
- return delegate.registerDataTreeShard(prefix, shard, producer);
- }
-
- @Override
- public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(final T listener,
- final Collection<DOMDataTreeIdentifier> subtrees, final boolean allowRxMerges,
- final Collection<DOMDataTreeProducer> producers) throws DOMDataTreeLoopException {
- return delegate.registerListener(listener, subtrees, allowRxMerges, producers);
- }
-
- @Activate
- void activate() {
- LOG.info("Distributed DOM Data Tree Service starting");
- delegate = new DistributedShardedDOMDataTree(actorSystemProvider, operDatastore, configDatastore);
- delegate.init();
- LOG.info("Distributed DOM Data Tree Service started");
- }
-
- @Deactivate
- void deactivate() {
- LOG.info("Distributed DOM Data Tree Service stopping");
- // TODO: this needs a shutdown hook, I think
- delegate = null;
- LOG.info("Distributed DOM Data Tree Service stopped");
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_PREFIX_QNAME;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_REPLICAS_QNAME;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_REPLICA_QNAME;
-
-import akka.actor.ActorRef;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.PrefixShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens on changes on prefix-shard-configuration. Resolves the changes and
- * notifies handling actor with {@link PrefixShardCreated} and
- * {@link PrefixShardRemoved} messages.
- */
-public class PrefixedShardConfigUpdateHandler {
-
- private static final Logger LOG = LoggerFactory.getLogger(PrefixedShardConfigUpdateHandler.class);
- private final ActorRef handlingActor;
- private final MemberName memberName;
-
- private final EnumMap<LogicalDatastoreType,ListenerRegistration<DOMDataTreeChangeListener>> registrations =
- new EnumMap<>(LogicalDatastoreType.class);
-
- public PrefixedShardConfigUpdateHandler(final ActorRef handlingActor, final MemberName memberName) {
- this.handlingActor = requireNonNull(handlingActor);
- this.memberName = requireNonNull(memberName);
- }
-
- public void initListener(final DistributedDataStoreInterface dataStore, final LogicalDatastoreType type) {
- registrations.put(type, dataStore.registerShardConfigListener(
- ClusterUtils.SHARD_LIST_PATH, new ShardConfigHandler(memberName, type, handlingActor)));
- }
-
- public void close() {
- registrations.values().forEach(ListenerRegistration::close);
- registrations.clear();
- }
-
- public static final class ShardConfigHandler implements ClusteredDOMDataTreeChangeListener {
-
- private final MemberName memberName;
- private final LogicalDatastoreType type;
- private final ActorRef handlingActor;
- private final String logName;
-
- public ShardConfigHandler(final MemberName memberName,
- final LogicalDatastoreType type,
- final ActorRef handlingActor) {
- this.memberName = memberName;
- this.type = type;
- this.handlingActor = handlingActor;
- logName = memberName.getName() + "-" + type;
- }
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
- changes.forEach(this::resolveChange);
- }
-
- private void resolveChange(final DataTreeCandidate candidate) {
- switch (candidate.getRootNode().getModificationType()) {
- case UNMODIFIED:
- break;
- case APPEARED:
- case DELETE:
- case DISAPPEARED:
- case SUBTREE_MODIFIED:
- case WRITE:
- resolveModifiedRoot(candidate.getRootNode());
- break;
- default:
- break;
- }
- }
-
- private void resolveModifiedRoot(final DataTreeCandidateNode rootNode) {
-
- LOG.debug("{}: New config received {}", logName, rootNode);
- LOG.debug("{}: Data after: {}", logName, rootNode.getDataAfter());
-
- // were in the shards list, iter children and resolve
- for (final DataTreeCandidateNode childNode : rootNode.getChildNodes()) {
- switch (childNode.getModificationType()) {
- case UNMODIFIED:
- break;
- case SUBTREE_MODIFIED:
- case APPEARED:
- case WRITE:
- resolveWrittenShard(childNode);
- break;
- case DELETE:
- case DISAPPEARED:
- resolveDeletedShard(childNode);
- break;
- default:
- break;
- }
- }
- }
-
- @SuppressWarnings("unchecked")
- private void resolveWrittenShard(final DataTreeCandidateNode childNode) {
- final MapEntryNode entryNode = (MapEntryNode) childNode.getDataAfter().get();
- final LeafNode<YangInstanceIdentifier> prefix =
- (LeafNode<YangInstanceIdentifier>) entryNode.getChild(new NodeIdentifier(SHARD_PREFIX_QNAME)).get();
-
- final YangInstanceIdentifier identifier = prefix.getValue();
-
- LOG.debug("{}: Deserialized {} from datastore", logName, identifier);
-
- final ContainerNode replicas =
- (ContainerNode) entryNode.getChild(new NodeIdentifier(SHARD_REPLICAS_QNAME)).get();
-
- final LeafSetNode<String> replicaList =
- (LeafSetNode<String>) replicas.getChild(new NodeIdentifier(SHARD_REPLICA_QNAME)).get();
-
- final List<MemberName> retReplicas = replicaList.getValue().stream()
- .map(child -> MemberName.forName(child.getValue()))
- .collect(Collectors.toList());
-
- LOG.debug("{}: Replicas read from ds {}", logName, retReplicas.toString());
-
- final PrefixShardConfiguration newConfig =
- new PrefixShardConfiguration(new DOMDataTreeIdentifier(type, identifier),
- PrefixShardStrategy.NAME, retReplicas);
-
- LOG.debug("{}: Resulting config {} - sending PrefixShardCreated to {}", logName, newConfig, handlingActor);
-
- handlingActor.tell(new PrefixShardCreated(newConfig), noSender());
- }
-
- private void resolveDeletedShard(final DataTreeCandidateNode childNode) {
-
- final MapEntryNode entryNode = (MapEntryNode) childNode.getDataBefore().get();
-
- final LeafNode<YangInstanceIdentifier> prefix =
- (LeafNode<YangInstanceIdentifier>) entryNode.getChild(new NodeIdentifier(SHARD_PREFIX_QNAME)).get();
-
- final YangInstanceIdentifier deleted = prefix.getValue();
- LOG.debug("{}: Removing shard at {}.", memberName, deleted);
-
- final DOMDataTreeIdentifier domDataTreeIdentifier = new DOMDataTreeIdentifier(type, deleted);
- final PrefixShardRemoved message = new PrefixShardRemoved(domDataTreeIdentifier);
-
- handlingActor.tell(message, noSender());
- }
-
- @Override
- public String toString() {
- return "ShardConfigHandler [logName=" + logName + ", handlingActor=" + handlingActor + "]";
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Writes and removes prefix-based shards' configuration
- * to prefix-shard-configuration. This classed is meant to be utilized
- * by {@link DistributedShardedDOMDataTree} for updating
- * prefix-shard-configuration upon creating and de-spawning prefix-based shards.
- */
-class PrefixedShardConfigWriter {
-
- private static final Logger LOG = LoggerFactory.getLogger(PrefixedShardConfigWriter.class);
-
- private final ClientLocalHistory history;
-
- PrefixedShardConfigWriter(final DataStoreClient client) {
- history = client.createLocalHistory();
- writeInitialParent();
- }
-
- ListenableFuture<Void> writeConfig(final YangInstanceIdentifier path, final Collection<MemberName> replicas) {
- LOG.debug("Writing config for {}, replicas {}", path, replicas);
-
- return doSubmit(doWrite(path, replicas));
- }
-
- ListenableFuture<Void> removeConfig(final YangInstanceIdentifier path) {
- LOG.debug("Removing config for {}.", path);
-
- return doSubmit(doDelete(path));
- }
-
- private void writeInitialParent() {
- final ClientTransaction tx = history.createTransaction();
-
- final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
- final ContainerNode root = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(ClusterUtils.PREFIX_SHARDS_QNAME))
- .withChild(ImmutableMapNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_LIST_QNAME))
- .build())
- .build();
-
- cursor.merge(ClusterUtils.PREFIX_SHARDS_PATH.getLastPathArgument(), root);
- cursor.close();
-
- final DOMStoreThreePhaseCommitCohort cohort = tx.ready();
-
- submitBlocking(cohort);
- }
-
- private static void submitBlocking(final DOMStoreThreePhaseCommitCohort cohort) {
- try {
- doSubmit(cohort).get();
- } catch (final InterruptedException | ExecutionException e) {
- LOG.error("Unable to write initial shard config parent.", e);
- }
- }
-
- private static ListenableFuture<Void> doSubmit(final DOMStoreThreePhaseCommitCohort cohort) {
- final AsyncFunction<Boolean, Void> validateFunction = input -> cohort.preCommit();
- final AsyncFunction<Void, Void> prepareFunction = input -> cohort.commit();
-
- final ListenableFuture<Void> prepareFuture = Futures.transformAsync(cohort.canCommit(), validateFunction,
- MoreExecutors.directExecutor());
- return Futures.transformAsync(prepareFuture, prepareFunction, MoreExecutors.directExecutor());
- }
-
- boolean checkDefaultIsPresent() {
- final NodeIdentifierWithPredicates pag =
- NodeIdentifierWithPredicates.of(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME,
- YangInstanceIdentifier.empty());
-
- final YangInstanceIdentifier defaultId = ClusterUtils.SHARD_LIST_PATH.node(pag);
-
- final ClientSnapshot snapshot = history.takeSnapshot();
- try {
- return snapshot.exists(defaultId).get();
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Presence check of default shard in configuration failed.", e);
- return false;
- } finally {
- snapshot.abort();
- }
- }
-
- private DOMStoreThreePhaseCommitCohort doWrite(final YangInstanceIdentifier path,
- final Collection<MemberName> replicas) {
-
- final ListNodeBuilder<Object, LeafSetEntryNode<Object>> replicaListBuilder =
- ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
- new NodeIdentifier(ClusterUtils.SHARD_REPLICA_QNAME));
-
- replicas.forEach(name -> replicaListBuilder.withChild(
- ImmutableLeafSetEntryNodeBuilder.create()
- .withNodeIdentifier(new NodeWithValue<>(ClusterUtils.SHARD_REPLICA_QNAME, name.getName()))
- .withValue(name.getName())
- .build()));
-
- final MapEntryNode newEntry = ImmutableMapEntryNodeBuilder.create()
- .withNodeIdentifier(
- NodeIdentifierWithPredicates.of(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME,
- path))
- .withChild(ImmutableLeafNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_PREFIX_QNAME))
- .withValue(path)
- .build())
- .withChild(ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_REPLICAS_QNAME))
- .withChild(replicaListBuilder.build())
- .build())
- .build();
-
- final ClientTransaction tx = history.createTransaction();
- final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
- ClusterUtils.SHARD_LIST_PATH.getPathArguments().forEach(cursor::enter);
-
- cursor.write(newEntry.getIdentifier(), newEntry);
- cursor.close();
-
- return tx.ready();
- }
-
- private DOMStoreThreePhaseCommitCohort doDelete(final YangInstanceIdentifier path) {
-
- final ClientTransaction tx = history.createTransaction();
- final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
- ClusterUtils.SHARD_LIST_PATH.getPathArguments().forEach(cursor::enter);
-
- cursor.delete(
- NodeIdentifierWithPredicates.of(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME, path));
- cursor.close();
-
- return tx.ready();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
-import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
-
-/**
- * Proxy actor which acts as a facade for user-provided
- * {@link LeaderLocationListener}. It subscribes for {@link LeaderStateChanged}
- * notifications in its pre start hook and translates them to
- * {@link LeaderLocationListener#onLeaderLocationChanged(LeaderLocation)}
- * events.
- */
-public final class RoleChangeListenerActor extends AbstractUntypedActor {
- private final LeaderLocationListener leaderLocationListener;
- private final ActorRef roleChangeNotifier;
-
- private RoleChangeListenerActor(final ActorRef roleChangeNotifier, final LeaderLocationListener listener) {
- this.roleChangeNotifier = requireNonNull(roleChangeNotifier);
- this.leaderLocationListener = requireNonNull(listener);
- }
-
- @Override
- public void preStart() throws Exception {
- super.preStart();
- roleChangeNotifier.tell(new RegisterRoleChangeListener(), getSelf());
- }
-
- @Override
- protected void handleReceive(final Object message) {
- if (message instanceof RoleChangeNotification) {
- ignoreMessage(message);
- } else if (message instanceof LeaderStateChanged) {
- onLeaderStateChanged((LeaderStateChanged) message);
- } else {
- unknownMessage(message);
- }
- }
-
- private void onLeaderStateChanged(final LeaderStateChanged message) {
- final LeaderLocation newLocation;
- if (message.getLeaderId() == null) {
- newLocation = LeaderLocation.UNKNOWN;
- } else if (message.getMemberId().equals(message.getLeaderId())) {
- newLocation = LeaderLocation.LOCAL;
- } else {
- newLocation = LeaderLocation.REMOTE;
- }
-
- // TODO should we wrap this in try catch block?
- leaderLocationListener.onLeaderLocationChanged(newLocation);
- }
-
- public static Props props(final ActorRef roleChangeNotifier, final LeaderLocationListener listener) {
- return Props.create(RoleChangeListenerActor.class, roleChangeNotifier, listener);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ImmutableList;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardProducer;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardWriteTransaction;
-
-/**
- * Proxy producer implementation that creates transactions that forward all calls to {@link DataStoreClient}.
- */
-class ShardProxyProducer implements DOMDataTreeShardProducer {
- private final DOMDataTreeIdentifier shardRoot;
- private final Collection<DOMDataTreeIdentifier> prefixes;
- private final ClientLocalHistory history;
- private DistributedShardModificationFactory modificationFactory;
-
- ShardProxyProducer(final DOMDataTreeIdentifier shardRoot,
- final Collection<DOMDataTreeIdentifier> prefixes,
- final DataStoreClient client,
- final DistributedShardModificationFactory modificationFactory) {
- this.shardRoot = requireNonNull(shardRoot);
- this.prefixes = ImmutableList.copyOf(prefixes);
- this.modificationFactory = requireNonNull(modificationFactory);
- history = requireNonNull(client).createLocalHistory();
- }
-
- @Override
- public Collection<DOMDataTreeIdentifier> getPrefixes() {
- return prefixes;
- }
-
- @Override
- public DOMDataTreeShardWriteTransaction createTransaction() {
- return new ShardProxyTransaction(shardRoot, prefixes,
- modificationFactory.createModification(history.createTransaction()));
- }
-
- DistributedShardModificationFactory getModificationFactory() {
- return modificationFactory;
- }
-
- void setModificationFactory(final DistributedShardModificationFactory modificationFactory) {
- this.modificationFactory = requireNonNull(modificationFactory);
- }
-
- @Override
- public void close() {
- // FIXME: implement this
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Proxy {@link DOMDataTreeShardWriteTransaction} that creates a proxy cursor that translates all calls into
- * {@link ClientTransaction} calls.
- */
-class ShardProxyTransaction implements DOMDataTreeShardWriteTransaction {
-
- private static final Logger LOG = LoggerFactory.getLogger(ShardProxyTransaction.class);
-
- private final DOMDataTreeIdentifier shardRoot;
- private final Collection<DOMDataTreeIdentifier> prefixes;
- private final DistributedShardModification modification;
- private ClientTransaction currentTx;
- private final List<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>();
-
- private DOMDataTreeWriteCursor cursor = null;
-
- ShardProxyTransaction(final DOMDataTreeIdentifier shardRoot,
- final Collection<DOMDataTreeIdentifier> prefixes,
- final DistributedShardModification modification) {
- this.shardRoot = requireNonNull(shardRoot);
- this.prefixes = requireNonNull(prefixes);
- this.modification = requireNonNull(modification);
- }
-
- private DOMDataTreeWriteCursor getCursor() {
- if (cursor == null) {
- cursor = new DistributedShardModificationCursor(modification, this);
- }
- return cursor;
- }
-
- @Override
- public DOMDataTreeWriteCursor createCursor(final DOMDataTreeIdentifier prefix) {
- checkAvailable(prefix);
- final YangInstanceIdentifier relativePath = toRelative(prefix.getRootIdentifier());
- final DOMDataTreeWriteCursor ret = getCursor();
- ret.enter(relativePath.getPathArguments());
- return ret;
- }
-
- void cursorClosed() {
- cursor = null;
- modification.cursorClosed();
- }
-
- private void checkAvailable(final DOMDataTreeIdentifier prefix) {
- for (final DOMDataTreeIdentifier p : prefixes) {
- if (p.contains(prefix)) {
- return;
- }
- }
- throw new IllegalArgumentException("Prefix[" + prefix + "] not available for this transaction. "
- + "Available prefixes: " + prefixes);
- }
-
- private YangInstanceIdentifier toRelative(final YangInstanceIdentifier path) {
- final Optional<YangInstanceIdentifier> relative =
- path.relativeTo(modification.getPrefix().getRootIdentifier());
- checkArgument(relative.isPresent());
- return relative.get();
- }
-
- @Override
- public void ready() {
- LOG.debug("Readying transaction for shard {}", shardRoot);
-
- requireNonNull(modification, "Attempting to ready an empty transaction.");
-
- cohorts.add(modification.seal());
- for (Entry<DOMDataTreeIdentifier, ForeignShardModificationContext> entry
- : modification.getChildShards().entrySet()) {
- cohorts.add(new ForeignShardThreePhaseCommitCohort(entry.getKey(), entry.getValue()));
- }
- }
-
- @Override
- public void close() {
- cohorts.forEach(DOMStoreThreePhaseCommitCohort::abort);
- cohorts.clear();
-
- if (currentTx != null) {
- currentTx.abort();
- currentTx = null;
- }
- }
-
- @Override
- public ListenableFuture<Void> submit() {
- LOG.debug("Submitting transaction for shard {}", shardRoot);
-
- checkTransactionReadied();
-
- final AsyncFunction<Boolean, Void> validateFunction = input -> prepare();
- final AsyncFunction<Void, Void> prepareFunction = input -> commit();
-
- // transform validate into prepare
- final ListenableFuture<Void> prepareFuture = Futures.transformAsync(validate(), validateFunction,
- MoreExecutors.directExecutor());
- // transform prepare into commit and return as submit result
- return Futures.transformAsync(prepareFuture, prepareFunction, MoreExecutors.directExecutor());
- }
-
- private void checkTransactionReadied() {
- checkState(!cohorts.isEmpty(), "Transaction not readied yet");
- }
-
- @Override
- public ListenableFuture<Boolean> validate() {
- LOG.debug("Validating transaction for shard {}", shardRoot);
-
- checkTransactionReadied();
- final List<ListenableFuture<Boolean>> futures =
- cohorts.stream().map(DOMStoreThreePhaseCommitCohort::canCommit).collect(Collectors.toList());
- final SettableFuture<Boolean> ret = SettableFuture.create();
-
- Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Boolean>>() {
- @Override
- public void onSuccess(final List<Boolean> result) {
- ret.set(true);
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- ret.setException(throwable);
- }
- }, MoreExecutors.directExecutor());
-
- return ret;
- }
-
- @Override
- public ListenableFuture<Void> prepare() {
- LOG.debug("Preparing transaction for shard {}", shardRoot);
-
- checkTransactionReadied();
- final List<ListenableFuture<Void>> futures =
- cohorts.stream().map(DOMStoreThreePhaseCommitCohort::preCommit).collect(Collectors.toList());
- final SettableFuture<Void> ret = SettableFuture.create();
-
- Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Void>>() {
- @Override
- public void onSuccess(final List<Void> result) {
- ret.set(null);
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- ret.setException(throwable);
- }
- }, MoreExecutors.directExecutor());
-
- return ret;
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- LOG.debug("Committing transaction for shard {}", shardRoot);
-
- checkTransactionReadied();
- final List<ListenableFuture<Void>> futures =
- cohorts.stream().map(DOMStoreThreePhaseCommitCohort::commit).collect(Collectors.toList());
- final SettableFuture<Void> ret = SettableFuture.create();
-
- Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Void>>() {
- @Override
- public void onSuccess(final List<Void> result) {
- ret.set(null);
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- ret.setException(throwable);
- }
- }, MoreExecutors.directExecutor());
-
- return ret;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.actor.Status;
-import akka.actor.Status.Success;
-import akka.cluster.ClusterEvent;
-import akka.cluster.ClusterEvent.MemberExited;
-import akka.cluster.ClusterEvent.MemberRemoved;
-import akka.cluster.ClusterEvent.MemberUp;
-import akka.cluster.ClusterEvent.MemberWeaklyUp;
-import akka.cluster.ClusterEvent.ReachableMember;
-import akka.cluster.ClusterEvent.UnreachableMember;
-import akka.cluster.Member;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
-import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
-import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Actor that tracks currently open producers/shards on remote nodes and handles notifications of remote
- * nodes of newly open producers/shards on the local node.
- */
-public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
-
- private static final Logger LOG = LoggerFactory.getLogger(ShardedDataTreeActor.class);
-
- private static final String PERSISTENCE_ID = "sharding-service-actor";
- private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
-
- static final FiniteDuration SHARD_LOOKUP_TASK_INTERVAL = new FiniteDuration(1L, TimeUnit.SECONDS);
-
- private final DistributedShardedDOMDataTree shardingService;
- private final ActorSystem actorSystem;
- private final ClusterWrapper clusterWrapper;
- // helper actorContext used only for static calls to executeAsync etc
- // for calls that need specific actor context tied to a datastore use the one provided in the DistributedDataStore
- private final ActorUtils actorUtils;
- private final ShardingServiceAddressResolver resolver;
- private final DistributedDataStoreInterface distributedConfigDatastore;
- private final DistributedDataStoreInterface distributedOperDatastore;
- private final int lookupTaskMaxRetries;
-
- private final Map<DOMDataTreeIdentifier, ActorProducerRegistration> idToProducer = new HashMap<>();
-
- ShardedDataTreeActor(final ShardedDataTreeActorCreator builder) {
- LOG.debug("Creating ShardedDataTreeActor on {}", builder.getClusterWrapper().getCurrentMemberName());
-
- shardingService = builder.getShardingService();
- actorSystem = builder.getActorSystem();
- clusterWrapper = builder.getClusterWrapper();
- distributedConfigDatastore = builder.getDistributedConfigDatastore();
- distributedOperDatastore = builder.getDistributedOperDatastore();
- lookupTaskMaxRetries = builder.getLookupTaskMaxRetries();
- actorUtils = distributedConfigDatastore.getActorUtils();
- resolver = new ShardingServiceAddressResolver(
- DistributedShardedDOMDataTree.ACTOR_ID, clusterWrapper.getCurrentMemberName());
-
- clusterWrapper.subscribeToMemberEvents(self());
- }
-
- @Override
- public void preStart() {
- }
-
- @Override
- protected void handleRecover(final Object message) {
- LOG.debug("Received a recover message {}", message);
- }
-
- @Override
- protected void handleCommand(final Object message) {
- LOG.debug("{} : Received {}", clusterWrapper.getCurrentMemberName(), message);
- if (message instanceof ClusterEvent.MemberUp) {
- memberUp((ClusterEvent.MemberUp) message);
- } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
- memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
- } else if (message instanceof ClusterEvent.MemberExited) {
- memberExited((ClusterEvent.MemberExited) message);
- } else if (message instanceof ClusterEvent.MemberRemoved) {
- memberRemoved((ClusterEvent.MemberRemoved) message);
- } else if (message instanceof ClusterEvent.UnreachableMember) {
- memberUnreachable((ClusterEvent.UnreachableMember) message);
- } else if (message instanceof ClusterEvent.ReachableMember) {
- memberReachable((ClusterEvent.ReachableMember) message);
- } else if (message instanceof ProducerCreated) {
- onProducerCreated((ProducerCreated) message);
- } else if (message instanceof NotifyProducerCreated) {
- onNotifyProducerCreated((NotifyProducerCreated) message);
- } else if (message instanceof ProducerRemoved) {
- onProducerRemoved((ProducerRemoved) message);
- } else if (message instanceof NotifyProducerRemoved) {
- onNotifyProducerRemoved((NotifyProducerRemoved) message);
- } else if (message instanceof PrefixShardCreated) {
- onPrefixShardCreated((PrefixShardCreated) message);
- } else if (message instanceof LookupPrefixShard) {
- onLookupPrefixShard((LookupPrefixShard) message);
- } else if (message instanceof PrefixShardRemovalLookup) {
- onPrefixShardRemovalLookup((PrefixShardRemovalLookup) message);
- } else if (message instanceof PrefixShardRemoved) {
- onPrefixShardRemoved((PrefixShardRemoved) message);
- } else if (message instanceof StartConfigShardLookup) {
- onStartConfigShardLookup((StartConfigShardLookup) message);
- }
- }
-
- @Override
- public String persistenceId() {
- return PERSISTENCE_ID;
- }
-
- private void memberUp(final MemberUp message) {
- final MemberName memberName = memberToName(message.member());
-
- LOG.info("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
- message.member().address());
-
- resolver.addPeerAddress(memberName, message.member().address());
- }
-
- private void memberWeaklyUp(final MemberWeaklyUp message) {
- final MemberName memberName = memberToName(message.member());
-
- LOG.info("{}: Received MemberWeaklyUp: memberName: {}, address: {}", persistenceId(), memberName,
- message.member().address());
-
- resolver.addPeerAddress(memberName, message.member().address());
- }
-
- private void memberExited(final MemberExited message) {
- final MemberName memberName = memberToName(message.member());
-
- LOG.info("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
- message.member().address());
-
- resolver.removePeerAddress(memberName);
- }
-
- private void memberRemoved(final MemberRemoved message) {
- final MemberName memberName = memberToName(message.member());
-
- LOG.info("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
- message.member().address());
-
- resolver.removePeerAddress(memberName);
- }
-
- private void memberUnreachable(final UnreachableMember message) {
- final MemberName memberName = memberToName(message.member());
- LOG.debug("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
-
- resolver.removePeerAddress(memberName);
- }
-
- private void memberReachable(final ReachableMember message) {
- final MemberName memberName = memberToName(message.member());
- LOG.debug("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
-
- resolver.addPeerAddress(memberName, message.member().address());
- }
-
- private void onProducerCreated(final ProducerCreated message) {
- LOG.debug("Received ProducerCreated: {}", message);
-
- // fastpath if we have no peers
- if (resolver.getShardingServicePeerActorAddresses().isEmpty()) {
- getSender().tell(new Status.Success(null), ActorRef.noSender());
- }
-
- final ActorRef sender = getSender();
- final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
-
- final List<CompletableFuture<Object>> futures = new ArrayList<>();
-
- for (final String address : resolver.getShardingServicePeerActorAddresses()) {
- final ActorSelection actorSelection = actorSystem.actorSelection(address);
- futures.add(
- FutureConverters.toJava(
- actorUtils.executeOperationAsync(
- actorSelection, new NotifyProducerCreated(subtrees), DEFAULT_ASK_TIMEOUT))
- .toCompletableFuture());
- }
-
- final CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
- futures.toArray(new CompletableFuture[futures.size()]));
-
- combinedFuture
- .thenRun(() -> sender.tell(new Success(null), ActorRef.noSender()))
- .exceptionally(throwable -> {
- sender.tell(new Status.Failure(throwable), self());
- return null;
- });
- }
-
- private void onNotifyProducerCreated(final NotifyProducerCreated message) {
- LOG.debug("Received NotifyProducerCreated: {}", message);
-
- final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
-
- try {
- final ActorProducerRegistration registration =
- new ActorProducerRegistration(shardingService.localCreateProducer(subtrees), subtrees);
- subtrees.forEach(id -> idToProducer.put(id, registration));
- sender().tell(new Status.Success(null), self());
- } catch (final IllegalArgumentException e) {
- sender().tell(new Status.Failure(e), getSelf());
- }
- }
-
- private void onProducerRemoved(final ProducerRemoved message) {
- LOG.debug("Received ProducerRemoved: {}", message);
-
- final List<CompletableFuture<Object>> futures = new ArrayList<>();
-
- for (final String address : resolver.getShardingServicePeerActorAddresses()) {
- final ActorSelection selection = actorSystem.actorSelection(address);
-
- futures.add(FutureConverters.toJava(
- actorUtils.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
- .toCompletableFuture());
- }
-
- final CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
- futures.toArray(new CompletableFuture[futures.size()]));
-
- final ActorRef respondTo = getSender();
-
- combinedFuture
- .thenRun(() -> respondTo.tell(new Status.Success(null), self()))
- .exceptionally(e -> {
- respondTo.tell(new Status.Failure(null), self());
- return null;
- });
-
- }
-
- private void onNotifyProducerRemoved(final NotifyProducerRemoved message) {
- LOG.debug("Received NotifyProducerRemoved: {}", message);
-
- final ActorProducerRegistration registration = idToProducer.remove(message.getSubtrees().iterator().next());
- if (registration == null) {
- LOG.warn("The notification contained a path on which no producer is registered, throwing away");
- getSender().tell(new Status.Success(null), ActorRef.noSender());
- return;
- }
-
- try {
- registration.close();
- getSender().tell(new Status.Success(null), ActorRef.noSender());
- } catch (final DOMDataTreeProducerException e) {
- LOG.error("Unable to close producer", e);
- getSender().tell(new Status.Failure(e), ActorRef.noSender());
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private void onLookupPrefixShard(final LookupPrefixShard message) {
- LOG.debug("Member: {}, Received LookupPrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
-
- final DOMDataTreeIdentifier prefix = message.getPrefix();
-
- final ActorUtils utils = prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
- ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
- // schedule a notification task for the reply
- actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
- new ShardCreationLookupTask(actorSystem, getSender(), clusterWrapper,
- utils, shardingService, prefix, lookupTaskMaxRetries), actorSystem.dispatcher());
- }
-
- private void onPrefixShardCreated(final PrefixShardCreated message) {
- LOG.debug("Member: {}, Received PrefixShardCreated: {}", clusterWrapper.getCurrentMemberName(), message);
-
- final PrefixShardConfiguration config = message.getConfiguration();
-
- shardingService.resolveShardAdditions(Collections.singleton(config.getPrefix()));
- }
-
- private void onPrefixShardRemovalLookup(final PrefixShardRemovalLookup message) {
- LOG.debug("Member: {}, Received PrefixShardRemovalLookup: {}", clusterWrapper.getCurrentMemberName(), message);
-
- final ShardRemovalLookupTask removalTask =
- new ShardRemovalLookupTask(actorSystem, getSender(),
- actorUtils, message.getPrefix(), lookupTaskMaxRetries);
-
- actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL, removalTask, actorSystem.dispatcher());
- }
-
- private void onPrefixShardRemoved(final PrefixShardRemoved message) {
- LOG.debug("Received PrefixShardRemoved: {}", message);
-
- shardingService.resolveShardRemovals(Collections.singleton(message.getPrefix()));
- }
-
- private void onStartConfigShardLookup(final StartConfigShardLookup message) {
- LOG.debug("Received StartConfigShardLookup: {}", message);
-
- final ActorUtils context =
- message.getType().equals(LogicalDatastoreType.CONFIGURATION)
- ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
- // schedule a notification task for the reply
- actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
- new ConfigShardLookupTask(
- actorSystem, getSender(), context, message, lookupTaskMaxRetries),
- actorSystem.dispatcher());
- }
-
- private static MemberName memberToName(final Member member) {
- return MemberName.forName(member.roles().iterator().next());
- }
-
- private class ActorProducerRegistration {
-
- private final DOMDataTreeProducer producer;
- private final Collection<DOMDataTreeIdentifier> subtrees;
-
- ActorProducerRegistration(final DOMDataTreeProducer producer,
- final Collection<DOMDataTreeIdentifier> subtrees) {
- this.producer = producer;
- this.subtrees = subtrees;
- }
-
- void close() throws DOMDataTreeProducerException {
- producer.close();
- subtrees.forEach(idToProducer::remove);
- }
- }
-
- private static class ShardFrontendRegistration extends
- AbstractObjectRegistration<ListenerRegistration<DistributedShardFrontend>> {
-
- private final ActorRef clientActor;
- private final ListenerRegistration<DistributedShardFrontend> shardRegistration;
-
- ShardFrontendRegistration(final ActorRef clientActor,
- final ListenerRegistration<DistributedShardFrontend> shardRegistration) {
- super(shardRegistration);
- this.clientActor = clientActor;
- this.shardRegistration = shardRegistration;
- }
-
- @Override
- protected void removeRegistration() {
- shardRegistration.close();
- clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
- }
- }
-
- /**
- * Handles the lookup step of cds shard creation once the configuration is updated.
- */
- private static class ShardCreationLookupTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final ClusterWrapper clusterWrapper;
- private final ActorUtils context;
- private final DistributedShardedDOMDataTree shardingService;
- private final DOMDataTreeIdentifier toLookup;
- private final int lookupMaxRetries;
-
- ShardCreationLookupTask(final ActorSystem system,
- final ActorRef replyTo,
- final ClusterWrapper clusterWrapper,
- final ActorUtils context,
- final DistributedShardedDOMDataTree shardingService,
- final DOMDataTreeIdentifier toLookup,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.clusterWrapper = clusterWrapper;
- this.context = context;
- this.shardingService = shardingService;
- this.toLookup = toLookup;
- this.lookupMaxRetries = lookupMaxRetries;
- }
-
- @Override
- public void run() {
- final Future<ActorRef> localShardFuture =
- context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
-
- localShardFuture.onComplete(new OnComplete<ActorRef>() {
- @Override
- public void onComplete(final Throwable throwable, final ActorRef actorRef) {
- if (throwable != null) {
- tryReschedule(throwable);
- } else {
- LOG.debug("Local backend for shard[{}] lookup successful, starting leader lookup..", toLookup);
-
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL,
- new ShardLeaderLookupTask(system, replyTo, context, clusterWrapper, actorRef,
- shardingService, toLookup, lookupMaxRetries),
- system.dispatcher());
- }
- }
- }, system.dispatcher());
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("Local backend for shard[{}] not found, try: {}, rescheduling..", toLookup, retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, ShardCreationLookupTask.this, system.dispatcher());
- }
- }
-
- /**
- * Handles the readiness step by waiting for a leader of the created shard.
- */
- private static class ShardLeaderLookupTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final ActorUtils context;
- private final ClusterWrapper clusterWrapper;
- private final ActorRef shard;
- private final DistributedShardedDOMDataTree shardingService;
- private final DOMDataTreeIdentifier toLookup;
- private final int lookupMaxRetries;
-
- ShardLeaderLookupTask(final ActorSystem system,
- final ActorRef replyTo,
- final ActorUtils context,
- final ClusterWrapper clusterWrapper,
- final ActorRef shard,
- final DistributedShardedDOMDataTree shardingService,
- final DOMDataTreeIdentifier toLookup,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.context = context;
- this.clusterWrapper = clusterWrapper;
- this.shard = shard;
- this.shardingService = shardingService;
- this.toLookup = toLookup;
- this.lookupMaxRetries = lookupMaxRetries;
- }
-
- @Override
- public void run() {
-
- final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
-
- ask.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable throwable, final Object findLeaderReply) {
- if (throwable != null) {
- tryReschedule(throwable);
- } else {
- final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
- final Optional<String> leaderActor = findLeader.getLeaderActor();
- if (leaderActor.isPresent()) {
- // leader is found, backend seems ready, check if the frontend is ready
- LOG.debug("{} - Leader for shard[{}] backend ready, starting frontend lookup..",
- clusterWrapper.getCurrentMemberName(), toLookup);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL,
- new FrontendLookupTask(
- system, replyTo, shardingService, toLookup, lookupMaxRetries),
- system.dispatcher());
- } else {
- tryReschedule(null);
- }
- }
- }
- }, system.dispatcher());
-
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("{} - Leader for shard[{}] backend not found on try: {}, retrying..",
- clusterWrapper.getCurrentMemberName(), toLookup, retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, ShardLeaderLookupTask.this, system.dispatcher());
- }
- }
-
- /**
- * After backend is ready this handles the last step - checking if we have a frontend shard for the backend,
- * once this completes(which should be ready by the time the backend is created, this is just a sanity check in
- * case they race), the future for the cds shard creation is completed and the shard is ready for use.
- */
- private static final class FrontendLookupTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final DistributedShardedDOMDataTree shardingService;
- private final DOMDataTreeIdentifier toLookup;
-
- FrontendLookupTask(final ActorSystem system,
- final ActorRef replyTo,
- final DistributedShardedDOMDataTree shardingService,
- final DOMDataTreeIdentifier toLookup,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.shardingService = shardingService;
- this.toLookup = toLookup;
- }
-
- @Override
- public void run() {
- final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> entry =
- shardingService.lookupShardFrontend(toLookup);
-
- if (entry != null && tableEntryIdCheck(entry, toLookup) && entry.getValue() != null) {
- replyTo.tell(new Success(null), ActorRef.noSender());
- } else {
- tryReschedule(null);
- }
- }
-
- private boolean tableEntryIdCheck(final DOMDataTreePrefixTableEntry<?> entry,
- final DOMDataTreeIdentifier prefix) {
- if (entry == null) {
- return false;
- }
-
- if (YangInstanceIdentifier.empty().equals(prefix.getRootIdentifier())) {
- return true;
- }
-
- if (entry.getIdentifier().equals(toLookup.getRootIdentifier().getLastPathArgument())) {
- return true;
- }
-
- return false;
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("Frontend for shard[{}] not found on try: {}, retrying..", toLookup, retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, FrontendLookupTask.this, system.dispatcher());
- }
- }
-
- /**
- * Task that is run once a cds shard registration is closed and completes once the backend shard is removed from the
- * configuration.
- */
- private static class ShardRemovalLookupTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final ActorUtils context;
- private final DOMDataTreeIdentifier toLookup;
-
- ShardRemovalLookupTask(final ActorSystem system,
- final ActorRef replyTo,
- final ActorUtils context,
- final DOMDataTreeIdentifier toLookup,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.context = context;
- this.toLookup = toLookup;
- }
-
- @Override
- public void run() {
- final Future<ActorRef> localShardFuture =
- context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
-
- localShardFuture.onComplete(new OnComplete<ActorRef>() {
- @Override
- public void onComplete(final Throwable throwable, final ActorRef actorRef) {
- if (throwable != null) {
- //TODO Shouldn't we check why findLocalShard failed?
- LOG.debug("Backend shard[{}] removal lookup successful notifying the registration future",
- toLookup);
- replyTo.tell(new Success(null), ActorRef.noSender());
- } else {
- tryReschedule(null);
- }
- }
- }, system.dispatcher());
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("Backend shard[{}] removal lookup failed, shard is still present, try: {}, rescheduling..",
- toLookup, retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, ShardRemovalLookupTask.this, system.dispatcher());
- }
- }
-
- /**
- * Task for handling the lookup of the backend for the configuration shard.
- */
- private static class ConfigShardLookupTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final ActorUtils context;
-
- ConfigShardLookupTask(final ActorSystem system,
- final ActorRef replyTo,
- final ActorUtils context,
- final StartConfigShardLookup message,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.context = context;
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("Local backend for prefix configuration shard not found, try: {}, rescheduling..", retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, ConfigShardLookupTask.this, system.dispatcher());
- }
-
- @Override
- public void run() {
- final Optional<ActorRef> localShard =
- context.findLocalShard(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
-
- if (!localShard.isPresent()) {
- tryReschedule(null);
- } else {
- LOG.debug("Local backend for prefix configuration shard lookup successful");
- replyTo.tell(new Status.Success(null), ActorRef.noSender());
- }
- }
- }
-
- /**
- * Task for handling the readiness state of the config shard. Reports success once the leader is elected.
- */
- private static class ConfigShardReadinessTask extends LookupTask {
-
- private final ActorSystem system;
- private final ActorRef replyTo;
- private final ActorUtils context;
- private final ClusterWrapper clusterWrapper;
- private final ActorRef shard;
-
- ConfigShardReadinessTask(final ActorSystem system,
- final ActorRef replyTo,
- final ActorUtils context,
- final ClusterWrapper clusterWrapper,
- final ActorRef shard,
- final int lookupMaxRetries) {
- super(replyTo, lookupMaxRetries);
- this.system = system;
- this.replyTo = replyTo;
- this.context = context;
- this.clusterWrapper = clusterWrapper;
- this.shard = shard;
- }
-
- @Override
- void reschedule(final int retries) {
- LOG.debug("{} - Leader for config shard not found on try: {}, retrying..",
- clusterWrapper.getCurrentMemberName(), retries);
- system.scheduler().scheduleOnce(
- SHARD_LOOKUP_TASK_INTERVAL, ConfigShardReadinessTask.this, system.dispatcher());
- }
-
- @Override
- public void run() {
- final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
-
- ask.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable throwable, final Object findLeaderReply) {
- if (throwable != null) {
- tryReschedule(throwable);
- } else {
- final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
- final Optional<String> leaderActor = findLeader.getLeaderActor();
- if (leaderActor.isPresent()) {
- // leader is found, backend seems ready, check if the frontend is ready
- LOG.debug("{} - Leader for config shard is ready. Ending lookup.",
- clusterWrapper.getCurrentMemberName());
- replyTo.tell(new Status.Success(null), ActorRef.noSender());
- } else {
- tryReschedule(null);
- }
- }
- }
- }, system.dispatcher());
- }
- }
-
- public static class ShardedDataTreeActorCreator {
-
- private DistributedShardedDOMDataTree shardingService;
- private DistributedDataStoreInterface distributedConfigDatastore;
- private DistributedDataStoreInterface distributedOperDatastore;
- private ActorSystem actorSystem;
- private ClusterWrapper cluster;
- private int maxRetries;
-
- public DistributedShardedDOMDataTree getShardingService() {
- return shardingService;
- }
-
- public ShardedDataTreeActorCreator setShardingService(final DistributedShardedDOMDataTree shardingService) {
- this.shardingService = shardingService;
- return this;
- }
-
- public ActorSystem getActorSystem() {
- return actorSystem;
- }
-
- public ShardedDataTreeActorCreator setActorSystem(final ActorSystem actorSystem) {
- this.actorSystem = actorSystem;
- return this;
- }
-
- public ShardedDataTreeActorCreator setClusterWrapper(final ClusterWrapper clusterWrapper) {
- this.cluster = clusterWrapper;
- return this;
- }
-
- public ClusterWrapper getClusterWrapper() {
- return cluster;
- }
-
- public DistributedDataStoreInterface getDistributedConfigDatastore() {
- return distributedConfigDatastore;
- }
-
- public ShardedDataTreeActorCreator setDistributedConfigDatastore(
- final DistributedDataStoreInterface distributedConfigDatastore) {
- this.distributedConfigDatastore = distributedConfigDatastore;
- return this;
- }
-
- public DistributedDataStoreInterface getDistributedOperDatastore() {
- return distributedOperDatastore;
- }
-
- public ShardedDataTreeActorCreator setDistributedOperDatastore(
- final DistributedDataStoreInterface distributedOperDatastore) {
- this.distributedOperDatastore = distributedOperDatastore;
- return this;
- }
-
- public ShardedDataTreeActorCreator setLookupTaskMaxRetries(final int newMaxRetries) {
- this.maxRetries = newMaxRetries;
- return this;
- }
-
- public int getLookupTaskMaxRetries() {
- return maxRetries;
- }
-
- private void verify() {
- requireNonNull(shardingService);
- requireNonNull(actorSystem);
- requireNonNull(cluster);
- requireNonNull(distributedConfigDatastore);
- requireNonNull(distributedOperDatastore);
- }
-
- public Props props() {
- verify();
- return Props.create(ShardedDataTreeActor.class, this);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.Address;
-import java.util.Collection;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Resolver for remote {@link ShardedDataTreeActor}'s.
- */
-public class ShardingServiceAddressResolver {
-
- private final ConcurrentMap<MemberName, Address> memberNameToAddress = new ConcurrentHashMap<>();
- private final String shardingServiceActorIdentifier;
- private final MemberName localMemberName;
-
- public ShardingServiceAddressResolver(final String shardingServiceActorIdentifier,
- final MemberName localMemberName) {
- this.shardingServiceActorIdentifier = shardingServiceActorIdentifier;
- this.localMemberName = localMemberName;
- }
-
- void addPeerAddress(final MemberName memberName, final Address address) {
- memberNameToAddress.put(memberName, address);
- }
-
- void removePeerAddress(final MemberName memberName) {
- memberNameToAddress.remove(memberName);
- }
-
- Address getPeerAddress(final MemberName memberName) {
- return memberNameToAddress.get(memberName);
- }
-
- StringBuilder getActorPathBuilder(final Address address) {
- return new StringBuilder().append(address.toString()).append("/user/").append(shardingServiceActorIdentifier);
- }
-
- Collection<String> getShardingServicePeerActorAddresses() {
- final Collection<String> peerAddresses =
- memberNameToAddress
- .entrySet()
- .stream()
- .filter(entry -> !localMemberName.equals(entry.getKey()))
- .map(entry -> getActorPathBuilder(entry.getValue()).toString())
- .collect(Collectors.toList());
-
- return peerAddresses;
- }
-
- public String resolve(final MemberName memberName) {
- final Address address = memberNameToAddress.get(requireNonNull(memberName));
- checkNotNull(address, "Requested member[%s] is not present in the resolver", memberName);
- return getActorPathBuilder(address).toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-/**
- * Message sent to the local ShardManager, once the shard configuration shard is ready and the ShardManager should
- * start its listener.
- */
-public final class InitConfigListener {
-
- public static final InitConfigListener INSTANCE = new InitConfigListener();
-
- private InitConfigListener() {
-
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.Beta;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Sent to the local {@link ShardedDataTreeActor} when there was a shard created
- * on the local node. The local actor should notify the remote actors with {@link PrefixShardCreated} which should
- * create the required frontend/backend shards.
- */
-@Beta
-public class LookupPrefixShard implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private final DOMDataTreeIdentifier prefix;
-
- public LookupPrefixShard(final DOMDataTreeIdentifier prefix) {
- this.prefix = requireNonNull(prefix);
- }
-
- public DOMDataTreeIdentifier getPrefix() {
- return prefix;
- }
-
-
- @Override
- public String toString() {
- return "LookupPrefixShard{"
- + "prefix="
- + prefix
- + '}';
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}'s when attempting
- * to create a producer. The remote node should attempt to create a producer in the local sharding service and reply
- * with success/failure based on the attempt result.
- */
-@Beta
-public class NotifyProducerCreated implements Serializable {
- private static final long serialVersionUID = 1L;
- private final Collection<DOMDataTreeIdentifier> subtrees;
-
- public NotifyProducerCreated(final Collection<DOMDataTreeIdentifier> subtrees) {
- this.subtrees = ImmutableList.copyOf(subtrees);
- }
-
- public Collection<DOMDataTreeIdentifier> getSubtrees() {
- return subtrees;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}'s when attempting
- * to close a producer. The remote node should attempt to close a producer in the local sharding service and reply
- * with success/failure based on the attempt result. If the producer doesn't exist on this node report Success.
- */
-@Beta
-public class NotifyProducerRemoved implements Serializable {
- private static final long serialVersionUID = 1L;
- private final Collection<DOMDataTreeIdentifier> subtrees;
-
- public NotifyProducerRemoved(final Collection<DOMDataTreeIdentifier> subtrees) {
- this.subtrees = ImmutableList.copyOf(subtrees);
- }
-
- public Collection<DOMDataTreeIdentifier> getSubtrees() {
- return subtrees;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-
-/**
- * Message sent to the local {@link ShardedDataTreeActor} when a clustered
- * shard was created locally. The backend shards/replicas will be handled by the ShardManager but the
- * {@link ShardedDataTreeActor} needs to handle the registration of the
- * frontends into the {@link org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService}. The configuration only contains
- * the Member nodes that this is still yet to be distributed to. The last node will receive PrefixShardConfiguration
- * with only it's member present.
- */
-@Beta
-public class PrefixShardCreated {
- private final PrefixShardConfiguration configuration;
-
- public PrefixShardCreated(final PrefixShardConfiguration configuration) {
- this.configuration = configuration;
- }
-
- public PrefixShardConfiguration getConfiguration() {
- return configuration;
- }
-
- @Override
- public String toString() {
- return "PrefixShardCreated{"
- + "configuration=" + configuration
- + '}';
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Sent to the local {@link ShardedDataTreeActor} to initiate the lookup of the shard, once the shard is removed from
- * the system entirely the actor responds with a success.
- */
-public class PrefixShardRemovalLookup {
- private final DOMDataTreeIdentifier prefix;
-
- public PrefixShardRemovalLookup(final DOMDataTreeIdentifier prefix) {
- this.prefix = requireNonNull(prefix);
- }
-
- public DOMDataTreeIdentifier getPrefix() {
- return prefix;
- }
-
- @Override
- public String toString() {
- return "PrefixShardRemovalLookup{" + "prefix=" + prefix + '}';
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}'s when there is an attempt to remove the shard,
- * the ShardedDataTreeActor should remove the shard from the current configuration so that the change is picked up
- * in the backend ShardManager.
- */
-@Beta
-public class PrefixShardRemoved implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private final DOMDataTreeIdentifier prefix;
-
- public PrefixShardRemoved(final DOMDataTreeIdentifier prefix) {
- this.prefix = prefix;
- }
-
- public DOMDataTreeIdentifier getPrefix() {
- return prefix;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to local {@link ShardedDataTreeActor}'s when there was an
- * attempt to create a producer on the local node.
- */
-@Beta
-public class ProducerCreated {
- private final Collection<DOMDataTreeIdentifier> subtrees;
-
- public ProducerCreated(final Collection<DOMDataTreeIdentifier> subtrees) {
- this.subtrees = subtrees;
- }
-
- public Collection<DOMDataTreeIdentifier> getSubtrees() {
- return subtrees;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to local {@link ShardedDataTreeActor}'s when there was an
- * attempt to close a producer on the local node.
- */
-@Beta
-public class ProducerRemoved {
-
- private final Collection<DOMDataTreeIdentifier> subtrees;
-
- public ProducerRemoved(final Collection<DOMDataTreeIdentifier> subtrees) {
- this.subtrees = subtrees;
- }
-
- public Collection<DOMDataTreeIdentifier> getSubtrees() {
- return subtrees;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-
-/**
- * Message that should be sent to ShardedDataTreeActor when the lookup of the prefix config shard should begin.
- * Replied to with Succes once the shard has a leader.
- */
-public class StartConfigShardLookup {
-
- private LogicalDatastoreType type;
-
- public StartConfigShardLookup(final LogicalDatastoreType type) {
- this.type = type;
- }
-
- public LogicalDatastoreType getType() {
- return type;
- }
-
- @Override
- public String toString() {
- return "StartConfigShardLookup{type=" + type + '}';
- }
-}
// vi: set smarttab et sw=4 tabstop=4:
module distributed-datastore-provider {
-
yang-version 1;
namespace "urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider";
prefix "distributed-datastore-provider";
description
"This module contains the base YANG definitions for
- the distributed datastore provider implementation";
+ the distributed datastore provider implementation";
+
+ revision "2023-12-29" {
+ description "Remote use-tell-based-protocol and shard-snapshot-chunk-size leaves";
+ }
revision "2014-06-12" {
description
}
grouping data-store-properties {
- leaf max-shard-data-change-executor-queue-size {
- default 1000;
- type non-zero-uint32-type;
- description "The maximum queue size for each shard's data store data change notification executor.";
- }
-
- leaf max-shard-data-change-executor-pool-size {
- default 20;
- type non-zero-uint32-type;
- description "The maximum thread pool size for each shard's data store data change notification executor.";
- }
-
- leaf max-shard-data-change-listener-queue-size {
- default 1000;
- type non-zero-uint32-type;
- description "The maximum queue size for each shard's data store data change listener.";
- }
-
- leaf max-shard-data-store-executor-queue-size {
- default 5000;
- type non-zero-uint32-type;
- description "The maximum queue size for each shard's data store executor.";
- }
-
leaf shard-transaction-idle-timeout-in-minutes {
default 10;
type non-zero-uint32-type;
leaf shard-snapshot-data-threshold-percentage {
default 12;
type percentage;
- description "The percentage of Runtime.maxMemory() used by the in-memory journal log before a snapshot is to be taken";
+ description "The percentage of Runtime.maxMemory() used by the in-memory journal log before a snapshot is to be taken.
+ Disabled, if direct threshold is enabled.";
+ }
+
+ leaf shard-snapshot-data-threshold {
+ default 0;
+ type uint32 {
+ range "0..max";
+ }
+ description "The threshold of in-memory journal size before a snapshot is to be taken. If set to 0, direct threshold
+ is disabled and percentage is used instead.";
}
cannot be found then the default raft behavior will be applied";
}
- leaf shard-snapshot-chunk-size {
- status deprecated;
- default 2048000;
- type non-zero-uint32-type;
- description "When sending a snapshot to a follower, this is the maximum size in bytes for
- a chunk of data.";
- }
-
leaf maximum-message-slice-size {
- default 2048000;
+ default 491520;
type non-zero-uint32-type;
description "When fragmenting messages thru the akka remoting framework, this is the
maximum size in bytes for a message slice.";
}
- leaf use-tell-based-protocol {
- default false;
- type boolean;
- description "Use a newer protocol between the frontend and backend. This feature is considered
- exprerimental at this point.";
- }
-
leaf file-backed-streaming-threshold-in-megabytes {
default 128;
type non-zero-uint32-type;
description "Use lz4 compression for snapshots, sent from leader to follower, for snapshots stored
by LocalSnapshotStore, use akka.conf configuration.";
}
+
+ leaf export-on-recovery {
+ default off;
+ type enumeration {
+ enum off;
+ enum json;
+ }
+ description "Export snapshot and journal during recovery. Possible modes: off(default),
+ json(export to json files). Note that in case of large snapshot,
+ export will take a lot of time.";
+ }
+
+ leaf recovery-export-base-dir {
+ default persistence-export;
+ type string;
+ description "Directory name for snapshot and journal dumps.";
+ }
}
container data-store-properties-container {
+++ /dev/null
-module prefix-shard-configuration {
- yang-version 1;
- namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:prefix-shard-configuration";
- prefix "prefix-config";
-
- description
- "This module contains the base YANG definitions for
- shards based on prefix configuration";
-
- revision "2017-01-10" {
- description "Initial revision.";
- }
-
- container prefix-shards {
-
- list shard {
- key prefix;
- leaf prefix {
- type instance-identifier;
- description "Prefix that this shard is rooted at.";
- }
-
- container replicas {
- leaf-list replica {
- type string;
- }
-
- description "List of cluster member nodes that this shard is replicated on";
- }
-
- description "List of prefix-based shards configured.";
- }
- }
-}
+++ /dev/null
-/*
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doThrow;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ExecutionException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-public class AbstractDOMBrokerWriteTransactionTest {
-
- @Mock
- private AbstractDOMTransactionFactory abstractDOMTransactionFactory;
-
- @Mock
- private DOMStoreWriteTransaction domStoreWriteTransaction;
-
- private class AbstractDOMBrokerWriteTransactionTestImpl
- extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
- AbstractDOMBrokerWriteTransactionTestImpl() {
- super(new Object(), Collections.emptyMap(), abstractDOMTransactionFactory);
- }
-
- @Override
- protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) {
- return null;
- }
-
- @Override
- protected Collection<DOMStoreWriteTransaction> getSubtransactions() {
- return Collections.singletonList(domStoreWriteTransaction);
- }
- }
-
- @Before
- public void setup() {
- MockitoAnnotations.initMocks(this);
- }
-
- @Test
- public void readyRuntimeExceptionAndCancel() throws InterruptedException {
- RuntimeException thrown = new RuntimeException();
- doThrow(thrown).when(domStoreWriteTransaction).ready();
- AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl =
- new AbstractDOMBrokerWriteTransactionTestImpl();
-
- FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
- try {
- submitFuture.get();
- Assert.fail("TransactionCommitFailedException expected");
- } catch (ExecutionException e) {
- assertTrue(e.getCause() instanceof TransactionCommitFailedException);
- assertTrue(e.getCause().getCause() == thrown);
- abstractDOMBrokerWriteTransactionTestImpl.cancel();
- }
- }
-
- @Test
- public void submitRuntimeExceptionAndCancel() throws InterruptedException {
- RuntimeException thrown = new RuntimeException();
- doThrow(thrown).when(abstractDOMTransactionFactory).commit(any(), any());
- AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl
- = new AbstractDOMBrokerWriteTransactionTestImpl();
-
- FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
- try {
- submitFuture.get();
- Assert.fail("TransactionCommitFailedException expected");
- } catch (ExecutionException e) {
- assertTrue(e.getCause() instanceof TransactionCommitFailedException);
- assertTrue(e.getCause().getCause() == thrown);
- abstractDOMBrokerWriteTransactionTestImpl.cancel();
- }
- }
-}
package org.opendaylight.controller.cluster.databroker;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import org.junit.AfterClass;
+import akka.util.Timeout;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import scala.concurrent.duration.FiniteDuration;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientBackedDataStoreTest {
private static final ClientIdentifier UNKNOWN_ID = ClientIdentifier.create(
MemberName.forName("member"), FrontendType.forName("frontend"));
private static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0);
- private static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0);
- private static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_ID, 0);
-
- private static EffectiveModelContext SCHEMA_CONTEXT;
+ private static final TransactionIdentifier TRANSACTION_IDENTIFIER =
+ new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0), 0);
@Mock
private DataStoreClient clientActor;
-
+ @Mock
+ private DatastoreContext datastoreContext;
+ @Mock
+ private Timeout shardElectionTimeout;
@Mock
private ActorUtils actorUtils;
-
@Mock
private ClientLocalHistory clientLocalHistory;
-
@Mock
private ClientTransaction clientTransaction;
-
@Mock
private ClientSnapshot clientSnapshot;
- @BeforeClass
- public static void beforeClass() {
- SCHEMA_CONTEXT = TestModel.createTestContext();
- }
-
- @AfterClass
- public static void afterClass() {
- SCHEMA_CONTEXT = null;
- }
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- when(actorUtils.getSchemaContext()).thenReturn(SCHEMA_CONTEXT);
- when(actorUtils.getDatastoreContext()).thenReturn(DatastoreContext.newBuilder().build());
- when(clientTransaction.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
- when(clientSnapshot.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
+ doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
+ doReturn(TRANSACTION_IDENTIFIER).when(clientTransaction).getIdentifier();
+ doReturn(TRANSACTION_IDENTIFIER).when(clientSnapshot).getIdentifier();
- when(clientActor.getIdentifier()).thenReturn(CLIENT_IDENTIFIER);
- when(clientActor.createTransaction()).thenReturn(clientTransaction);
- when(clientActor.createLocalHistory()).thenReturn(clientLocalHistory);
- when(clientActor.createSnapshot()).thenReturn(clientSnapshot);
+ doReturn(clientTransaction).when(clientActor).createTransaction();
+ doReturn(clientLocalHistory).when(clientActor).createLocalHistory();
+ doReturn(clientSnapshot).when(clientActor).createSnapshot();
}
@Test
public void testCreateTransactionChain() {
- try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
- actorUtils, UNKNOWN_ID, clientActor)) {
- final DOMStoreTransactionChain txChain = clientBackedDataStore.createTransactionChain();
- assertNotNull(txChain);
- verify(clientActor, Mockito.times(1)).createLocalHistory();
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ assertNotNull(clientBackedDataStore.createTransactionChain());
+ verify(clientActor, times(1)).createLocalHistory();
}
}
@Test
public void testNewReadOnlyTransaction() {
- try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
- actorUtils, UNKNOWN_ID, clientActor)) {
- final DOMStoreReadTransaction tx = clientBackedDataStore.newReadOnlyTransaction();
- assertNotNull(tx);
- verify(clientActor, Mockito.times(1)).createSnapshot();
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ assertNotNull(clientBackedDataStore.newReadOnlyTransaction());
+ verify(clientActor, times(1)).createSnapshot();
}
}
@Test
public void testNewWriteOnlyTransaction() {
- try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
- actorUtils, UNKNOWN_ID, clientActor)) {
- final DOMStoreWriteTransaction tx = clientBackedDataStore.newWriteOnlyTransaction();
- assertNotNull(tx);
- verify(clientActor, Mockito.times(1)).createTransaction();
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ assertNotNull(clientBackedDataStore.newWriteOnlyTransaction());
+ verify(clientActor, times(1)).createTransaction();
}
}
@Test
public void testNewReadWriteTransaction() {
- try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
- actorUtils, UNKNOWN_ID, clientActor)) {
- final DOMStoreReadWriteTransaction tx = clientBackedDataStore.newReadWriteTransaction();
- assertNotNull(tx);
- verify(clientActor, Mockito.times(1)).createTransaction();
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ assertNotNull(clientBackedDataStore.newReadWriteTransaction());
+ verify(clientActor, times(1)).createTransaction();
+ }
+ }
+
+ @Test
+ public void testWaitTillReadyBlocking() {
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+ doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
+ doReturn(1).when(datastoreContext).getInitialSettleTimeoutMultiplier();
+ doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ final var sw = Stopwatch.createStarted();
+ clientBackedDataStore.waitTillReady();
+ final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+ assertTrue("Expected to be blocked for 50 millis", elapsedMillis >= 50);
+ }
+ }
+
+ @Test
+ public void testWaitTillReadyCountDown() {
+ try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+
+ ForkJoinPool.commonPool().submit(() -> {
+ Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+ clientBackedDataStore.readinessFuture().set(Empty.value());
+ });
+
+ final var sw = Stopwatch.createStarted();
+ clientBackedDataStore.waitTillReady();
+ final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+ assertTrue("Expected to be released in 500 millis", elapsedMillis < 5000);
}
}
}
package org.opendaylight.controller.cluster.databroker;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
-import com.google.common.util.concurrent.ListenableFuture;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientBackedReadTransactionTest extends ClientBackedTransactionTest<ClientBackedReadTransaction> {
private ClientBackedReadTransaction object;
@Mock
- private NormalizedNode<?, ?> data;
+ private ContainerNode data;
@Mock
private ClientActorContext clientContext;
@Mock
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- doReturn(CLIENT_ID).when(clientContext).getIdentifier();
doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
- doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.empty());
- doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.empty());
+ doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+ doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
object = new ClientBackedReadTransaction(delegate, null, null);
}
@Test
public void testRead() throws Exception {
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> result = object().read(YangInstanceIdentifier.empty());
- final Optional<NormalizedNode<?, ?>> resultData = result.get();
- assertTrue(resultData.isPresent());
- assertEquals(data, resultData.get());
+ assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
}
@Test
public void testExists() throws Exception {
- final ListenableFuture<Boolean> result = object().exists(YangInstanceIdentifier.empty());
- assertEquals(Boolean.TRUE, result.get());
+ assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
}
}
package org.opendaylight.controller.cluster.databroker;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
-import com.google.common.util.concurrent.FluentFuture;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientBackedReadWriteTransactionTest
extends ClientBackedTransactionTest<ClientBackedReadWriteTransaction> {
private ClientBackedReadWriteTransaction object;
@Mock
private ClientTransaction delegate;
@Mock
- private NormalizedNode<?, ?> data;
- @Mock
- private DOMStoreThreePhaseCommitCohort readyCohort;
+ private ContainerNode data;
@Override
ClientBackedReadWriteTransaction object() {
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
- doReturn(readyCohort).when(delegate).ready();
- doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.empty());
- doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.empty());
+ doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+ doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
object = new ClientBackedReadWriteTransaction(delegate, null);
}
@Test
public void testRead() throws Exception {
- final FluentFuture<Optional<NormalizedNode<?, ?>>> result = object().read(YangInstanceIdentifier.empty());
- final Optional<NormalizedNode<?, ?>> resultData = result.get();
- assertTrue(resultData.isPresent());
- assertEquals(data, resultData.get());
+ assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
}
@Test
public void testExists() throws Exception {
- assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.empty()).get());
+ assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
}
}
*/
package org.opendaylight.controller.cluster.databroker;
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientBackedTransactionChainTest {
private ClientBackedTransactionChain chain;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
final FrontendIdentifier frontendId = FrontendIdentifier.create(
MemberName.forName("member"), FrontendType.forName("frontend"));
final ClientIdentifier clientId = ClientIdentifier.create(frontendId, 0);
final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(clientId, 0);
final TransactionIdentifier transactionId = new TransactionIdentifier(historyId, 0);
- Mockito.when(history.getIdentifier()).thenReturn(historyId);
- Mockito.when(transaction.getIdentifier()).thenReturn(transactionId);
- Mockito.when(snapshot.getIdentifier()).thenReturn(transactionId);
- Mockito.when(history.takeSnapshot()).thenReturn(snapshot);
- Mockito.when(history.createTransaction()).thenReturn(transaction);
+ doReturn(transactionId).when(transaction).getIdentifier();
+ doReturn(transactionId).when(snapshot).getIdentifier();
+ doReturn(snapshot).when(history).takeSnapshot();
+ doReturn(transaction).when(history).createTransaction();
chain = new ClientBackedTransactionChain(history, false);
}
@Test
public void testNewReadOnlyTransaction() {
- Assert.assertNotNull(chain.newReadOnlyTransaction());
- Mockito.verify(history).takeSnapshot();
+ assertNotNull(chain.newReadOnlyTransaction());
+ verify(history).takeSnapshot();
}
@Test
public void testNewReadWriteTransaction() {
- Assert.assertNotNull(chain.newReadWriteTransaction());
- Mockito.verify(history).createTransaction();
+ assertNotNull(chain.newReadWriteTransaction());
+ verify(history).createTransaction();
}
@Test
public void testNewWriteOnlyTransaction() {
- Assert.assertNotNull(chain.newWriteOnlyTransaction());
- Mockito.verify(history).createTransaction();
+ assertNotNull(chain.newWriteOnlyTransaction());
+ verify(history).createTransaction();
}
@Test
public void testClose() {
chain.newReadOnlyTransaction();
chain.close();
- Mockito.verify(snapshot).abort();
- Mockito.verify(history).close();
+ verify(snapshot).abort();
+ verify(history).close();
}
@Test
chain.snapshotClosed(snapshot);
// snap is removed, so cannot be aborted
chain.close();
- Mockito.verify(snapshot, Mockito.never()).abort();
- Mockito.verify(history).close();
+ verify(snapshot, never()).abort();
+ verify(history).close();
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.databroker;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
import org.junit.Test;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.databroker.actors.dds.AbstractClientHandle;
public abstract class ClientBackedTransactionTest<T extends ClientBackedTransaction<?>> {
- private static FrontendIdentifier FRONTEND_ID = FrontendIdentifier.create(
+ private static final FrontendIdentifier FRONTEND_ID = FrontendIdentifier.create(
MemberName.forName("member"), FrontendType.forName("frontend"));
protected static final ClientIdentifier CLIENT_ID = ClientIdentifier.create(FRONTEND_ID, 0);
- private static LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 0);
+ private static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 0);
protected static final TransactionIdentifier TRANSACTION_ID = new TransactionIdentifier(HISTORY_ID, 0);
abstract T object();
public void testClose() {
final AbstractClientHandle<?> delegate = object().delegate();
object().close();
- Mockito.verify(delegate).abort();
+ // Called twice because of immediate cleaning
+ verify(delegate, times(2)).abort();
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.databroker;
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTest<ClientBackedWriteTransaction> {
private ClientBackedWriteTransaction object;
@Mock
private ClientTransaction delegate;
@Mock
- private NormalizedNode<?, ?> data;
- @Mock
- private YangInstanceIdentifier path;
+ private ContainerNode data;
@Mock
private DOMStoreThreePhaseCommitCohort readyCohort;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- Mockito.doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
- Mockito.doReturn(readyCohort).when(delegate).ready();
+ doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
+ doReturn(readyCohort).when(delegate).ready();
object = new ClientBackedWriteTransaction(delegate, null);
}
@Test
public void testWrite() {
- object().write(path, data);
- Mockito.verify(delegate).write(path, data);
+ object().write(YangInstanceIdentifier.of(), data);
+ verify(delegate).write(YangInstanceIdentifier.of(), data);
}
@Test
public void testMerge() {
- object().merge(path, data);
- Mockito.verify(delegate).merge(path, data);
+ object().merge(YangInstanceIdentifier.of(), data);
+ verify(delegate).merge(YangInstanceIdentifier.of(), data);
}
@Test
public void testDelete() {
- object().delete(path);
- Mockito.verify(delegate).delete(path);
+ object().delete(YangInstanceIdentifier.of());
+ verify(delegate).delete(YangInstanceIdentifier.of());
}
@Test
public void testReady() {
final DOMStoreThreePhaseCommitCohort result = object().ready();
- Assert.assertNotNull(result);
- Mockito.verify(delegate).ready();
+ assertNotNull(result);
+ verify(delegate).ready();
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.databroker;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
* @author Thomas Pantelis
*/
public class CommitStatsMXBeanImplTest {
-
@Test
public void test() {
-
DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
- CommitStatsMXBeanImpl bean =
- new CommitStatsMXBeanImpl(commitStatsTracker, "Test");
+ CommitStatsMXBeanImpl bean = new CommitStatsMXBeanImpl(commitStatsTracker, "Test");
commitStatsTracker.addDuration(100);
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
import com.google.common.base.Throwables;
-import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.stubbing.Answer;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
/**
* Unit tests for DOMConcurrentDataCommitCoordinator.
public class ConcurrentDOMDataBrokerTest {
private final DOMDataTreeWriteTransaction transaction = mock(DOMDataTreeWriteTransaction.class);
- private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
- private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
+ private final DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
private final ThreadPoolExecutor futureExecutor =
new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<>());
private ConcurrentDOMDataBroker coordinator;
final SettableFuture<Boolean> future = SettableFuture.create();
if (doAsync) {
new Thread(() -> {
- Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
- 10, TimeUnit.SECONDS);
+ Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue, 10, TimeUnit.SECONDS);
future.set(Boolean.TRUE);
}).start();
} else {
return future;
};
- doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
+ doAnswer(asyncCanCommit).when(mockCohort).canCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).commit();
- doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).commit();
-
- ListenableFuture<? extends CommitInfo> future =
- coordinator.commit(transaction, Arrays.asList(mockCohort1, mockCohort2));
+ ListenableFuture<? extends CommitInfo> future = coordinator.commit(transaction, mockCohort);
final CountDownLatch doneLatch = new CountDownLatch(1);
final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
- InOrder inOrder = inOrder(mockCohort1, mockCohort2);
- inOrder.verify(mockCohort1).canCommit();
- inOrder.verify(mockCohort2).canCommit();
- inOrder.verify(mockCohort1).preCommit();
- inOrder.verify(mockCohort2).preCommit();
- inOrder.verify(mockCohort1).commit();
- inOrder.verify(mockCohort2).commit();
+ InOrder inOrder = inOrder(mockCohort);
+ inOrder.verify(mockCohort, times(1)).canCommit();
+ inOrder.verify(mockCohort, times(1)).preCommit();
+ inOrder.verify(mockCohort, times(1)).commit();
}
@Test
public void testSubmitWithNegativeCanCommitResponse() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
- doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort3).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
- ListenableFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+ doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort).canCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
- assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
+ assertFailure(coordinator.commit(transaction, mockCohort), null, mockCohort);
}
private static void assertFailure(final ListenableFuture<?> future, final Exception expCause,
- final DOMStoreThreePhaseCommitCohort... mockCohorts) throws Exception {
+ final DOMStoreThreePhaseCommitCohort mockCohort) throws Exception {
try {
future.get(5, TimeUnit.SECONDS);
fail("Expected TransactionCommitFailedException");
if (expCause != null) {
assertSame("Expected cause", expCause.getClass(), tcf.getCause().getClass());
}
-
- InOrder inOrder = inOrder((Object[])mockCohorts);
- for (DOMStoreThreePhaseCommitCohort c: mockCohorts) {
- inOrder.verify(c).abort();
- }
+ verify(mockCohort, times(1)).abort();
} catch (TimeoutException e) {
throw e;
}
@Test
public void testSubmitWithCanCommitException() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- FluentFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
+ final Exception cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).canCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
- assertFailure(future, cause, mockCohort1, mockCohort2);
- }
-
- @Test
- public void testSubmitWithCanCommitDataStoreUnavailableException() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
- NoShardLeaderException rootCause = new NoShardLeaderException("mock");
- DataStoreUnavailableException cause = new DataStoreUnavailableException(rootCause.getMessage(), rootCause);
- doReturn(Futures.immediateFailedFuture(rootCause)).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- FluentFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
-
- assertFailure(future, cause, mockCohort1, mockCohort2);
+ assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
}
@Test
public void testSubmitWithPreCommitException() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
- doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
- .when(mockCohort3).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
- FluentFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+ doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+ final IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).preCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
- assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
}
@Test
public void testSubmitWithCommitException() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
- doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
- doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort3).preCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
- .when(mockCohort3).commit();
- doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
- FluentFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
- assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+ final IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).commit();
+ doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
+
+ assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
}
@Test
public void testSubmitWithAbortException() throws Exception {
- doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error")))
- .when(mockCohort1).abort();
+ final Exception canCommitCause = new IllegalStateException("canCommit error");
+ doReturn(Futures.immediateFailedFuture(canCommitCause)).when(mockCohort).canCommit();
+ final Exception abortCause = new IllegalStateException("abort error");
+ doReturn(Futures.immediateFailedFuture(abortCause)).when(mockCohort).abort();
- IllegalStateException cause = new IllegalStateException("mock canCommit error");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
- FluentFuture<? extends CommitInfo> future = coordinator.commit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
-
- assertFailure(future, cause, mockCohort1, mockCohort2);
+ assertFailure(coordinator.commit(transaction, mockCohort), canCommitCause, mockCohort);
}
@Test
configDomStore), futureExecutor)) {
DOMDataTreeReadWriteTransaction dataTxn = dataBroker.newReadWriteTransaction();
- dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
- dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
- dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
verify(configDomStore, never()).newReadWriteTransaction();
verify(operationalDomStore, times(1)).newReadWriteTransaction();
-
- dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-
- verify(configDomStore, times(1)).newReadWriteTransaction();
- verify(operationalDomStore, times(1)).newReadWriteTransaction();
}
}
configDomStore), futureExecutor)) {
DOMDataTreeWriteTransaction dataTxn = dataBroker.newWriteOnlyTransaction();
- dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
- dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
verify(configDomStore, never()).newWriteOnlyTransaction();
verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
-
- dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-
- verify(configDomStore, times(1)).newWriteOnlyTransaction();
- verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
}
}
configDomStore), futureExecutor)) {
DOMDataTreeReadTransaction dataTxn = dataBroker.newReadOnlyTransaction();
- dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
- dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
verify(configDomStore, never()).newReadOnlyTransaction();
verify(operationalDomStore, times(1)).newReadOnlyTransaction();
-
- dataTxn.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
-
- verify(configDomStore, times(1)).newReadOnlyTransaction();
- verify(operationalDomStore, times(1)).newReadOnlyTransaction();
}
}
DOMStore configDomStore = mock(DOMStore.class);
DOMStore operationalDomStore = mock(DOMStore.class);
DOMStoreReadWriteTransaction mockStoreReadWriteTransaction = mock(DOMStoreReadWriteTransaction.class);
- DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
doReturn(mockStoreReadWriteTransaction).when(operationalDomStore).newReadWriteTransaction();
doReturn(mockCohort).when(mockStoreReadWriteTransaction).ready();
configDomStore), futureExecutor) {
@Override
public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
- Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
- commitCohorts.addAll(cohorts);
+ DOMStoreThreePhaseCommitCohort cohort) {
+ commitCohorts.add(cohort);
latch.countDown();
- return super.commit(writeTx, cohorts);
+ return super.commit(writeTx, cohort);
}
}) {
DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
- domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+ domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
domDataReadWriteTransaction.commit();
}
}
- @Test
- public void testSubmitWithOnlyTwoSubTransactions() throws InterruptedException {
- DOMStore configDomStore = mock(DOMStore.class);
- DOMStore operationalDomStore = mock(DOMStore.class);
- DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
- DOMStoreReadWriteTransaction configTransaction = mock(DOMStoreReadWriteTransaction.class);
- DOMStoreThreePhaseCommitCohort mockCohortOperational = mock(DOMStoreThreePhaseCommitCohort.class);
- DOMStoreThreePhaseCommitCohort mockCohortConfig = mock(DOMStoreThreePhaseCommitCohort.class);
-
- doReturn(operationalTransaction).when(operationalDomStore).newReadWriteTransaction();
- doReturn(configTransaction).when(configDomStore).newReadWriteTransaction();
-
- doReturn(mockCohortOperational).when(operationalTransaction).ready();
- doReturn(immediateFalseFluentFuture()).when(mockCohortOperational).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohortOperational).abort();
-
- doReturn(mockCohortConfig).when(configTransaction).ready();
- doReturn(immediateFalseFluentFuture()).when(mockCohortConfig).canCommit();
- doReturn(immediateNullFluentFuture()).when(mockCohortConfig).abort();
-
- final CountDownLatch latch = new CountDownLatch(1);
- final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList<>();
-
- try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
- LogicalDatastoreType.OPERATIONAL, operationalDomStore, LogicalDatastoreType.CONFIGURATION,
- configDomStore), futureExecutor) {
- @Override
- @SuppressWarnings("checkstyle:hiddenField")
- public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
- Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
- commitCohorts.addAll(cohorts);
- latch.countDown();
- return super.commit(writeTx, cohorts);
- }
- }) {
- DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
-
- domDataReadWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(),
- mock(NormalizedNode.class));
- domDataReadWriteTransaction.merge(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(),
- mock(NormalizedNode.class));
-
- domDataReadWriteTransaction.commit();
-
- assertTrue(latch.await(10, TimeUnit.SECONDS));
-
- assertTrue(commitCohorts.size() == 2);
- }
- }
-
@Test
public void testCreateTransactionChain() {
DOMStore domStore = mock(DOMStore.class);
LogicalDatastoreType.OPERATIONAL, domStore, LogicalDatastoreType.CONFIGURATION, domStore),
futureExecutor)) {
- dataBroker.createTransactionChain(mock(DOMTransactionChainListener.class));
+ dataBroker.createTransactionChain();
verify(domStore, times(2)).createTransactionChain();
}
doReturn(mockChain).when(domStore).createTransactionChain();
doReturn(operationalTransaction).when(mockChain).newWriteOnlyTransaction();
- DOMTransactionChain transactionChain = dataBroker.createTransactionChain(
- mock(DOMTransactionChainListener.class));
+ DOMTransactionChain transactionChain = dataBroker.createTransactionChain();
DOMDataTreeWriteTransaction domDataWriteTransaction = transactionChain.newWriteOnlyTransaction();
verify(mockChain, never()).newWriteOnlyTransaction();
- domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(),
- mock(NormalizedNode.class));
+ domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(),
+ mock(ContainerNode.class));
}
}
@Test
public void testExtensions() {
- DistributedDataStore mockConfigStore = mock(DistributedDataStore.class);
- DistributedDataStore mockOperStore = mock(DistributedDataStore.class);
- try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
+ final var mockConfigStore = mock(ClientBackedDataStore.class);
+ final var mockOperStore = mock(ClientBackedDataStore.class);
+ try (var dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
LogicalDatastoreType.OPERATIONAL, mockOperStore,
LogicalDatastoreType.CONFIGURATION, mockConfigStore), futureExecutor)) {
+ assertNotNull(dataBroker.extension(DataTreeChangeExtension.class));
- ClassToInstanceMap<DOMDataBrokerExtension> supportedExtensions = dataBroker.getExtensions();
- assertNotNull(supportedExtensions.getInstance(DOMDataTreeChangeService.class));
-
- DOMDataTreeCommitCohortRegistry cohortRegistry = supportedExtensions.getInstance(
- DOMDataTreeCommitCohortRegistry.class);
+ final var cohortRegistry = dataBroker.extension(CommitCohortExtension.class);
assertNotNull(cohortRegistry);
- DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
- DOMDataTreeIdentifier path = new DOMDataTreeIdentifier(
- org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION,
- YangInstanceIdentifier.empty());
- cohortRegistry.registerCommitCohort(path, mockCohort);
+ final var cohort = mock(DOMDataTreeCommitCohort.class);
+ final var path = DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.of());
+ cohortRegistry.registerCommitCohort(path, cohort);
- verify(mockConfigStore).registerCommitCohort(path, mockCohort);
+ verify(mockConfigStore).registerCommitCohort(path, cohort);
}
}
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
import akka.actor.ActorSystem;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
-import java.util.Collection;
-import java.util.Collections;
+import java.util.List;
+import java.util.Map;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import scala.concurrent.Promise;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<AbstractProxyTransaction>> {
-
private static final String PERSISTENCE_ID = "per-1";
- private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+ private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
@Mock
private DataTree dataTree;
@Before
public void setUp() throws Exception {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
final TestProbe contextProbe = new TestProbe(system, "context");
final TestProbe clientContextProbe = new TestProbe(system, "client-context");
client.getConnection(0L);
contextProbe.expectMsgClass(ConnectClientRequest.class);
final long sequence = 0L;
- contextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(),
- Collections.emptyList(), dataTree, 3));
+ contextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(), List.of(), dataTree, 3));
final InternalCommand<ShardBackendInfo> command = clientContextProbe.expectMsgClass(InternalCommand.class);
command.execute(client);
//data tree mock
- when(dataTree.takeSnapshot()).thenReturn(dataTreeSnapshot);
+ doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
handle = createHandle(parent);
}
@Test
public void testGetIdentifier() {
- Assert.assertEquals(TRANSACTION_ID, handle.getIdentifier());
+ assertEquals(TRANSACTION_ID, handle.getIdentifier());
}
@Test
handle.abort();
final Envelope<?> envelope = backendProbe.expectMsgClass(Envelope.class);
final AbortLocalTransactionRequest request = (AbortLocalTransactionRequest) envelope.getMessage();
- Assert.assertEquals(TRANSACTION_ID, request.getTarget());
+ assertEquals(TRANSACTION_ID, request.getTarget());
checkClosed();
}
handle.localAbort(new RuntimeException("fail"));
final Envelope<?> envelope = backendProbe.expectMsgClass(Envelope.class);
final AbortLocalTransactionRequest request = (AbortLocalTransactionRequest) envelope.getMessage();
- Assert.assertEquals(TRANSACTION_ID, request.getTarget());
+ assertEquals(TRANSACTION_ID, request.getTarget());
checkClosed();
}
@Test
public void testEnsureClosed() {
doHandleOperation(handle);
- final Collection<AbstractProxyTransaction> transactions = handle.ensureClosed();
- Assert.assertNotNull(transactions);
- Assert.assertEquals(1, transactions.size());
+ final Map<Long, AbstractProxyTransaction> transactions = handle.ensureClosed();
+ assertNotNull(transactions);
+ assertEquals(1, transactions.size());
}
@Test
public void testEnsureProxy() {
- final AbstractProxyTransaction expected = mock(AbstractProxyTransaction.class);
- final AbstractProxyTransaction proxy = handle.ensureProxy(PATH);
- Assert.assertEquals(0, proxy.getIdentifier().getTransactionId());
+ final var proxy = handle.ensureProxy(PATH);
+ assertEquals(0, proxy.getIdentifier().getTransactionId());
}
@Test
public void testParent() {
- Assert.assertEquals(parent, handle.parent());
+ assertEquals(parent, handle.parent());
}
protected void checkClosed() throws Exception {
protected <R extends Request<?, R>> R backendRespondToRequest(final Class<R> expectedRequestClass,
final Response<?, ?> response) {
final RequestEnvelope envelope = backendProbe.expectMsgClass(RequestEnvelope.class);
- Assert.assertEquals(expectedRequestClass, envelope.getMessage().getClass());
+ assertEquals(expectedRequestClass, envelope.getMessage().getClass());
final AbstractClientConnection<ShardBackendInfo> connection = client.getConnection(0L);
final long sessionId = envelope.getSessionId();
final long txSequence = envelope.getTxSequence();
final ActorSelection selection = system.actorSelection(actor.path());
final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
promise.success(shardInfo);
- when(mock.findPrimaryShardAsync(any())).thenReturn(promise.future());
+ doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+
+ final EffectiveModelContext context = mock(EffectiveModelContext.class);
+ lenient().doCallRealMethod().when(context).getQName();
+ lenient().doReturn(context).when(mock).getSchemaContext();
+ lenient().doReturn(DatastoreContext.newBuilder().build()).when(mock).getDatastoreContext();
+
return mock;
}
-
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import com.google.common.primitives.UnsignedLong;
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mock;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import scala.concurrent.Promise;
import scala.concurrent.impl.Promise.DefaultPromise;
public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory> {
protected static final String SHARD_NAME = "default";
protected static final String PERSISTENCE_ID = "per-1";
- protected static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 1L);
+ protected static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(TestUtils.CLIENT_ID, 1L);
@Mock
private DataTree tree;
+ @Mock
+ private DatastoreContext datastoreContext;
protected abstract T object();
@Test
public void testCreateSnapshotProxy() {
- final AbstractProxyTransaction snapshotProxy = object().createSnapshotProxy(TRANSACTION_ID, 0L);
- Assert.assertNotNull(snapshotProxy);
- Assert.assertNotEquals(TRANSACTION_ID, snapshotProxy.getIdentifier());
+ final AbstractProxyTransaction snapshotProxy = object().createSnapshotProxy(TestUtils.TRANSACTION_ID, 0L);
+ assertNotNull(snapshotProxy);
+ assertNotEquals(TestUtils.TRANSACTION_ID, snapshotProxy.getIdentifier());
}
@Test
public void testCreateTransactionProxy() {
- AbstractProxyTransaction transactionProxy = object().createTransactionProxy(TRANSACTION_ID, 0L);
- Assert.assertNotNull(transactionProxy);
- Assert.assertNotEquals(TRANSACTION_ID, transactionProxy.getIdentifier());
+ AbstractProxyTransaction transactionProxy = object().createTransactionProxy(TestUtils.TRANSACTION_ID, 0L);
+ assertNotNull(transactionProxy);
+ assertNotEquals(TestUtils.TRANSACTION_ID, transactionProxy.getIdentifier());
}
@Test
public void testState() {
- Assert.assertEquals(AbstractClientHistory.State.IDLE, object().state());
+ assertEquals(AbstractClientHistory.State.IDLE, object().state());
}
@Test
public void testUpdateState() {
object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.CLOSED);
- Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+ assertEquals(AbstractClientHistory.State.CLOSED, object().state());
}
@Test
public void testDoClose() {
- object().createTransactionProxy(TRANSACTION_ID, 0L);
+ object().createTransactionProxy(TestUtils.TRANSACTION_ID, 0L);
object().doClose();
- Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+ assertEquals(AbstractClientHistory.State.CLOSED, object().state());
}
@Test
public void testGetIdentifier() {
- Assert.assertEquals(HISTORY_ID, object().getIdentifier());
+ assertEquals(HISTORY_ID, object().getIdentifier());
}
@Test
public void testNextTx() {
- Assert.assertTrue(object().nextTx() + 1 == object().nextTx());
+ assertEquals(object().nextTx() + 1, object().nextTx());
}
@Test
public void testResolveShardForPath() {
- final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.empty());
- Assert.assertEquals(0L, shardForPath.longValue());
+ final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.of());
+ assertNotNull(shardForPath);
+ assertEquals(0L, (long) shardForPath);
}
@Test
public void testLocalAbort() {
object().localAbort(new Throwable());
- Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+ assertEquals(AbstractClientHistory.State.CLOSED, object().state());
}
@Test
public void testOnProxyDestroyed() {
- final ProxyHistory proxyHistory = Mockito.mock(ProxyHistory.class);
- when(proxyHistory.getIdentifier()).thenReturn(HISTORY_ID);
+ final ProxyHistory proxyHistory = mock(ProxyHistory.class);
+ doReturn(HISTORY_ID).when(proxyHistory).getIdentifier();
object().onProxyDestroyed(proxyHistory);
verify(proxyHistory).getIdentifier();
@Test
public void testCreateTransaction() {
final ClientTransaction transaction = object().createTransaction();
- Assert.assertNotNull(transaction);
+ assertNotNull(transaction);
}
@Test
public void testTakeSnapshot() {
final ClientSnapshot clientSnapshot = object().takeSnapshot();
- Assert.assertEquals(object().getIdentifier(), clientSnapshot.getIdentifier().getHistoryId());
+ assertEquals(object().getIdentifier(), clientSnapshot.getIdentifier().getHistoryId());
}
@Test
SHARD_NAME, UnsignedLong.ZERO, Optional.of(tree), 10);
final ConnectedClientConnection<ShardBackendInfo> newConn = AccessClientUtil.createConnectedConnection(
clientActorContext(), cookie, info);
- object().createSnapshotProxy(TRANSACTION_ID, shard);
+ object().createSnapshotProxy(TestUtils.TRANSACTION_ID, shard);
final HistoryReconnectCohort reconnectCohort = object().startReconnect(newConn);
- Assert.assertNotNull(reconnectCohort);
+ assertNotNull(reconnectCohort);
}
@Test
SHARD_NAME, UnsignedLong.ZERO, Optional.of(tree), 10);
final ConnectedClientConnection<ShardBackendInfo> newConn = AccessClientUtil.createConnectedConnection(
clientActorContext(), cookie, info);
- object().createSnapshotProxy(TRANSACTION_ID, shard);
+ object().createSnapshotProxy(TestUtils.TRANSACTION_ID, shard);
final HistoryReconnectCohort reconnectCohort = object().startReconnect(newConn);
- Assert.assertNull(reconnectCohort);
+ assertNull(reconnectCohort);
}
- protected static ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
+ protected final ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
final ActorUtils mock = mock(ActorUtils.class);
final Promise<PrimaryShardInfo> promise = new DefaultPromise<>();
final ActorSelection selection = system.actorSelection(actor.path());
final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
promise.success(shardInfo);
- when(mock.findPrimaryShardAsync(any())).thenReturn(promise.future());
+ doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(mock).getDatastoreContext();
+
return mock;
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
import akka.actor.ActorRef;
import akka.actor.Status;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
-import java.util.Collections;
+import java.util.List;
import java.util.Optional;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.InternalCommand;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
import scala.concurrent.Promise;
public abstract class AbstractDataStoreClientBehaviorTest {
private TestProbe clientActorProbe;
private TestProbe actorContextProbe;
private AbstractDataStoreClientBehavior behavior;
+ private ActorUtils util;
@Before
public void setUp() {
system = ActorSystem.apply();
clientActorProbe = new TestProbe(system, "client");
actorContextProbe = new TestProbe(system, "actor-context");
- final ActorUtils context = createActorContextMock(system, actorContextProbe.ref());
+ util = createActorContextMock(system, actorContextProbe.ref());
clientContext =
AccessClientUtil.createClientActorContext(system, clientActorProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
- behavior = createBehavior(clientContext, context);
+ behavior = createBehavior(clientContext, util);
}
@SuppressWarnings("checkstyle:hiddenField")
@Test
public void testResolveShardForPath() {
- Assert.assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.empty()).longValue());
+ assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.of()).longValue());
}
@Test
final GetClientRequest request = new GetClientRequest(probe.ref());
final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand(request);
final Status.Success success = probe.expectMsgClass(Status.Success.class);
- Assert.assertEquals(behavior, success.status());
- Assert.assertSame(behavior, nextBehavior);
+ assertEquals(behavior, success.status());
+ assertSame(behavior, nextBehavior);
}
@Test
public void testOnCommandUnhandled() {
final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand("unhandled");
- Assert.assertSame(behavior, nextBehavior);
+ assertSame(behavior, nextBehavior);
}
@Test
public void testCreateLocalHistory() {
final ClientLocalHistory history = behavior.createLocalHistory();
- Assert.assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
+ assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
}
@Test
public void testCreateTransaction() {
final ClientTransaction transaction = behavior.createTransaction();
- Assert.assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
+ assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
}
@Test
public void testCreateSnapshot() {
final ClientSnapshot snapshot = behavior.createSnapshot();
- Assert.assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
+ assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
}
@Test
final InternalCommand<ShardBackendInfo> internalCommand =
clientActorProbe.expectMsgClass(InternalCommand.class);
internalCommand.execute(behavior);
- try {
- behavior.createLocalHistory();
- Assert.fail("Behavior is closed and shouldn't allow to create new history.");
- } catch (final IllegalStateException e) {
- //ok
- }
+
+ assertThrows(IllegalStateException.class, () -> behavior.createLocalHistory());
}
@Test
public void testGetIdentifier() {
- Assert.assertEquals(CLIENT_ID, behavior.getIdentifier());
+ assertEquals(CLIENT_ID, behavior.getIdentifier());
}
@Test
public void testGetConnection() {
+ final var datastoreContext = mock(DatastoreContext.class);
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(util).getDatastoreContext();
+
//set up data tree mock
final CursorAwareDataTreeModification modification = mock(CursorAwareDataTreeModification.class);
- when(modification.readNode(YangInstanceIdentifier.empty())).thenReturn(Optional.empty());
+ doReturn(Optional.empty()).when(modification).readNode(YangInstanceIdentifier.of());
final DataTreeSnapshot snapshot = mock(DataTreeSnapshot.class);
- when(snapshot.newModification()).thenReturn(modification);
+ doReturn(modification).when(snapshot).newModification();
final DataTree dataTree = mock(DataTree.class);
- when(dataTree.takeSnapshot()).thenReturn(snapshot);
+ doReturn(snapshot).when(dataTree).takeSnapshot();
final TestProbe backendProbe = new TestProbe(system, "backend");
final long shard = 0L;
- behavior.createTransaction().read(YangInstanceIdentifier.empty());
+
+ behavior.createTransaction().read(YangInstanceIdentifier.of());
final AbstractClientConnection<ShardBackendInfo> connection = behavior.getConnection(shard);
//check cached connection for same shard
- Assert.assertSame(connection, behavior.getConnection(shard));
+ assertSame(connection, behavior.getConnection(shard));
final ConnectClientRequest connectClientRequest = actorContextProbe.expectMsgClass(ConnectClientRequest.class);
- Assert.assertEquals(CLIENT_ID, connectClientRequest.getTarget());
+ assertEquals(CLIENT_ID, connectClientRequest.getTarget());
final long sequence = 0L;
- Assert.assertEquals(sequence, connectClientRequest.getSequence());
- actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(),
- Collections.emptyList(), dataTree, 3));
- Assert.assertEquals(clientActorProbe.ref(), connection.localActor());
+ assertEquals(sequence, connectClientRequest.getSequence());
+ actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(), List.of(), dataTree,
+ 3));
+ assertEquals(clientActorProbe.ref(), connection.localActor());
//capture and execute command passed to client context
final InternalCommand<ShardBackendInfo> command = clientActorProbe.expectMsgClass(InternalCommand.class);
command.execute(behavior);
//check, whether command was reaplayed
- verify(modification).readNode(YangInstanceIdentifier.empty());
+ verify(modification).readNode(YangInstanceIdentifier.of());
}
private static ActorUtils createActorContextMock(final ActorSystem system, final ActorRef actor) {
final ActorSelection selection = system.actorSelection(actor.path());
final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
promise.success(shardInfo);
- when(mock.findPrimaryShardAsync(SHARD)).thenReturn(promise.future());
+ doReturn(promise.future()).when(mock).findPrimaryShardAsync(SHARD);
return mock;
}
-
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransaction> {
protected static final TransactionIdentifier TRANSACTION_ID = TestUtils.TRANSACTION_ID;
private static final ClientIdentifier CLIENT_ID = TestUtils.CLIENT_ID;
protected static final YangInstanceIdentifier PATH_3 = YangInstanceIdentifier.builder()
.node(QName.create("ns-1", "node-3"))
.build();
- protected static final ContainerNode DATA_1 = Builders.containerBuilder()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
+ protected static final ContainerNode DATA_1 = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
.build();
- protected static final ContainerNode DATA_2 = Builders.containerBuilder()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
+ protected static final ContainerNode DATA_2 = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
.build();
protected static final String PERSISTENCE_ID = "per-1";
private DataTreeSnapshot snapshot;
@Mock
private AbstractClientHistory history;
+ @Mock
+ private DatastoreContext datastoreContext;
+ @Mock
+ private ActorUtils actorUtils;
+
private ActorSystem system;
private TestProbe backendProbe;
private TestProbe clientContextProbe;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
clientContextProbe = new TestProbe(system, "clientContext");
backendProbe = new TestProbe(system, "backend");
context = AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID,
PERSISTENCE_ID);
- final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+ final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
"default", UnsignedLong.ZERO, Optional.empty(), 3);
final AbstractClientConnection<ShardBackendInfo> connection =
AccessClientUtil.createConnectedConnection(context, 0L, backend);
+
final ProxyHistory parent = ProxyHistory.createClient(history, connection, HISTORY_ID);
transaction = createTransaction(parent, TestUtils.TRANSACTION_ID, snapshot);
tester = new TransactionTester<>(transaction, connection, backendProbe);
}
+ protected final void mockForRemote() {
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+ doReturn(actorUtils).when(history).actorUtils();
+ }
+
@SuppressWarnings("checkstyle:hiddenField")
protected abstract T createTransaction(ProxyHistory parent, TransactionIdentifier id, DataTreeSnapshot snapshot);
final ModifyTransactionRequest transformed = successor.expectTransactionRequest(ModifyTransactionRequest.class);
assertNotNull(transformed);
assertEquals(successful1.getSequence(), transformed.getSequence());
- assertTrue(transformed.getPersistenceProtocol().isPresent());
- assertEquals(PersistenceProtocol.ABORT, transformed.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.ABORT), transformed.getPersistenceProtocol());
ReadTransactionRequest tmpRead = successor.expectTransactionRequest(ReadTransactionRequest.class);
assertNotNull(tmpRead);
}
@SuppressWarnings("checkstyle:hiddenField")
- protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Void>> consumer,
+ protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Empty>> consumer,
final Class<R> expectedRequest,
final BiFunction<TransactionIdentifier, Long, TransactionSuccess<?>> replySupplier) {
final TransactionTester<T> tester = getTester();
- final VotingFuture<Void> future = mock(VotingFuture.class);
+ final VotingFuture<Empty> future = mock(VotingFuture.class);
transaction.seal();
consumer.accept(future);
final TransactionRequest<?> req = tester.expectTransactionRequest(expectedRequest);
final TestProbe clientContextProbe = new TestProbe(system, "clientContext2");
final ClientActorContext context =
AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
- final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+ final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
"default", UnsignedLong.ZERO, Optional.empty(), 3);
final AbstractClientConnection<ShardBackendInfo> connection =
AccessClientUtil.createConnectedConnection(context, 0L, backend);
final TestProbe clientContextProbe = new TestProbe(system, "remoteClientContext");
final TestProbe backendProbe = new TestProbe(system, "remoteBackend");
final AbstractClientHistory history = mock(AbstractClientHistory.class);
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+ doReturn(actorUtils).when(history).actorUtils();
+
final ClientActorContext context =
AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
- final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+ final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
"default", UnsignedLong.ZERO, Optional.empty(), 5);
final AbstractClientConnection<ShardBackendInfo> connection =
AccessClientUtil.createConnectedConnection(context, 0L, backend);
final ProxyHistory proxyHistory = ProxyHistory.createClient(history, connection, HISTORY_ID);
+
final RemoteProxyTransaction transaction =
new RemoteProxyTransaction(proxyHistory, TRANSACTION_ID, false, false, false);
return new TransactionTester<>(transaction, connection, backendProbe);
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorSystem;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLocalHistory> {
private ActorSystem system;
private AbstractDataStoreClientBehavior behavior;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
system = ActorSystem.apply();
final TestProbe clientContextProbe = new TestProbe(system, "client");
final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
clientActorContext = AccessClientUtil.createClientActorContext(
- system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+ system, clientContextProbe.ref(), TestUtils.CLIENT_ID, PERSISTENCE_ID);
final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
@Test
public void testClose() {
object().close();
- Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+ assertEquals(AbstractClientHistory.State.CLOSED, object().state());
}
@Override
@Test
public void testDoCreateTransaction() {
final ClientTransaction clientTransaction = object().doCreateTransaction();
- Assert.assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
+ assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
}
@Override
@Test
public void testOnTransactionAbort() {
final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
- Assert.assertTrue(clientSnapshot.abort());
+ assertTrue(clientSnapshot.abort());
}
@Override
public void testCreateHistoryProxy() {
final AbstractClientConnection<ShardBackendInfo> clientConnection = behavior.getConnection(0L);
final ProxyHistory historyProxy = object().createHistoryProxy(HISTORY_ID, clientConnection);
- Assert.assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
+ assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
}
@Override
@Test
public void testDoCreateSnapshot() {
final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
- Assert.assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
+ assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
clientSnapshot.getIdentifier().getHistoryId());
}
// make transaction ready
object().onTransactionReady(tx, cohort);
// state should be set to IDLE
- Assert.assertEquals(AbstractClientHistory.State.IDLE, object.state());
+ assertEquals(AbstractClientHistory.State.IDLE, object.state());
// complete transaction
object().onTransactionComplete(tx.getIdentifier());
// state is still IDLE
- Assert.assertEquals(AbstractClientHistory.State.IDLE, object.state());
+ assertEquals(AbstractClientHistory.State.IDLE, object.state());
}
@Override
@Test
public void testOnTransactionReady() {
- final AbstractTransactionCommitCohort result = object().onTransactionReady(
- object().createTransaction(), cohort);
- Assert.assertEquals(result, cohort);
+ final AbstractTransactionCommitCohort result = object().onTransactionReady(object().createTransaction(),
+ cohort);
+ assertEquals(result, cohort);
}
@Override
- @Test(expected = IllegalStateException.class)
+ @Test
public void testOnTransactionReadyDuplicate() {
final ClientTransaction tx = object().createTransaction();
object().onTransactionReady(tx, cohort);
- object().onTransactionReady(tx, cohort);
+ final IllegalStateException ise = assertThrows(IllegalStateException.class,
+ () -> object().onTransactionReady(tx, cohort));
+ assertThat(ise.getMessage(), containsString(" is idle when readying transaction "));
}
@Test
object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.TX_OPEN);
final AbstractTransactionCommitCohort transactionCommitCohort =
object().onTransactionReady(transaction, cohort);
- Assert.assertEquals(cohort, transactionCommitCohort);
+ assertEquals(cohort, transactionCommitCohort);
}
@Test
object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.CLOSED);
final AbstractTransactionCommitCohort transactionCommitCohort =
object().onTransactionReady(transaction, cohort);
- Assert.assertEquals(cohort, transactionCommitCohort);
+ assertEquals(cohort, transactionCommitCohort);
}
- @Test(expected = IllegalStateException.class)
+ @Test
public void testOnTransactionReadyAndCompleteIdleFail() {
- object().onTransactionReady(transaction, cohort);
+ final IllegalStateException ise = assertThrows(IllegalStateException.class,
+ () -> object().onTransactionReady(transaction, cohort));
+ assertThat(ise.getMessage(), endsWith(" is idle when readying transaction null"));
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
-import com.google.common.util.concurrent.ListenableFuture;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public class ClientSnapshotTest extends AbstractClientHandleTest<ClientSnapshot> {
-
- private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+ private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
@Before
@Override
public void setUp() throws Exception {
super.setUp();
- when(getDataTreeSnapshot().readNode(PATH)).thenReturn(Optional.empty());
+ doReturn(Optional.empty()).when(getDataTreeSnapshot()).readNode(PATH);
}
@Override
@Test
public void testExists() throws Exception {
- final ListenableFuture<Boolean> exists = getHandle().exists(PATH);
+ final var exists = getHandle().exists(PATH);
verify(getDataTreeSnapshot()).readNode(PATH);
assertEquals(Boolean.FALSE, getWithTimeout(exists));
}
@Test
public void testRead() throws Exception {
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> exists = getHandle().read(PATH);
+ final var read = getHandle().read(PATH);
verify(getDataTreeSnapshot()).readNode(PATH);
- assertFalse(getWithTimeout(exists).isPresent());
+ assertFalse(getWithTimeout(read).isPresent());
}
-
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitSuccess;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ClientTransactionCommitCohortTest {
-
private static final String PERSISTENCE_ID = "per-1";
private static final int TRANSACTIONS = 3;
+ private final List<TransactionTester<RemoteProxyTransaction>> transactions = new ArrayList<>();
+
@Mock
private AbstractClientHistory history;
+ @Mock
+ private DatastoreContext datastoreContext;
+ @Mock
+ private ActorUtils actorUtils;
+
private ActorSystem system;
- private List<TransactionTester<RemoteProxyTransaction>> transactions;
private ClientTransactionCommitCohort cohort;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
final ClientActorContext context =
AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
- transactions = new ArrayList<>();
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+ doReturn(actorUtils).when(history).actorUtils();
+
for (int i = 0; i < TRANSACTIONS; i++) {
transactions.add(createTransactionTester(new TestProbe(system, "backend" + i), context, history));
}
@Test
public void testPreCommit() throws Exception {
testOpSuccess(ClientTransactionCommitCohort::preCommit, this::expectPreCommit, this::replyPreCommitSuccess,
- null);
+ Empty.value());
}
@Test
@Test
public void testCommit() throws Exception {
- testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess, null);
+ testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess,
+ CommitInfo.empty());
}
@Test
@Test
public void testAbort() throws Exception {
- testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, null);
+ testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, Empty.value());
}
@Test
private void expectCanCommit(final TransactionTester<RemoteProxyTransaction> tester) {
final ModifyTransactionRequest request = tester.expectTransactionRequest(ModifyTransactionRequest.class);
- Assert.assertTrue(request.getPersistenceProtocol().isPresent());
- Assert.assertEquals(PersistenceProtocol.THREE_PHASE, request.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.THREE_PHASE), request.getPersistenceProtocol());
}
void expectPreCommit(final TransactionTester<?> tester) {
private static TransactionTester<RemoteProxyTransaction> createTransactionTester(final TestProbe backendProbe,
final ClientActorContext context,
final AbstractClientHistory history) {
- final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+ final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
"default", UnsignedLong.ZERO, Optional.empty(), 3);
final AbstractClientConnection<ShardBackendInfo> connection =
AccessClientUtil.createConnectedConnection(context, 0L, backend);
final T expectedResult) throws Exception {
final ListenableFuture<T> result = operation.apply(cohort);
replySuccess(transactions, expectFunction, replyFunction);
- Assert.assertEquals(expectedResult, getWithTimeout(result));
+ assertEquals(expectedResult, getWithTimeout(result));
}
/**
//check future fail
final ExecutionException exception =
assertOperationThrowsException(() -> getWithTimeout(canCommit), ExecutionException.class);
- Assert.assertEquals(e, exception.getCause());
+ assertEquals(e, exception.getCause());
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker.actors.dds;
-
-import static org.mockito.Mockito.verify;
-
-import java.util.Arrays;
-import java.util.stream.Collectors;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-
-public class ClientTransactionCursorTest {
-
- private static final QName NODE_1 = QName.create("ns-1", "node-1");
- private static final QName NODE_2 = QName.create(NODE_1, "node-2");
- private static final QName NODE_3 = QName.create(NODE_1, "node-3");
-
- @Mock
- private ClientTransaction transaction;
- private ClientTransactionCursor cursor;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
- cursor = new ClientTransactionCursor(transaction);
- }
-
- @Test
- public void testEnterOneNode() {
- cursor.enter(YangInstanceIdentifier.NodeIdentifier.create(NODE_1));
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
- final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testEnterNodeIterables() {
- final Iterable<YangInstanceIdentifier.PathArgument> collect = toPathArg(NODE_1, NODE_2);
- cursor.enter(collect);
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_3));
- final YangInstanceIdentifier expected = createId(NODE_1, NODE_2, NODE_3);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testEnterNodeVarargs() {
- cursor.enter(YangInstanceIdentifier.NodeIdentifier.create(NODE_1),
- YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_3));
- final YangInstanceIdentifier expected = createId(NODE_1, NODE_2, NODE_3);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testExitOneLevel() {
- cursor.enter(toPathArg(NODE_1, NODE_2));
- cursor.exit();
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
- final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testExitTwoLevels() {
- cursor.enter(toPathArg(NODE_1, NODE_2, NODE_3));
- cursor.exit(2);
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
- final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testClose() {
- cursor.close();
- verify(transaction).closeCursor(cursor);
- }
-
- @Test
- public void testDelete() {
- cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_1));
- final YangInstanceIdentifier expected = createId(NODE_1);
- verify(transaction).delete(expected);
- }
-
- @Test
- public void testMerge() {
- final YangInstanceIdentifier.NodeIdentifier path = YangInstanceIdentifier.NodeIdentifier.create(NODE_1);
- final ContainerNode data = createData(path.getNodeType());
- cursor.merge(path, data);
- final YangInstanceIdentifier expected = createId(NODE_1);
- verify(transaction).merge(expected, data);
- }
-
- @Test
- public void testWrite() {
- final YangInstanceIdentifier.NodeIdentifier path = YangInstanceIdentifier.NodeIdentifier.create(NODE_1);
- final ContainerNode data = createData(path.getNodeType());
- cursor.write(path, data);
- final YangInstanceIdentifier expected = createId(NODE_1);
- verify(transaction).write(expected, data);
- }
-
- private static Iterable<YangInstanceIdentifier.PathArgument> toPathArg(final QName... pathArguments) {
- return Arrays.stream(pathArguments)
- .map(YangInstanceIdentifier.NodeIdentifier::create)
- .collect(Collectors.toList());
- }
-
- private static YangInstanceIdentifier createId(final QName... pathArguments) {
- return YangInstanceIdentifier.create(toPathArg(pathArguments));
- }
-
- private static ContainerNode createData(final QName id) {
- return Builders.containerBuilder()
- .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(id))
- .build();
- }
-
-}
\ No newline at end of file
package org.opendaylight.controller.cluster.databroker.actors.dds;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertFutureEquals;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertOperationThrowsException;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
import com.google.common.util.concurrent.FluentFuture;
import org.mockito.Mock;
import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
+import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransaction> {
-
- private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.builder()
- .node(QName.create("ns-1", "node-1"))
- .build();
- private static final NormalizedNode<?, ?> DATA = Builders.containerBuilder()
- .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
- .build();
+ private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of(QName.create("ns-1", "node-1"));
+ private static final ContainerNode DATA = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
+ .build();
@Mock
private CursorAwareDataTreeModification modification;
transaction.read(PATH);
}
- @Test
- public void testOpenCloseCursor() {
- final DOMDataTreeWriteCursor cursor = getHandle().openCursor();
- getHandle().closeCursor(cursor);
- getHandle().openCursor().delete(PATH.getLastPathArgument());
- verify(modification).delete(PATH);
- }
-
- @Test
- public void testOpenSecondCursor() throws Exception {
- getHandle().openCursor();
- assertOperationThrowsException(getHandle()::openCursor, IllegalStateException.class);
- }
-
@Test
public void testExists() throws Exception {
final FluentFuture<Boolean> exists = getHandle().exists(PATH);
@Test
public void testRead() throws Exception {
- final FluentFuture<Optional<NormalizedNode<?, ?>>> resultFuture = getHandle().read(PATH);
+ final FluentFuture<Optional<NormalizedNode>> resultFuture = getHandle().read(PATH);
verify(modification).readNode(PATH);
- final Optional<NormalizedNode<?, ?>> result = getWithTimeout(resultFuture);
- assertTrue(result.isPresent());
- assertEquals(DATA, result.get());
+ assertEquals(Optional.of(DATA), getWithTimeout(resultFuture));
}
@Test
public void testReadyEmpty() throws Exception {
final DOMStoreThreePhaseCommitCohort cohort = getHandle().ready();
assertFutureEquals(Boolean.TRUE, cohort.canCommit());
- assertFutureEquals(null, cohort.preCommit());
- assertFutureEquals(null, cohort.commit());
+ assertFutureEquals(Empty.value(), cohort.preCommit());
+ assertFutureEquals(CommitInfo.empty(), cohort.commit());
}
@Test
backendRespondToRequest(CommitLocalTransactionRequest.class, response);
assertEquals(modification, request.getModification());
assertFutureEquals(Boolean.TRUE, actual);
- assertFutureEquals(null, cohort.preCommit());
- assertFutureEquals(null, cohort.commit());
+ assertFutureEquals(Empty.value(), cohort.preCommit());
+ assertFutureEquals(CommitInfo.empty(), cohort.commit());
}
@Test
package org.opendaylight.controller.cluster.databroker.actors.dds;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class DirectTransactionCommitCohortTest {
-
private static final String PERSISTENCE_ID = "per-1";
@Mock
private AbstractClientHistory history;
+ @Mock
+ private DatastoreContext datastoreContext;
+ @Mock
+ private ActorUtils actorUtils;
+
private ActorSystem system;
private TransactionTester<?> transaction;
private DirectTransactionCommitCohort cohort;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
final ClientActorContext context =
AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+ doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+ doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+ doReturn(actorUtils).when(history).actorUtils();
+
transaction = createTransactionTester(new TestProbe(system, "backend"), context, history);
final AbstractProxyTransaction proxy = transaction.getTransaction();
proxy.seal();
public void testCanCommit() throws Exception {
final ListenableFuture<Boolean> canCommit = cohort.canCommit();
final ModifyTransactionRequest request = transaction.expectTransactionRequest(ModifyTransactionRequest.class);
- assertTrue(request.getPersistenceProtocol().isPresent());
- assertEquals(PersistenceProtocol.SIMPLE, request.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.SIMPLE), request.getPersistenceProtocol());
final RequestSuccess<?, ?> success = new TransactionCommitSuccess(transaction.getTransaction().getIdentifier(),
transaction.getLastReceivedMessage().getSequence());
transaction.replySuccess(success);
@Test
public void testPreCommit() throws Exception {
- final ListenableFuture<Void> preCommit = cohort.preCommit();
- assertNull(getWithTimeout(preCommit));
+ final ListenableFuture<?> preCommit = cohort.preCommit();
+ assertNotNull(getWithTimeout(preCommit));
}
@Test
public void testAbort() throws Exception {
- final ListenableFuture<Void> abort = cohort.abort();
+ final ListenableFuture<?> abort = cohort.abort();
verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
- assertNull(getWithTimeout(abort));
+ assertNotNull(getWithTimeout(abort));
}
@Test
public void testCommit() throws Exception {
- final ListenableFuture<Void> commit = cohort.commit();
+ final ListenableFuture<?> commit = cohort.commit();
verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
- assertNull(getWithTimeout(commit));
+ assertNotNull(getWithTimeout(commit));
}
private static TransactionTester<?> createTransactionTester(final TestProbe backendProbe,
final ClientActorContext context,
final AbstractClientHistory history) {
- final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+ final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
"default", UnsignedLong.ZERO, Optional.empty(), 3);
final AbstractClientConnection<ShardBackendInfo> connection =
AccessClientUtil.createConnectedConnection(context, 0L, backend);
package org.opendaylight.controller.cluster.databroker.actors.dds;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import java.util.Set;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@Override
protected AbstractDataStoreClientBehavior createBehavior(final ClientActorContext clientContext,
final ActorUtils context) {
- final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
final ShardStrategy strategy = mock(ShardStrategy.class);
- when(strategy.findShard(any())).thenReturn(SHARD);
- when(factory.getStrategy(any())).thenReturn(strategy);
- when(context.getShardStrategyFactory()).thenReturn(factory);
+ doReturn(SHARD).when(strategy).findShard(any());
+ final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
+ doReturn(strategy).when(factory).getStrategy(any());
+ doReturn(factory).when(context).getShardStrategyFactory();
+
+ final Configuration config = mock(Configuration.class);
+ doReturn(Set.of(SHARD)).when(config).getAllShardNames();
+ doReturn(config).when(context).getConfiguration();
+
return new DistributedDataStoreClientBehavior(clientContext, context);
}
}
package org.opendaylight.controller.cluster.databroker.actors.dds;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
import com.google.common.util.concurrent.ListenableFuture;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class EmptyTransactionCommitCohortTest {
-
@Mock
private AbstractClientHistory history;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
cohort = new EmptyTransactionCommitCohort(history, TRANSACTION_ID);
}
@Test
public void testPreCommit() throws Exception {
- final ListenableFuture<Void> preCommit = cohort.preCommit();
- assertNull(getWithTimeout(preCommit));
+ assertNotNull(getWithTimeout(cohort.preCommit()));
}
@Test
public void testAbort() throws Exception {
- final ListenableFuture<Void> abort = cohort.abort();
+ final ListenableFuture<?> abort = cohort.abort();
verify(history).onTransactionComplete(TRANSACTION_ID);
- assertNull(getWithTimeout(abort));
+ assertNotNull(getWithTimeout(abort));
}
@Test
public void testCommit() throws Exception {
- final ListenableFuture<Void> commit = cohort.commit();
+ final ListenableFuture<?> commit = cohort.commit();
verify(history).onTransactionComplete(TRANSACTION_ID);
- Assert.assertNull(getWithTimeout(commit));
+ assertNotNull(getWithTimeout(commit));
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import com.google.common.base.Ticker;
import java.util.Optional;
import java.util.function.Consumer;
-import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.Response;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
extends AbstractProxyTransactionTest<T> {
@SuppressWarnings("unchecked")
private void setupExecuteInActor() {
doAnswer(inv -> {
- inv.<InternalCommand<?>>getArgument(0).execute(mock(ClientActorBehavior.class));
+ inv.getArgument(0, InternalCommand.class).execute(mock(ClientActorBehavior.class));
return null;
}).when(context).executeInActor(any(InternalCommand.class));
}
final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
verify(callback).accept(captor.capture());
final Response<?, ?> value = captor.getValue();
- Assert.assertTrue(value instanceof ReadTransactionSuccess);
+ assertTrue(value instanceof ReadTransactionSuccess);
final ReadTransactionSuccess success = (ReadTransactionSuccess) value;
- Assert.assertTrue(success.getData().isPresent());
- Assert.assertEquals(DATA_1, success.getData().get());
+ assertEquals(Optional.of(DATA_1), success.getData());
}
@Test
final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
verify(callback).accept(captor.capture());
final Response<?, ?> value = captor.getValue();
- Assert.assertTrue(value instanceof ExistsTransactionSuccess);
+ assertTrue(value instanceof ExistsTransactionSuccess);
final ExistsTransactionSuccess success = (ExistsTransactionSuccess) value;
- Assert.assertTrue(success.getExists());
+ assertTrue(success.getExists());
}
@Test
public void testHandleForwardedRemotePurgeRequest() {
final TestProbe probe = createProbe();
- final TransactionPurgeRequest request =
- new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
+ final TransactionPurgeRequest request = new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
testHandleForwardedRemoteRequest(request);
}
final TestProbe probe = createProbe();
final AbortLocalTransactionRequest request = new AbortLocalTransactionRequest(TRANSACTION_ID, probe.ref());
final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
- Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
- Assert.assertEquals(PersistenceProtocol.ABORT, modifyRequest.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.ABORT), modifyRequest.getPersistenceProtocol());
}
@Override
doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
verify(modification).applyToCursor(any());
- Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
- Assert.assertEquals(PersistenceProtocol.THREE_PHASE, modifyRequest.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.THREE_PHASE), modifyRequest.getPersistenceProtocol());
checkModifications(modifyRequest);
}
}
protected <R extends TransactionRequest<R>> R testForwardToLocal(final TransactionRequest<?> toForward,
- final Class<R> expectedMessageClass) {
+ final Class<R> expectedMessageClass) {
final Consumer<Response<?, ?>> callback = createCallbackMock();
final TransactionTester<LocalReadWriteProxyTransaction> transactionTester = createLocalProxy();
final LocalReadWriteProxyTransaction successor = transactionTester.getTransaction();
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.when;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertOperationThrowsException;
import com.google.common.base.Ticker;
import com.google.common.base.VerifyException;
import java.util.Optional;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
public class LocalReadOnlyProxyTransactionTest extends LocalProxyTransactionTest<LocalReadOnlyProxyTransaction> {
-
private DataTreeSnapshot snapshot;
@Override
@Test
public void testIsSnapshotOnly() {
- Assert.assertTrue(transaction.isSnapshotOnly());
+ assertTrue(transaction.isSnapshotOnly());
}
@Test
public void testReadOnlyView() {
- Assert.assertEquals(snapshot, transaction.readOnlyView());
+ assertEquals(snapshot, transaction.readOnlyView());
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testDirectCommit() {
- transaction.directCommit();
+ assertThrows(UnsupportedOperationException.class, () -> transaction.directCommit());
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testCanCommit() {
- transaction.canCommit(new VotingFuture<>(new Object(), 1));
+ assertThrows(UnsupportedOperationException.class,
+ () -> transaction.canCommit(new VotingFuture<>(new Object(), 1)));
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testPreCommit() {
- transaction.preCommit(new VotingFuture<>(new Object(), 1));
+ assertThrows(UnsupportedOperationException.class,
+ () -> transaction.preCommit(new VotingFuture<>(new Object(), 1)));
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testDoCommit() {
- transaction.doCommit(new VotingFuture<>(new Object(), 1));
+ assertThrows(UnsupportedOperationException.class,
+ () -> transaction.doCommit(new VotingFuture<>(new Object(), 1)));
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testDelete() {
- transaction.delete(PATH_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.delete(PATH_1));
}
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testMerge() {
- transaction.merge(PATH_1, DATA_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.merge(PATH_1, DATA_1));
}
+ @Test
@Override
- @Test(expected = UnsupportedOperationException.class)
public void testWrite() {
- transaction.write(PATH_1, DATA_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.write(PATH_1, DATA_1));
}
- @Test(expected = UnsupportedOperationException.class)
+ @Test
public void testDoDelete() {
- transaction.doDelete(PATH_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.doDelete(PATH_1));
}
- @Test(expected = UnsupportedOperationException.class)
+ @Test
public void testDoMerge() {
- transaction.doMerge(PATH_1, DATA_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.doMerge(PATH_1, DATA_1));
}
- @Test(expected = UnsupportedOperationException.class)
+ @Test
public void testDoWrite() {
- transaction.doWrite(PATH_1, DATA_1);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.doWrite(PATH_1, DATA_1));
}
- @Test(expected = UnsupportedOperationException.class)
+ @Test
public void testCommitRequest() {
- transaction.commitRequest(true);
+ assertThrows(UnsupportedOperationException.class, () -> transaction.commitRequest(true));
}
@Test
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Optional;
import java.util.function.Consumer;
-import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mock;
import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
import org.opendaylight.controller.cluster.access.concepts.Response;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTest<LocalReadWriteProxyTransaction> {
@Mock
@Test
public void testIsSnapshotOnly() {
- Assert.assertFalse(transaction.isSnapshotOnly());
+ assertFalse(transaction.isSnapshotOnly());
}
@Test
public void testReadOnlyView() {
- Assert.assertEquals(modification, transaction.readOnlyView());
+ assertEquals(modification, transaction.readOnlyView());
}
@Test
transaction.doWrite(PATH_1, DATA_1);
final boolean coordinated = true;
final CommitLocalTransactionRequest request = transaction.commitRequest(coordinated);
- Assert.assertEquals(coordinated, request.isCoordinated());
- Assert.assertEquals(modification, request.getModification());
+ assertEquals(coordinated, request.isCoordinated());
+ assertEquals(modification, request.getModification());
}
@Test
public void testSealOnly() throws Exception {
assertOperationThrowsException(() -> transaction.getSnapshot(), IllegalStateException.class);
transaction.sealOnly();
- Assert.assertEquals(modification, transaction.getSnapshot());
+ assertEquals(modification, transaction.getSnapshot());
}
@Test
final RemoteProxyTransaction successor = transactionTester.getTransaction();
doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
transaction.sealOnly();
- final TransactionRequest<?> request = transaction.flushState().get();
+ final TransactionRequest<?> request = transaction.flushState().orElseThrow();
transaction.forwardToSuccessor(successor, request, null);
verify(modification).applyToCursor(any());
transactionTester.getTransaction().seal();
verify(modification).delete(PATH_3);
final CommitLocalTransactionRequest commitRequest =
getTester().expectTransactionRequest(CommitLocalTransactionRequest.class);
- Assert.assertEquals(modification, commitRequest.getModification());
- Assert.assertEquals(coordinated, commitRequest.isCoordinated());
+ assertEquals(modification, commitRequest.getModification());
+ assertEquals(coordinated, commitRequest.isCoordinated());
}
}
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collections;
+import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.commands.ConnectClientFailure;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import scala.concurrent.Promise;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ModuleShardBackendResolverTest {
private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
system = ActorSystem.apply();
contextProbe = new TestProbe(system, "context");
shardManagerProbe = new TestProbe(system, "ShardManager");
final ActorUtils actorUtils = createActorUtilsMock(system, contextProbe.ref());
- when(actorUtils.getShardManager()).thenReturn(shardManagerProbe.ref());
+ doReturn(shardManagerProbe.ref()).when(actorUtils).getShardManager();
moduleShardBackendResolver = new ModuleShardBackendResolver(CLIENT_ID, actorUtils);
- when(actorUtils.getShardStrategyFactory()).thenReturn(shardStrategyFactory);
- when(shardStrategyFactory.getStrategy(YangInstanceIdentifier.empty())).thenReturn(shardStrategy);
+ doReturn(shardStrategyFactory).when(actorUtils).getShardStrategyFactory();
+ doReturn(shardStrategy).when(shardStrategyFactory).getStrategy(YangInstanceIdentifier.of());
final PrimaryShardInfoFutureCache cache = new PrimaryShardInfoFutureCache();
- when(actorUtils.getPrimaryShardInfoCache()).thenReturn(cache);
+ doReturn(cache).when(actorUtils).getPrimaryShardInfoCache();
}
@After
@Test
public void testResolveShardForPathNonNullCookie() {
- when(shardStrategy.findShard(YangInstanceIdentifier.empty())).thenReturn(DefaultShardStrategy.DEFAULT_SHARD);
- final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.empty());
- Assert.assertEquals(0L, cookie.longValue());
+ doReturn(DefaultShardStrategy.DEFAULT_SHARD).when(shardStrategy).findShard(YangInstanceIdentifier.of());
+ final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
+ assertEquals(0L, (long) cookie);
}
@Test
public void testResolveShardForPathNullCookie() {
- when(shardStrategy.findShard(YangInstanceIdentifier.empty())).thenReturn("foo");
- final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.empty());
- Assert.assertEquals(1L, cookie.longValue());
+ doReturn("foo").when(shardStrategy).findShard(YangInstanceIdentifier.of());
+ final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
+ assertEquals(1L, (long) cookie);
}
@Test
contextProbe.expectMsgClass(ConnectClientRequest.class);
final TestProbe backendProbe = new TestProbe(system, "backend");
final ConnectClientSuccess msg = new ConnectClientSuccess(CLIENT_ID, 0L, backendProbe.ref(),
- Collections.emptyList(), dataTree, 3);
+ List.of(), dataTree, 3);
contextProbe.reply(msg);
final CompletionStage<ShardBackendInfo> stage = moduleShardBackendResolver.getBackendInfo(0L);
final ShardBackendInfo shardBackendInfo = TestUtils.getWithTimeout(stage.toCompletableFuture());
- Assert.assertEquals(0L, shardBackendInfo.getCookie().longValue());
- Assert.assertEquals(dataTree, shardBackendInfo.getDataTree().get());
- Assert.assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shardBackendInfo.getName());
+ assertEquals(0L, shardBackendInfo.getCookie().longValue());
+ assertEquals(dataTree, shardBackendInfo.getDataTree().orElseThrow());
+ assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shardBackendInfo.getName());
}
@Test
final ExecutionException caught =
TestUtils.assertOperationThrowsException(() -> TestUtils.getWithTimeout(stage.toCompletableFuture()),
ExecutionException.class);
- Assert.assertEquals(cause, caught.getCause());
+ assertEquals(cause, caught.getCause());
}
@Test
contextProbe.expectMsgClass(ConnectClientRequest.class);
final TestProbe staleBackendProbe = new TestProbe(system, "staleBackend");
final ConnectClientSuccess msg = new ConnectClientSuccess(CLIENT_ID, 0L, staleBackendProbe.ref(),
- Collections.emptyList(), dataTree, 3);
+ List.of(), dataTree, 3);
contextProbe.reply(msg);
//get backend info
final ShardBackendInfo staleBackendInfo = TestUtils.getWithTimeout(backendInfo.toCompletableFuture());
contextProbe.expectMsgClass(ConnectClientRequest.class);
final TestProbe refreshedBackendProbe = new TestProbe(system, "refreshedBackend");
final ConnectClientSuccess msg2 = new ConnectClientSuccess(CLIENT_ID, 1L, refreshedBackendProbe.ref(),
- Collections.emptyList(), dataTree, 3);
+ List.of(), dataTree, 3);
contextProbe.reply(msg2);
final ShardBackendInfo refreshedBackendInfo = TestUtils.getWithTimeout(refreshed.toCompletableFuture());
- Assert.assertEquals(staleBackendInfo.getCookie(), refreshedBackendInfo.getCookie());
- Assert.assertEquals(refreshedBackendProbe.ref(), refreshedBackendInfo.getActor());
+ assertEquals(staleBackendInfo.getCookie(), refreshedBackendInfo.getCookie());
+ assertEquals(refreshedBackendProbe.ref(), refreshedBackendInfo.getActor());
}
@SuppressWarnings("unchecked")
final Registration callbackReg = moduleShardBackendResolver.notifyWhenBackendInfoIsStale(mockCallback);
regMessage.getCallback().accept(DefaultShardStrategy.DEFAULT_SHARD);
- verify(mockCallback, timeout(5000)).accept(Long.valueOf(0));
+ verify(mockCallback, timeout(5000)).accept((long) 0);
reset(mockCallback);
callbackReg.close();
final ActorSelection selection = system.actorSelection(actor.path());
final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
promise.success(shardInfo);
- when(mock.findPrimaryShardAsync(DefaultShardStrategy.DEFAULT_SHARD)).thenReturn(promise.future());
- when(mock.getClientDispatcher()).thenReturn(system.dispatchers().defaultGlobalDispatcher());
+ doReturn(promise.future()).when(mock).findPrimaryShardAsync(DefaultShardStrategy.DEFAULT_SHARD);
return mock;
}
}
import static org.hamcrest.CoreMatchers.isA;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertFutureEquals;
import akka.testkit.TestProbe;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<RemoteProxyTransaction> {
-
@Override
protected RemoteProxyTransaction createTransaction(final ProxyHistory parent, final TransactionIdentifier id,
final DataTreeSnapshot snapshot) {
+ mockForRemote();
return new RemoteProxyTransaction(parent, TRANSACTION_ID, false, false, false);
}
@Test
public void testRead() throws Exception {
final TransactionTester<RemoteProxyTransaction> tester = getTester();
- final FluentFuture<Optional<NormalizedNode<?, ?>>> read = transaction.read(PATH_2);
+ final FluentFuture<Optional<NormalizedNode>> read = transaction.read(PATH_2);
final ReadTransactionRequest req = tester.expectTransactionRequest(ReadTransactionRequest.class);
- final Optional<NormalizedNode<?, ?>> result = Optional.of(DATA_1);
+ final Optional<NormalizedNode> result = Optional.of(DATA_1);
tester.replySuccess(new ReadTransactionSuccess(TRANSACTION_ID, req.getSequence(), result));
assertFutureEquals(result, read);
}
final ListenableFuture<Boolean> result = transaction.directCommit();
final TransactionTester<RemoteProxyTransaction> tester = getTester();
final ModifyTransactionRequest req = tester.expectTransactionRequest(ModifyTransactionRequest.class);
- assertTrue(req.getPersistenceProtocol().isPresent());
- assertEquals(PersistenceProtocol.SIMPLE, req.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.SIMPLE), req.getPersistenceProtocol());
tester.replySuccess(new TransactionCommitSuccess(TRANSACTION_ID, req.getSequence()));
assertFutureEquals(true, result);
}
final ModifyTransactionRequest request = builder.build();
final ModifyTransactionRequest received = testForwardToRemote(request, ModifyTransactionRequest.class);
assertEquals(request.getTarget(), received.getTarget());
- assertTrue(received.getPersistenceProtocol().isPresent());
- assertEquals(PersistenceProtocol.ABORT, received.getPersistenceProtocol().get());
+ assertEquals(Optional.of(PersistenceProtocol.ABORT), received.getPersistenceProtocol());
}
@Test
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
+import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorSystem;
import akka.testkit.TestProbe;
import akka.testkit.javadsl.TestKit;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleClientHistory> {
private ActorSystem system;
private AbstractDataStoreClientBehavior behavior;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
system = ActorSystem.apply();
final TestProbe clientContextProbe = new TestProbe(system, "client");
final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
clientActorContext = AccessClientUtil.createClientActorContext(
- system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+ system, clientContextProbe.ref(), TestUtils.CLIENT_ID, PERSISTENCE_ID);
final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
@Test
public void testDoCreateTransaction() {
final ClientTransaction clientTransaction = object().doCreateTransaction();
- Assert.assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
+ assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
}
@Override
public void testCreateHistoryProxy() {
final AbstractClientConnection<ShardBackendInfo> clientConnection = behavior.getConnection(0L);
final ProxyHistory historyProxy = object().createHistoryProxy(HISTORY_ID, clientConnection);
- Assert.assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
+ assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
}
@Override
@Test
public void testDoCreateSnapshot() {
final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
- Assert.assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
+ assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
clientSnapshot.getIdentifier().getHistoryId());
}
object().onTransactionComplete(transaction.getIdentifier());
// it is possible to make transaction ready again
final AbstractTransactionCommitCohort result = object().onTransactionReady(transaction, cohort);
- Assert.assertEquals(result, cohort);
+ assertEquals(result, cohort);
}
@Override
@Test
public void testOnTransactionAbort() {
final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
- Assert.assertTrue(clientSnapshot.abort());
+ assertTrue(clientSnapshot.abort());
}
@Override
public void testOnTransactionReady() {
final AbstractTransactionCommitCohort result = object().onTransactionReady(
object().createTransaction(), cohort);
- Assert.assertEquals(result, cohort);
+ assertEquals(result, cohort);
}
@Override
- @Test(expected = IllegalStateException.class)
+ @Test
public void testOnTransactionReadyDuplicate() {
final ClientTransaction transaction = object().createTransaction();
object().onTransactionReady(transaction, cohort);
- object().onTransactionReady(transaction, cohort);
+ final IllegalStateException ise = assertThrows(IllegalStateException.class,
+ () -> object().onTransactionReady(transaction, cohort));
+ assertThat(ise.getMessage(), startsWith("Duplicate cohort "));
}
}
\ No newline at end of file
*/
package org.opendaylight.controller.cluster.databroker.actors.dds;
+import static org.junit.Assert.assertEquals;
+
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
* @param <T> type
* @throws Exception exception
*/
- static <T> void assertFutureEquals(final T expected, final Future<T> actual) throws Exception {
- Assert.assertEquals(expected, getWithTimeout(actual));
+ static <T> void assertFutureEquals(final T expected, final Future<? extends T> actual) throws Exception {
+ assertEquals(expected, getWithTimeout(actual));
}
/**
import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
}
@Override
- protected AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> externalizableProxy(
+ protected RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> externalizableProxy(
final ABIVersion version) {
throw new UnsupportedOperationException("Not implemented");
}
package org.opendaylight.controller.cluster.datastore;
import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
-import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
-import java.util.Iterator;
import java.util.List;
import java.util.Optional;
-import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runners.Parameterized.Parameter;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public abstract class AbstractDistributedDataStoreIntegrationTest {
-
@Parameter
- public Class<? extends AbstractDataStore> testParameter;
+ public Class<? extends ClientBackedDataStore> testParameter;
protected ActorSystem system;
@Test
public void testWriteTransactionWithSingleShard() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "transactionIntegrationTest", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "transactionIntegrationTest", "test-1")) {
testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
@Test
public void testWriteTransactionWithMultipleShards() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testWriteTransactionWithMultipleShards",
+ "cars-1", "people-1")) {
DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
// Verify the data in the store
final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
- Optional<NormalizedNode<?, ?>> optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", car, optional.get());
-
- optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", person, optional.get());
+ assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
}
}
@Test
public void testReadWriteTransactionWithSingleShard() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testReadWriteTransactionWithSingleShard", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithSingleShard",
+ "test-1")) {
// 1. Create a read-write Tx
final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
// 2. Write some data
final YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
readWriteTx.write(nodePath, nodeToWrite);
// 3. Read the data from Tx
final Boolean exists = readWriteTx.exists(nodePath).get(5, TimeUnit.SECONDS);
assertEquals("exists", Boolean.TRUE, exists);
- Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", nodeToWrite, optional.get());
+ assertEquals(Optional.of(nodeToWrite), readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS));
// 4. Ready the Tx for commit
final DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
// 6. Verify the data in the store
final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
- optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", nodeToWrite, optional.get());
+ assertEquals(Optional.of(nodeToWrite), readTx.read(nodePath).get(5, TimeUnit.SECONDS));
}
}
@Test
public void testReadWriteTransactionWithMultipleShards() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testReadWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithMultipleShards",
+ "cars-1", "people-1")) {
DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", readWriteTx);
final Boolean exists = readWriteTx.exists(carPath).get(5, TimeUnit.SECONDS);
assertEquals("exists", Boolean.TRUE, exists);
- Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", car, optional.get());
+ assertEquals("Data node", Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
testKit.doCommit(readWriteTx.ready());
// Verify the data in the store
DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
- optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", car, optional.get());
-
- optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", person, optional.get());
+ assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
}
}
@Test
- @Ignore("Flushes a closed tx leak in single node, needs to be handled separately")
public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
- final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testSingleTransactionsWritesInQuickSuccession", "cars-1")) {
+ final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+ try (var dataStore = testKit.setupDataStore(testParameter, "testSingleTransactionsWritesInQuickSuccession",
+ "cars-1")) {
- final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+ final var txChain = dataStore.createTransactionChain();
- DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+ var writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
testKit.doCommit(writeTx.ready());
int numCars = 5;
for (int i = 0; i < numCars; i++) {
writeTx = txChain.newWriteOnlyTransaction();
- writeTx.write(CarsModel.newCarPath("car" + i),
- CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
+ writeTx.write(CarsModel.newCarPath("car" + i), CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
testKit.doCommit(writeTx.ready());
- DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
- domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
-
- domStoreReadTransaction.close();
- }
-
- // verify frontend metadata has no holes in purged transactions causing overtime memory leak
- Optional<ActorRef> localShard = dataStore.getActorUtils().findLocalShard("cars-1");
- FrontendShardDataTreeSnapshotMetadata frontendMetadata =
- (FrontendShardDataTreeSnapshotMetadata) dataStore.getActorUtils()
- .executeOperation(localShard.get(), new RequestFrontendMetadata());
-
- if (dataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
- Iterator<FrontendHistoryMetadata> iterator =
- frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
- FrontendHistoryMetadata metadata = iterator.next();
- while (iterator.hasNext() && metadata.getHistoryId() != 1) {
- metadata = iterator.next();
+ try (var tx = txChain.newReadOnlyTransaction()) {
+ tx.read(CarsModel.BASE_PATH).get();
}
- Set<Range<UnsignedLong>> ranges = metadata.getPurgedTransactions().asRanges();
-
- assertEquals(1, ranges.size());
- } else {
- // ask based should track no metadata
- assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
}
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
- .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+ // wait to let the shard catch up with purged
+ await("transaction state propagation").atMost(5, TimeUnit.SECONDS)
+ .pollInterval(500, TimeUnit.MILLISECONDS)
+ .untilAsserted(() -> {
+ // verify frontend metadata has no holes in purged transactions causing overtime memory leak
+ final var localShard = dataStore.getActorUtils().findLocalShard("cars-1") .orElseThrow();
+ final var frontendMetadata = (FrontendShardDataTreeSnapshotMetadata) dataStore.getActorUtils()
+ .executeOperation(localShard, new RequestFrontendMetadata());
+
+ final var clientMeta = frontendMetadata.getClients().get(0);
+ final var iterator = clientMeta.getCurrentHistories().iterator();
+ var metadata = iterator.next();
+ while (iterator.hasNext() && metadata.getHistoryId() != 1) {
+ metadata = iterator.next();
+ }
+ assertEquals("[[0..10]]", metadata.getPurgedTransactions().ranges().toString());
+ });
+
+ final var body = txChain.newReadOnlyTransaction().read(CarsModel.CAR_LIST_PATH)
+ .get(5, TimeUnit.SECONDS)
+ .orElseThrow()
+ .body();
+ assertThat(body, instanceOf(Collection.class));
+ assertEquals("# cars", numCars, ((Collection<?>) body).size());
}
}
datastoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(1)
.shardInitializationTimeout(200, TimeUnit.MILLISECONDS).frontendRequestTimeoutInSeconds(2);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+ try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
final Object result = dataStore.getActorUtils().executeOperation(
dataStore.getActorUtils().getShardManager(), new FindLocalShard(shardName, true));
assertTrue("Expected LocalShardFound. Actual: " + result, result instanceof LocalShardFound);
// leader was elected in time, the Tx
// should have timed out and throw an appropriate
// exception cause.
- try {
- txCohort.get().canCommit().get(10, TimeUnit.SECONDS);
- fail("Expected NoShardLeaderException");
- } catch (final ExecutionException e) {
- final String msg = "Unexpected exception: "
- + Throwables.getStackTraceAsString(e.getCause());
- if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
- assertTrue(Throwables.getRootCause(e) instanceof NoShardLeaderException);
- } else {
- assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
- }
- }
+ final var ex = assertThrows(ExecutionException.class,
+ () -> txCohort.get().canCommit().get(10, TimeUnit.SECONDS));
+ assertTrue("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+ Throwables.getRootCause(ex) instanceof RequestTimeoutException);
} finally {
try {
if (writeTxToClose != null) {
@Test
public void testTransactionAbort() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "transactionAbortIntegrationTest", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "transactionAbortIntegrationTest", "test-1")) {
final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
@SuppressWarnings("checkstyle:IllegalCatch")
public void testTransactionChainWithSingleShard() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testTransactionChainWithSingleShard", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithSingleShard", "test-1")) {
// 1. Create a Tx chain and write-only Tx
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
// 2. Write some data
- final NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ final NormalizedNode testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
writeTx.write(TestModel.TEST_PATH, testNode);
// 3. Ready the Tx for commit
// the data from the first
// Tx is visible after being readied.
DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
- Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", testNode, optional.get());
+ assertEquals(Optional.of(testNode), readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS));
// 6. Create a new RW Tx from the chain, write more data,
// and ready it
// from the last RW Tx to
// verify it is visible.
readTx = txChain.newReadWriteTransaction();
- optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", outerNode, optional.get());
+ assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
// 8. Wait for the 2 commits to complete and close the
// chain.
// 9. Create a new read Tx from the data store and verify
// committed data.
readTx = dataStore.newReadOnlyTransaction();
- optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", outerNode, optional.get());
+ assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
}
}
@Test
public void testTransactionChainWithMultipleShards() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testTransactionChainWithMultipleShards", "cars-1", "people-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithMultipleShards",
+ "cars-1", "people-1")) {
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
readWriteTx.merge(personPath, person);
- Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", car, optional.get());
-
- optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", person, optional.get());
+ assertEquals(Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(person), readWriteTx.read(personPath).get(5, TimeUnit.SECONDS));
final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
- optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertFalse("isPresent", optional.isPresent());
-
- optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", person, optional.get());
+ assertEquals(Optional.empty(), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
}
}
@Test
public void testCreateChainedTransactionsInQuickSuccession() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testCreateChainedTransactionsInQuickSuccession", "cars-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionsInQuickSuccession",
+ "cars-1")) {
final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
.put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
MoreExecutors.directExecutor());
- final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
- DOMTransactionChain txChain = broker.createTransactionChain(listener);
+ DOMTransactionChain txChain = broker.createTransactionChain();
final List<ListenableFuture<?>> futures = new ArrayList<>();
f.get(5, TimeUnit.SECONDS);
}
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+ final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
.read(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
- assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+ assertEquals("# cars", numCars, ((Collection<?>) optional.orElseThrow().body()).size());
txChain.close();
@Test
public void testCreateChainedTransactionAfterEmptyTxReadied() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testCreateChainedTransactionAfterEmptyTxReadied", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterEmptyTxReadied",
+ "test-1")) {
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
final DOMStoreReadWriteTransaction rwTx2 = txChain.newReadWriteTransaction();
- final Optional<NormalizedNode<?, ?>> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ final Optional<NormalizedNode> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
assertFalse("isPresent", optional.isPresent());
txChain.close();
@Test
public void testCreateChainedTransactionWhenPreviousNotReady() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testCreateChainedTransactionWhenPreviousNotReady", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionWhenPreviousNotReady",
+ "test-1")) {
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
@Test
public void testCreateChainedTransactionAfterClose() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testCreateChainedTransactionAfterClose", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterClose",
+ "test-1")) {
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
txChain.close();
@Test
public void testChainWithReadOnlyTxAfterPreviousReady() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testChainWithReadOnlyTxAfterPreviousReady", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testChainWithReadOnlyTxAfterPreviousReady",
+ "test-1")) {
final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
// Create read-only tx's and issue a read.
- FluentFuture<Optional<NormalizedNode<?, ?>>> readFuture1 = txChain
+ FluentFuture<Optional<NormalizedNode>> readFuture1 = txChain
.newReadOnlyTransaction().read(TestModel.TEST_PATH);
- FluentFuture<Optional<NormalizedNode<?, ?>>> readFuture2 = txChain
+ FluentFuture<Optional<NormalizedNode>> readFuture2 = txChain
.newReadOnlyTransaction().read(TestModel.TEST_PATH);
// Create another write tx and issue the write.
@Test
public void testChainedTransactionFailureWithSingleShard() throws Exception {
- final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testChainedTransactionFailureWithSingleShard", "cars-1")) {
+ final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+ try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithSingleShard",
+ "cars-1")) {
- final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
+ final var broker = new ConcurrentDOMDataBroker(
ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
.put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
MoreExecutors.directExecutor());
- final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
- final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+ final var listener = mock(FutureCallback.class);
+ final var txChain = broker.createTransactionChain();
+ txChain.addCallback(listener);
- final DOMDataTreeReadWriteTransaction writeTx = txChain.newReadWriteTransaction();
+ final var writeTx = txChain.newReadWriteTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
PeopleModel.emptyContainer());
- final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+ final var invalidData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+ .build();
writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
- try {
- writeTx.commit().get(5, TimeUnit.SECONDS);
- fail("Expected TransactionCommitFailedException");
- } catch (final ExecutionException e) {
- // Expected
- }
+ assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
- verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
- any(Throwable.class));
+ verify(listener, timeout(5000)).onFailure(any());
txChain.close();
broker.close();
@Test
public void testChainedTransactionFailureWithMultipleShards() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testChainedTransactionFailureWithMultipleShards", "cars-1", "people-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithMultipleShards",
+ "cars-1", "people-1")) {
final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
.put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
MoreExecutors.directExecutor());
- final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
- final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+ final var listener = mock(FutureCallback.class);
+ final DOMTransactionChain txChain = broker.createTransactionChain();
+ txChain.addCallback(listener);
final DOMDataTreeWriteTransaction writeTx = txChain.newReadWriteTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
PeopleModel.emptyContainer());
- final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+ final ContainerNode invalidData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+ .build();
writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
// Note that merge will validate the data and fail but put
// succeeds b/c deep validation is not
// done for put for performance reasons.
- try {
- writeTx.commit().get(5, TimeUnit.SECONDS);
- fail("Expected TransactionCommitFailedException");
- } catch (final ExecutionException e) {
- // Expected
- }
+ assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
- verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
- any(Throwable.class));
+ verify(listener, timeout(5000)).onFailure(any());
txChain.close();
broker.close();
@Test
public void testDataTreeChangeListenerRegistration() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testDataTreeChangeListenerRegistration", "test-1")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testDataTreeChangeListenerRegistration",
+ "test-1")) {
testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
final MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
- ListenerRegistration<MockDataTreeChangeListener> listenerReg = dataStore
- .registerTreeChangeListener(TestModel.TEST_PATH, listener);
+ final var listenerReg = dataStore.registerTreeChangeListener(TestModel.TEST_PATH, listener);
assertNotNull("registerTreeChangeListener returned null", listenerReg);
DataTree dataTree = new InMemoryDataTreeFactory().create(
DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
AbstractShardTest.writeToStore(dataTree, CarsModel.BASE_PATH, carsNode);
- NormalizedNode<?, ?> root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+ NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
final Snapshot carsSnapshot = Snapshot.create(
new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
SchemaContextHelper.full());
- final NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+ final NormalizedNode peopleNode = PeopleModel.create();
AbstractShardTest.writeToStore(dataTree, PeopleModel.BASE_PATH, peopleNode);
- root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+ root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
final Snapshot peopleSnapshot = Snapshot.create(
new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
new DatastoreSnapshot.ShardSnapshot("cars", carsSnapshot),
new DatastoreSnapshot.ShardSnapshot("people", peopleSnapshot)));
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, name, "module-shards-member1.conf", true, "cars", "people")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, name, "module-shards-member1.conf", true,
+ "cars", "people")) {
final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
// two reads
- Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", carsNode, optional.get());
-
- optional = readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", peopleNode, optional.get());
+ assertEquals(Optional.of(carsNode), readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(peopleNode), readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS));
}
}
@Test
+ @Ignore("ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate")
+ // FIXME: re-enable this test
public void testSnapshotOnRootOverwrite() throws Exception {
- if (!DistributedDataStore.class.isAssignableFrom(testParameter)) {
- // FIXME: ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate
- return;
- }
-
- final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(),
- datastoreContextBuilder.snapshotOnRootOverwrite(true));
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testRootOverwrite", "module-shards-default-cars-member1.conf",
- true, "cars", "default")) {
+ final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder.snapshotOnRootOverwrite(true));
+ try (var dataStore = testKit.setupDataStore(testParameter, "testRootOverwrite",
+ "module-shards-default-cars-member1.conf", true, "cars", "default")) {
- ContainerNode rootNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(SchemaContext.NAME))
- .withChild((ContainerNode) CarsModel.create())
- .build();
+ final var rootNode = Builders.containerBuilder()
+ .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
+ .withChild(CarsModel.create())
+ .build();
- testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.empty(), rootNode);
+ testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
IntegrationTestKit.verifyShardState(dataStore, "cars",
state -> assertEquals(1, state.getSnapshotIndex()));
verifySnapshot("member-1-shard-cars-testRootOverwrite", 1, 1);
// root overwrite so expect a snapshot
- testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.empty(), rootNode);
+ testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
// this was a real snapshot so everything should be in it(1 + 10 + 1)
IntegrationTestKit.verifyShardState(dataStore, "cars",
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.MockitoAnnotations.initMocks;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerTest.TestShardManager;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-
-public class AbstractShardManagerTest extends AbstractClusterRefActorTest {
-
- protected static final MemberName MEMBER_1 = MemberName.forName("member-1");
-
- protected static int ID_COUNTER = 1;
- protected static ActorRef mockShardActor;
- protected static ShardIdentifier mockShardName;
-
- protected final String shardMrgIDSuffix = "config" + ID_COUNTER++;
- protected final TestActorFactory actorFactory = new TestActorFactory(getSystem());
- protected final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder()
- .dataStoreName(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
- .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(6);
-
- protected static SettableFuture<Void> ready;
-
- protected TestShardManager.Builder newTestShardMgrBuilder() {
- return TestShardManager.builder(datastoreContextBuilder).distributedDataStore(mock(DistributedDataStore.class));
- }
-
- protected TestShardManager.Builder newTestShardMgrBuilder(final Configuration config) {
- return TestShardManager.builder(datastoreContextBuilder).configuration(config)
- .distributedDataStore(mock(DistributedDataStore.class));
- }
-
- protected Props newShardMgrProps(final Configuration config) {
- return newTestShardMgrBuilder(config).readinessFuture(ready).props();
- }
-
- @Before
- public void setUp() {
- initMocks(this);
- ready = SettableFuture.create();
-
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
-
- if (mockShardActor == null) {
- mockShardName = ShardIdentifier.create(Shard.DEFAULT_NAME, MEMBER_1, "config");
- mockShardActor = getSystem().actorOf(MessageCollectorActor.props(), mockShardName.toString());
- }
-
- MessageCollectorActor.clearMessages(mockShardActor);
- }
-
- @After
- public void tearDown() {
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
-
- mockShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
- mockShardActor = null;
-
- actorFactory.close();
- }
-}
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import scala.concurrent.Await;
import scala.concurrent.Future;
public abstract class AbstractShardTest extends AbstractActorTest {
protected static final EffectiveModelContext SCHEMA_CONTEXT = TestModel.createTestContext();
- private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
-
+ protected static final AtomicInteger SHARD_NUM = new AtomicInteger();
protected static final int HEARTBEAT_MILLIS = 100;
- protected final ShardIdentifier shardID = ShardIdentifier.create("inventory", MemberName.forName("member-1"),
- "config" + NEXT_SHARD_NUM.getAndIncrement());
-
protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder()
.shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000)
.shardHeartbeatIntervalInMillis(HEARTBEAT_MILLIS);
protected final TestActorFactory actorFactory = new TestActorFactory(getSystem());
+ protected final int nextShardNum = SHARD_NUM.getAndIncrement();
+ protected final ShardIdentifier shardID = ShardIdentifier.create("inventory", MemberName.forName("member-1"),
+ "config" + nextShardNum);
@Before
- public void setUp() {
+ public void setUp() throws Exception {
InMemorySnapshotStore.clear();
InMemoryJournal.clear();
}
.schemaContextProvider(() -> SCHEMA_CONTEXT);
}
- protected void testRecovery(final Set<Integer> listEntryKeys) throws Exception {
+ protected void testRecovery(final Set<Integer> listEntryKeys, final boolean stopActorOnFinish) throws Exception {
// Create the actor and wait for recovery complete.
final int nListEntries = listEntryKeys.size();
// Verify data in the data store.
- final NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ final NormalizedNode outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
- outerList.getValue() instanceof Iterable);
- for (final Object entry: (Iterable<?>) outerList.getValue()) {
+ outerList.body() instanceof Iterable);
+ for (final Object entry: (Iterable<?>) outerList.body()) {
assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
entry instanceof MapEntryNode);
final MapEntryNode mapEntry = (MapEntryNode)entry;
- final Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
- mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ final Optional<DataContainerChild> idLeaf =
+ mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
- final Object value = idLeaf.get().getValue();
+ final Object value = idLeaf.orElseThrow().body();
assertTrue("Unexpected value for leaf " + TestModel.ID_QNAME.getLocalName() + ": " + value,
listEntryKeys.remove(value));
}
assertEquals("Last applied", nListEntries,
shard.underlyingActor().getShardMBean().getLastApplied());
- shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ if (stopActorOnFinish) {
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
}
protected void verifyLastApplied(final TestActorRef<Shard> shard, final long expectedValue) {
}
protected static BatchedModifications prepareBatchedModifications(final TransactionIdentifier transactionID,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final boolean doCommitOnReady) {
+ final YangInstanceIdentifier path, final NormalizedNode data, final boolean doCommitOnReady) {
final MutableCompositeModification modification = new MutableCompositeModification();
modification.addModification(new WriteModification(path, data));
return prepareBatchedModifications(transactionID, modification, doCommitOnReady);
protected static ForwardedReadyTransaction prepareForwardedReadyTransaction(final TestActorRef<Shard> shard,
final TransactionIdentifier transactionID, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> data, final boolean doCommitOnReady) {
+ final NormalizedNode data, final boolean doCommitOnReady) {
ReadWriteShardDataTreeTransaction rwTx = shard.underlyingActor().getDataStore()
.newReadWriteTransaction(transactionID);
rwTx.getSnapshot().write(path, data);
return new ForwardedReadyTransaction(transactionID, CURRENT_VERSION, rwTx, doCommitOnReady, Optional.empty());
}
- public static NormalizedNode<?,?> readStore(final TestActorRef<? extends Shard> shard,
+ public static NormalizedNode readStore(final TestActorRef<? extends Shard> shard,
final YangInstanceIdentifier id) {
return shard.underlyingActor().getDataStore().readNode(id).orElse(null);
}
- public static NormalizedNode<?,?> readStore(final DataTree store, final YangInstanceIdentifier id) {
+ public static NormalizedNode readStore(final DataTree store, final YangInstanceIdentifier id) {
return store.takeSnapshot().readNode(id).orElse(null);
}
public void writeToStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
+ final NormalizedNode node) throws InterruptedException, ExecutionException {
Future<Object> future = Patterns.ask(shard, newBatchedModifications(nextTransactionId(),
id, node, true, true, 1), new Timeout(5, TimeUnit.SECONDS));
try {
}
public static void writeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws DataValidationFailedException {
+ final NormalizedNode node) throws DataValidationFailedException {
BatchedModifications batched = newBatchedModifications(nextTransactionId(), id, node, true, true, 1);
DataTreeModification modification = store.getDataTree().takeSnapshot().newModification();
batched.apply(modification);
store.notifyListeners(commitTransaction(store.getDataTree(), modification));
}
- public static void writeToStore(final DataTree store, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws DataValidationFailedException {
+ public static void writeToStore(final DataTree store, final YangInstanceIdentifier id, final NormalizedNode node)
+ throws DataValidationFailedException {
final DataTreeModification transaction = store.takeSnapshot().newModification();
transaction.write(id, node);
store.commit(candidate);
}
- public void mergeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws DataValidationFailedException {
+ public void mergeToStore(final ShardDataTree store, final YangInstanceIdentifier id, final NormalizedNode node)
+ throws DataValidationFailedException {
final BatchedModifications batched = new BatchedModifications(nextTransactionId(), CURRENT_VERSION);
batched.addModification(new MergeModification(id, node));
batched.setReady();
writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- final NormalizedNode<?, ?> root = readStore(testStore, YangInstanceIdentifier.empty());
+ final NormalizedNode root = readStore(testStore, YangInstanceIdentifier.of());
InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
}
static BatchedModifications newBatchedModifications(final TransactionIdentifier transactionID,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final boolean ready,
+ final YangInstanceIdentifier path, final NormalizedNode data, final boolean ready,
final boolean doCommitOnReady, final int messagesSent) {
final BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
batched.addModification(new WriteModification(path, data));
}
static BatchedModifications newReadyBatchedModifications(final TransactionIdentifier transactionID,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ final YangInstanceIdentifier path, final NormalizedNode data,
final SortedSet<String> participatingShardNames) {
final BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
batched.addModification(new WriteModification(path, data));
@SuppressWarnings("unchecked")
static void verifyOuterListEntry(final TestActorRef<Shard> shard, final Object expIDValue) {
- final NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ final NormalizedNode outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
- outerList.getValue() instanceof Iterable);
- final Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
+ outerList.body() instanceof Iterable);
+ final Object entry = ((Iterable<Object>)outerList.body()).iterator().next();
assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
entry instanceof MapEntryNode);
final MapEntryNode mapEntry = (MapEntryNode)entry;
- final Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
- mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ final Optional<DataContainerChild> idLeaf =
+ mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
- assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.get().getValue());
+ assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.orElseThrow().body());
}
public static DataTreeCandidateTip mockCandidate(final String name) {
final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
- doReturn(ModificationType.WRITE).when(mockCandidateNode).getModificationType();
- doReturn(Optional.of(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)))
- .when(mockCandidateNode).getDataAfter();
+ doReturn(ModificationType.WRITE).when(mockCandidateNode).modificationType();
+ doReturn(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)).when(mockCandidateNode).dataAfter();
doReturn(CarsModel.BASE_PATH).when(mockCandidate).getRootPath();
doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
return mockCandidate;
static DataTreeCandidateTip mockUnmodifiedCandidate(final String name) {
final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
- doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).getModificationType();
- doReturn(YangInstanceIdentifier.empty()).when(mockCandidate).getRootPath();
+ doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).modificationType();
+ doReturn(YangInstanceIdentifier.of()).when(mockCandidate).getRootPath();
doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
return mockCandidate;
}
public static class CapturingShardDataTreeCohort extends ShardDataTreeCohort {
private volatile ShardDataTreeCohort delegate;
- private FutureCallback<Void> canCommit;
+ private FutureCallback<Empty> canCommit;
private FutureCallback<DataTreeCandidate> preCommit;
private FutureCallback<UnsignedLong> commit;
this.delegate = delegate;
}
- public FutureCallback<Void> getCanCommit() {
+ public FutureCallback<Empty> getCanCommit() {
assertNotNull("canCommit was not invoked", canCommit);
return canCommit;
}
}
@Override
- public TransactionIdentifier getIdentifier() {
- return delegate.getIdentifier();
+ TransactionIdentifier transactionId() {
+ return delegate.transactionId();
}
@Override
}
@Override
- public void canCommit(final FutureCallback<Void> callback) {
+ public void canCommit(final FutureCallback<Empty> callback) {
canCommit = mockFutureCallback(callback);
delegate.canCommit(canCommit);
}
}
@Override
- public void abort(final FutureCallback<Void> callback) {
+ public void abort(final FutureCallback<Empty> callback) {
delegate.abort(callback);
}
TX_COUNTER.set(1L);
}
+ protected static TransactionIdentifier newTransactionId(final long txId) {
+ return new TransactionIdentifier(HISTORY_ID, txId);
+ }
+
protected static TransactionIdentifier nextTransactionId() {
- return new TransactionIdentifier(HISTORY_ID, TX_COUNTER.getAndIncrement());
+ return newTransactionId(TX_COUNTER.getAndIncrement());
+ }
+
+ protected static LocalHistoryIdentifier newHistoryId(final long historyId) {
+ return new LocalHistoryIdentifier(CLIENT_ID, historyId);
}
protected static LocalHistoryIdentifier nextHistoryId() {
- return new LocalHistoryIdentifier(CLIENT_ID, HISTORY_COUNTER.incrementAndGet());
+ return newHistoryId(HISTORY_COUNTER.incrementAndGet());
}
protected static <T> T waitOnAsyncTask(final CompletionStage<T> completionStage, final FiniteDuration timeout)
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.testkit.javadsl.TestKit;
-import akka.util.Timeout;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.FluentFuture;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.mockito.ArgumentCaptor;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.TransactionProxyTest.TestException;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Abstract base class for TransactionProxy unit tests.
- *
- * @author Thomas Pantelis
- */
-public abstract class AbstractTransactionProxyTest extends AbstractTest {
- protected final Logger log = LoggerFactory.getLogger(getClass());
-
- private static ActorSystem system;
- private static SchemaContext SCHEMA_CONTEXT;
-
- private final Configuration configuration = new MockConfiguration() {
- Map<String, ShardStrategy> strategyMap = ImmutableMap.<String, ShardStrategy>builder().put(
- TestModel.JUNK_QNAME.getLocalName(), new ShardStrategy() {
- @Override
- public String findShard(final YangInstanceIdentifier path) {
- return TestModel.JUNK_QNAME.getLocalName();
- }
-
- @Override
- public YangInstanceIdentifier getPrefixForPath(final YangInstanceIdentifier path) {
- return YangInstanceIdentifier.empty();
- }
- }).put(
- CarsModel.BASE_QNAME.getLocalName(), new ShardStrategy() {
- @Override
- public String findShard(final YangInstanceIdentifier path) {
- return CarsModel.BASE_QNAME.getLocalName();
- }
-
- @Override
- public YangInstanceIdentifier getPrefixForPath(final YangInstanceIdentifier path) {
- return YangInstanceIdentifier.empty();
- }
- }).build();
-
- @Override
- public ShardStrategy getStrategyForModule(final String moduleName) {
- return strategyMap.get(moduleName);
- }
-
- @Override
- public String getModuleNameFromNameSpace(final String nameSpace) {
- if (TestModel.JUNK_QNAME.getNamespace().toASCIIString().equals(nameSpace)) {
- return TestModel.JUNK_QNAME.getLocalName();
- } else if (CarsModel.BASE_QNAME.getNamespace().toASCIIString().equals(nameSpace)) {
- return CarsModel.BASE_QNAME.getLocalName();
- }
- return null;
- }
- };
-
- @Mock
- protected ActorUtils mockActorContext;
-
- protected TransactionContextFactory mockComponentFactory;
-
- @Mock
- private ClusterWrapper mockClusterWrapper;
-
- protected final String memberName = "mock-member";
-
- private final int operationTimeoutInSeconds = 2;
- protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder()
- .operationTimeoutInSeconds(operationTimeoutInSeconds);
-
- @BeforeClass
- public static void setUpClass() {
-
- Config config = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
- .put("akka.actor.default-dispatcher.type",
- "akka.testkit.CallingThreadDispatcherConfigurator").build())
- .withFallback(ConfigFactory.load());
- system = ActorSystem.create("test", config);
- SCHEMA_CONTEXT = TestModel.createTestContext();
- }
-
- @AfterClass
- public static void tearDownClass() {
- TestKit.shutdownActorSystem(system);
- system = null;
- SCHEMA_CONTEXT = null;
- }
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- doReturn(getSystem()).when(mockActorContext).getActorSystem();
- doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
- doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
- doReturn(new ShardStrategyFactory(configuration,
- LogicalDatastoreType.CONFIGURATION)).when(mockActorContext).getShardStrategyFactory();
- doReturn(SCHEMA_CONTEXT).when(mockActorContext).getSchemaContext();
- doReturn(new Timeout(operationTimeoutInSeconds, TimeUnit.SECONDS)).when(mockActorContext).getOperationTimeout();
- doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
- doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
- doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
- doReturn(new Timeout(5, TimeUnit.SECONDS)).when(mockActorContext).getTransactionCommitOperationTimeout();
-
- final ClientIdentifier mockClientId = MockIdentifiers.clientIdentifier(getClass(), memberName);
- mockComponentFactory = new TransactionContextFactory(mockActorContext, mockClientId);
-
- Timer timer = new MetricRegistry().timer("test");
- doReturn(timer).when(mockActorContext).getOperationTimer(any(String.class));
- }
-
- protected ActorSystem getSystem() {
- return system;
- }
-
- protected CreateTransaction eqCreateTransaction(final String expMemberName,
- final TransactionType type) {
- class CreateTransactionArgumentMatcher implements ArgumentMatcher<CreateTransaction> {
- @Override
- public boolean matches(CreateTransaction argument) {
- return argument.getTransactionId().getHistoryId().getClientId().getFrontendId().getMemberName()
- .getName().equals(expMemberName) && argument.getTransactionType() == type.ordinal();
- }
- }
-
- return argThat(new CreateTransactionArgumentMatcher());
- }
-
- protected DataExists eqDataExists() {
- class DataExistsArgumentMatcher implements ArgumentMatcher<DataExists> {
- @Override
- public boolean matches(DataExists argument) {
- return argument.getPath().equals(TestModel.TEST_PATH);
- }
- }
-
- return argThat(new DataExistsArgumentMatcher());
- }
-
- protected ReadData eqReadData() {
- return eqReadData(TestModel.TEST_PATH);
- }
-
- protected ReadData eqReadData(final YangInstanceIdentifier path) {
- class ReadDataArgumentMatcher implements ArgumentMatcher<ReadData> {
- @Override
- public boolean matches(ReadData argument) {
- return argument.getPath().equals(path);
- }
- }
-
- return argThat(new ReadDataArgumentMatcher());
- }
-
- protected Future<Object> readyTxReply(final String path) {
- return Futures.successful((Object)new ReadyTransactionReply(path));
- }
-
-
- protected Future<ReadDataReply> readDataReply(final NormalizedNode<?, ?> data) {
- return Futures.successful(new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION));
- }
-
- protected Future<DataExistsReply> dataExistsReply(final boolean exists) {
- return Futures.successful(new DataExistsReply(exists, DataStoreVersions.CURRENT_VERSION));
- }
-
- protected Future<BatchedModificationsReply> batchedModificationsReply(final int count) {
- return Futures.successful(new BatchedModificationsReply(count));
- }
-
- @SuppressWarnings("unchecked")
- protected Future<Object> incompleteFuture() {
- return mock(Future.class);
- }
-
- protected ActorSelection actorSelection(final ActorRef actorRef) {
- return getSystem().actorSelection(actorRef.path());
- }
-
- protected void expectBatchedModifications(final ActorRef actorRef, final int count) {
- doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
- }
-
- protected void expectBatchedModifications(final int count) {
- doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
- }
-
- protected void expectBatchedModificationsReady(final ActorRef actorRef) {
- expectBatchedModificationsReady(actorRef, false);
- }
-
- protected void expectBatchedModificationsReady(final ActorRef actorRef, final boolean doCommitOnReady) {
- doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
- readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
- }
-
- protected void expectIncompleteBatchedModifications() {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
- }
-
- protected void expectFailedBatchedModifications(final ActorRef actorRef) {
- doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
- }
-
- protected void expectReadyLocalTransaction(final ActorRef actorRef, final boolean doCommitOnReady) {
- doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
- readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyLocalTransaction.class), any(Timeout.class));
- }
-
- protected CreateTransactionReply createTransactionReply(final ActorRef actorRef, final short transactionVersion) {
- return new CreateTransactionReply(actorRef.path().toString(), nextTransactionId(), transactionVersion);
- }
-
- protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem) {
- return setupActorContextWithoutInitialCreateTransaction(actorSystem, DefaultShardStrategy.DEFAULT_SHARD);
- }
-
- protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
- final String shardName) {
- return setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
- DataStoreVersions.CURRENT_VERSION);
- }
-
- protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
- final String shardName, final short transactionVersion) {
- ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
- log.info("Created mock shard actor {}", actorRef);
-
- doReturn(actorSystem.actorSelection(actorRef.path()))
- .when(mockActorContext).actorSelection(actorRef.path().toString());
-
- doReturn(primaryShardInfoReply(actorSystem, actorRef, transactionVersion))
- .when(mockActorContext).findPrimaryShardAsync(eq(shardName));
-
- return actorRef;
- }
-
- protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef) {
- return primaryShardInfoReply(actorSystem, actorRef, DataStoreVersions.CURRENT_VERSION);
- }
-
- protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef,
- final short transactionVersion) {
- return Futures.successful(new PrimaryShardInfo(actorSystem.actorSelection(actorRef.path()),
- transactionVersion));
- }
-
- protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
- final TransactionType type, final short transactionVersion, final String shardName) {
- ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
- transactionVersion);
-
- return setupActorContextWithInitialCreateTransaction(actorSystem, type, transactionVersion,
- memberName, shardActorRef);
- }
-
- protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
- final TransactionType type, final short transactionVersion, final String prefix,
- final ActorRef shardActorRef) {
-
- ActorRef txActorRef;
- if (type == TransactionType.WRITE_ONLY
- && dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled()) {
- txActorRef = shardActorRef;
- } else {
- txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
- log.info("Created mock shard Tx actor {}", txActorRef);
-
- doReturn(actorSystem.actorSelection(txActorRef.path()))
- .when(mockActorContext).actorSelection(txActorRef.path().toString());
-
- doReturn(Futures.successful(createTransactionReply(txActorRef, transactionVersion))).when(mockActorContext)
- .executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
- eqCreateTransaction(prefix, type), any(Timeout.class));
- }
-
- return txActorRef;
- }
-
- protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
- final TransactionType type) {
- return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
- DefaultShardStrategy.DEFAULT_SHARD);
- }
-
- protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
- final TransactionType type,
- final String shardName) {
- return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
- shardName);
- }
-
- @SuppressWarnings({"checkstyle:avoidHidingCauseException", "checkstyle:IllegalThrows"})
- protected void propagateReadFailedExceptionCause(final FluentFuture<?> future) throws Throwable {
- try {
- future.get(5, TimeUnit.SECONDS);
- fail("Expected ReadFailedException");
- } catch (ExecutionException e) {
- final Throwable cause = e.getCause();
- assertTrue("Unexpected cause: " + cause.getClass(), cause instanceof ReadFailedException);
- throw Throwables.getRootCause(cause);
- }
- }
-
- protected List<BatchedModifications> captureBatchedModifications(final ActorRef actorRef) {
- ArgumentCaptor<BatchedModifications> batchedModificationsCaptor =
- ArgumentCaptor.forClass(BatchedModifications.class);
- verify(mockActorContext, Mockito.atLeastOnce()).executeOperationAsync(
- eq(actorSelection(actorRef)), batchedModificationsCaptor.capture(), any(Timeout.class));
-
- List<BatchedModifications> batchedModifications = filterCaptured(
- batchedModificationsCaptor, BatchedModifications.class);
- return batchedModifications;
- }
-
- protected <T> List<T> filterCaptured(final ArgumentCaptor<T> captor, final Class<T> type) {
- List<T> captured = new ArrayList<>();
- for (T c: captor.getAllValues()) {
- if (type.isInstance(c)) {
- captured.add(c);
- }
- }
-
- return captured;
- }
-
- protected void verifyOneBatchedModification(final ActorRef actorRef, final Modification expected,
- final boolean expIsReady) {
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), expIsReady, expIsReady, expected);
- }
-
- protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
- final Modification... expected) {
- verifyBatchedModifications(message, expIsReady, false, expected);
- }
-
- protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
- final boolean expIsDoCommitOnReady, final Modification... expected) {
- assertEquals("Message type", BatchedModifications.class, message.getClass());
- BatchedModifications batchedModifications = (BatchedModifications)message;
- assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
- assertEquals("isReady", expIsReady, batchedModifications.isReady());
- assertEquals("isDoCommitOnReady", expIsDoCommitOnReady, batchedModifications.isDoCommitOnReady());
- for (int i = 0; i < batchedModifications.getModifications().size(); i++) {
- Modification actual = batchedModifications.getModifications().get(i);
- assertEquals("Modification type", expected[i].getClass(), actual.getClass());
- assertEquals("getPath", ((AbstractModification)expected[i]).getPath(),
- ((AbstractModification)actual).getPath());
- if (actual instanceof WriteModification) {
- assertEquals("getData", ((WriteModification)expected[i]).getData(),
- ((WriteModification)actual).getData());
- }
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- protected void verifyCohortFutures(final AbstractThreePhaseCommitCohort<?> proxy,
- final Object... expReplies) {
- assertEquals("getReadyOperationFutures size", expReplies.length,
- proxy.getCohortFutures().size());
-
- List<Object> futureResults = new ArrayList<>();
- for (Future<?> future : proxy.getCohortFutures()) {
- assertNotNull("Ready operation Future is null", future);
- try {
- futureResults.add(Await.result(future, FiniteDuration.create(5, TimeUnit.SECONDS)));
- } catch (Exception e) {
- futureResults.add(e);
- }
- }
-
- for (Object expReply : expReplies) {
- boolean found = false;
- Iterator<?> iter = futureResults.iterator();
- while (iter.hasNext()) {
- Object actual = iter.next();
- if (CommitTransactionReply.isSerializedType(expReply)
- && CommitTransactionReply.isSerializedType(actual)) {
- found = true;
- } else if (expReply instanceof ActorSelection && Objects.equals(expReply, actual)) {
- found = true;
- } else if (expReply instanceof Class && ((Class<?>) expReply).isInstance(actual)) {
- found = true;
- }
-
- if (found) {
- iter.remove();
- break;
- }
- }
-
- if (!found) {
- fail(String.format("No cohort Future response found for %s. Actual: %s", expReply, futureResults));
- }
- }
- }
-}
package org.opendaylight.controller.cluster.datastore;
import static org.junit.Assert.assertFalse;
-import static org.mockito.ArgumentMatchers.anyCollection;
+import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
public class DataTreeChangeListenerActorTest extends AbstractActorTest {
private TestKit testKit;
testKit.within(Duration.ofSeconds(1), () -> {
testKit.expectNoMessage();
- verify(mockListener, never()).onDataTreeChanged(anyCollection());
+ verify(mockListener, never()).onDataTreeChanged(anyList());
return null;
});
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Uninterruptibles;
import java.time.Duration;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import org.eclipse.jdt.annotation.NonNullByDefault;
import org.junit.Test;
-import org.mockito.stubbing.Answer;
+import org.mockito.ArgumentCaptor;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.concurrent.ExecutionContextExecutor;
-import scala.concurrent.Future;
public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
private final DOMDataTreeChangeListener mockListener = mock(DOMDataTreeChangeListener.class);
@Test(timeout = 10000)
public void testSuccessfulRegistration() {
- final TestKit kit = new TestKit(getSystem());
- ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+ final var kit = new TestKit(getSystem());
+ final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
mock(Configuration.class));
- final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
- actorUtils, mockListener, path);
+ final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final var proxy = startProxyAsync(actorUtils, path, false);
- new Thread(() -> proxy.init("shard-1")).start();
-
- Duration timeout = Duration.ofSeconds(5);
- FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
- assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+ final var timeout = Duration.ofSeconds(5);
+ final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+ assertEquals("shard-1", findLocalShard.getShardName());
kit.reply(new LocalShardFound(kit.getRef()));
- RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
- RegisterDataTreeChangeListener.class);
- assertEquals("getPath", path, registerMsg.getPath());
- assertFalse("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+ final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+ assertEquals(path, registerMsg.getPath());
+ assertFalse(registerMsg.isRegisterOnAllInstances());
kit.reply(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
- assertEquals("getListenerRegistrationActor", getSystem().actorSelection(kit.getRef().path()),
- proxy.getListenerRegistrationActor());
+ assertEquals(getSystem().actorSelection(kit.getRef().path()), proxy.getListenerRegistrationActor());
kit.watch(proxy.getDataChangeListenerActor());
@Test(timeout = 10000)
public void testSuccessfulRegistrationForClusteredListener() {
- final TestKit kit = new TestKit(getSystem());
- ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+ final var kit = new TestKit(getSystem());
+ final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
mock(Configuration.class));
- ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
- ClusteredDOMDataTreeChangeListener.class);
-
- final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
- final DataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> proxy =
- new DataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, path);
+ final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final var proxy = startProxyAsync(actorUtils, path, true);
- new Thread(() -> proxy.init("shard-1")).start();
-
- Duration timeout = Duration.ofSeconds(5);
- FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
- assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+ final var timeout = Duration.ofSeconds(5);
+ final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+ assertEquals("shard-1", findLocalShard.getShardName());
kit.reply(new LocalShardFound(kit.getRef()));
- RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
- RegisterDataTreeChangeListener.class);
- assertEquals("getPath", path, registerMsg.getPath());
- assertTrue("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+ final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+ assertEquals(path, registerMsg.getPath());
+ assertTrue(registerMsg.isRegisterOnAllInstances());
proxy.close();
}
@Test(timeout = 10000)
public void testLocalShardNotFound() {
- final TestKit kit = new TestKit(getSystem());
- ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+ final var kit = new TestKit(getSystem());
+ final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
mock(Configuration.class));
- final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
- actorUtils, mockListener, path);
-
- new Thread(() -> proxy.init("shard-1")).start();
+ final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final var proxy = startProxyAsync(actorUtils, path, true);
- Duration timeout = Duration.ofSeconds(5);
- FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
- assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+ final var timeout = Duration.ofSeconds(5);
+ final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+ assertEquals("shard-1", findLocalShard.getShardName());
kit.reply(new LocalShardNotFound("shard-1"));
@Test(timeout = 10000)
public void testLocalShardNotInitialized() {
- final TestKit kit = new TestKit(getSystem());
- ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+ final var kit = new TestKit(getSystem());
+ final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
mock(Configuration.class));
- final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
- actorUtils, mockListener, path);
+ final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final var proxy = startProxyAsync(actorUtils, path, false);
- new Thread(() -> proxy.init("shard-1")).start();
-
- Duration timeout = Duration.ofSeconds(5);
- FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
- assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+ final var timeout = Duration.ofSeconds(5);
+ final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+ assertEquals("shard-1", findLocalShard.getShardName());
kit.reply(new NotInitializedException("not initialized"));
@Test
public void testFailedRegistration() {
- final TestKit kit = new TestKit(getSystem());
- ActorSystem mockActorSystem = mock(ActorSystem.class);
+ final var kit = new TestKit(getSystem());
+ final var mockActorSystem = mock(ActorSystem.class);
- ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
+ final var mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
- ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
+ final var executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
- ActorUtils actorUtils = mock(ActorUtils.class);
- final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final var actorUtils = mock(ActorUtils.class);
+ final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
doReturn(executor).when(actorUtils).getClientDispatcher();
doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
doReturn(mockActorSystem).when(actorUtils).getActorSystem();
- String shardName = "shard-1";
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
- actorUtils, mockListener, path);
-
doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
- doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+ doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
doReturn(Futures.failed(new RuntimeException("mock"))).when(actorUtils).executeOperationAsync(
any(ActorRef.class), any(Object.class), any(Timeout.class));
- doReturn(mock(DatastoreContext.class)).when(actorUtils).getDatastoreContext();
-
- proxy.init("shard-1");
- assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+ final var proxy = DataTreeChangeListenerProxy.of(actorUtils, mockListener, path, true, "shard-1");
+ assertNull(proxy.getListenerRegistrationActor());
proxy.close();
}
@Test
public void testCloseBeforeRegistration() {
- final TestKit kit = new TestKit(getSystem());
- ActorUtils actorUtils = mock(ActorUtils.class);
-
- String shardName = "shard-1";
+ final var kit = new TestKit(getSystem());
+ final var actorUtils = mock(ActorUtils.class);
doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorUtils).getClientDispatcher();
doReturn(getSystem().actorSelection(kit.getRef().path())).when(actorUtils).actorSelection(
kit.getRef().path());
doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
- doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+ doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
- final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
- actorUtils, mockListener, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+ final var proxy = createProxy(actorUtils, YangInstanceIdentifier.of(TestModel.TEST_QNAME), true);
+ final var instance = proxy.getKey();
- Answer<Future<Object>> answer = invocation -> {
- proxy.close();
- return Futures.successful((Object) new RegisterDataTreeNotificationListenerReply(kit.getRef()));
- };
+ doAnswer(invocation -> {
+ instance.close();
+ return Futures.successful(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
+ }).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class), any(Timeout.class));
+ proxy.getValue().run();
- doAnswer(answer).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class),
- any(Timeout.class));
+ kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
- proxy.init(shardName);
+ assertNull(instance.getListenerRegistrationActor());
+ }
- kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
+ @NonNullByDefault
+ private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+ final boolean clustered) {
+ return startProxyAsync(actorUtils, path, clustered, Runnable::run);
+ }
+
+ @NonNullByDefault
+ private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+ final boolean clustered, final Consumer<Runnable> execute) {
+ final var proxy = createProxy(actorUtils, path, clustered);
+ final var thread = new Thread(proxy.getValue());
+ thread.setDaemon(true);
+ thread.start();
+ return proxy.getKey();
+ }
- assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+ @NonNullByDefault
+ private Entry<DataTreeChangeListenerProxy, Runnable> createProxy(final ActorUtils actorUtils,
+ final YangInstanceIdentifier path, final boolean clustered) {
+ final var executor = mock(Executor.class);
+ final var captor = ArgumentCaptor.forClass(Runnable.class);
+ doNothing().when(executor).execute(captor.capture());
+ final var proxy = DataTreeChangeListenerProxy.ofTesting(actorUtils, mockListener, path, clustered, "shard-1",
+ executor);
+ return Map.entry(proxy, captor.getValue());
}
}
import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
import scala.concurrent.Await;
import scala.concurrent.duration.FiniteDuration;
@Override
@Before
- public void setUp() {
+ public void setUp() throws Exception {
super.setUp();
createShard();
}
@Test
public void testInitialChangeListenerEventWithContainerPath() throws DataValidationFailedException {
- writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+ writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .build());
Entry<MockDataTreeChangeListener, ActorSelection> entry = registerChangeListener(TEST_PATH, 1);
MockDataTreeChangeListener listener = entry.getKey();
listener.reset(1);
- writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+ writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .build());
listener.waitForChangeEvents();
listener.verifyNotifiedData(TEST_PATH);
entry.getValue().tell(CloseDataTreeNotificationListenerRegistration.getInstance(), kit.getRef());
kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistrationReply.class);
- writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+ writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .build());
listener.verifyNoNotifiedData(TEST_PATH);
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
import org.opendaylight.yangtools.util.concurrent.FluentFutures;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import scala.concurrent.Await;
/**
*/
public class DataTreeCohortActorTest extends AbstractActorTest {
private static final Collection<DOMDataTreeCandidate> CANDIDATES = new ArrayList<>();
- private static final SchemaContext MOCK_SCHEMA = mock(SchemaContext.class);
+ private static final EffectiveModelContext MOCK_SCHEMA = mock(EffectiveModelContext.class);
private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
private final DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
private final PostCanCommitStep mockPostCanCommit = mock(PostCanCommitStep.class);
askAndAwait(cohortActor, new Commit(txId2));
}
- @SuppressWarnings("unchecked")
@Test
public void testAsyncCohort() throws Exception {
ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
doReturn(executeWithDelay(executor, mockPostCanCommit))
- .when(mockCohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+ .when(mockCohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
doReturn(executor.submit(() -> mockPostPreCommit)).when(mockPostCanCommit).preCommit();
executor.shutdownNow();
}
- @SuppressWarnings("unchecked")
@Test
public void testFailureOnCanCommit() throws Exception {
- DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.empty(),
+ DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.of(),
"mock");
doReturn(FluentFutures.immediateFailedFluentFuture(failure)).when(mockCohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
ActorRef cohortActor = newCohortActor("testFailureOnCanCommit");
}
private ActorRef newCohortActor(final String name) {
- return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.empty()), name);
+ return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.of()), name);
}
- @SuppressWarnings("unchecked")
private void resetMockCohort() {
reset(mockCohort);
doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostCanCommit).abort();
doReturn(Futures.immediateFuture(mockPostPreCommit)).when(mockPostCanCommit).preCommit();
doReturn(FluentFutures.immediateFluentFuture(mockPostCanCommit)).when(mockCohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostPreCommit).abort();
doReturn(Futures.immediateFuture(null)).when(mockPostPreCommit).commit();
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import com.google.common.util.concurrent.FluentFuture;
import com.typesafe.config.ConfigFactory;
import java.util.Collection;
-import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.common.api.DataValidationFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
import org.opendaylight.yangtools.util.concurrent.FluentFutures;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
public class DataTreeCohortIntegrationTest {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testSuccessfulCanCommitWithNoopPostStep() throws Exception {
- final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+ final var cohort = mock(DOMDataTreeCommitCohort.class);
doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
ArgumentCaptor<Collection> candidateCapt = ArgumentCaptor.forClass(Collection.class);
IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
- DistributedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep", "test-1")) {
- final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(TEST_ID,
- cohort);
+ try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep",
+ "test-1")) {
+
+ final var cohortReg = dataStore.registerCommitCohort(TEST_ID, cohort);
assertNotNull(cohortReg);
IntegrationTestKit.verifyShardState(dataStore, "test-1",
state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
- final ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ final var node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
kit.testWriteTransaction(dataStore, TestModel.TEST_PATH, node);
- verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapt.capture());
+ verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapt.capture());
assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapt.getValue().iterator().next(), TEST_ID,
- ModificationType.WRITE, Optional.of(node), Optional.empty());
+ ModificationType.WRITE, node, null);
reset(cohort);
doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
kit.testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
.withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 42))
.build());
- verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+ verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
cohortReg.close();
}
}
- @SuppressWarnings("unchecked")
@Test
public void testFailedCanCommit() throws Exception {
- final DOMDataTreeCommitCohort failedCohort = mock(DOMDataTreeCommitCohort.class);
+ final var failedCohort = mock(DOMDataTreeCommitCohort.class);
doReturn(FAILED_CAN_COMMIT_FUTURE).when(failedCohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
- IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
- DistributedDataStore.class, "testFailedCanCommit", "test-1")) {
+ final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+ try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testFailedCanCommit", "test-1")) {
dataStore.registerCommitCohort(TEST_ID, failedCohort);
IntegrationTestKit.verifyShardState(dataStore, "test-1",
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testCanCommitWithListEntries() throws Exception {
- final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+ final var cohort = mock(DOMDataTreeCommitCohort.class);
doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
- IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+ any(EffectiveModelContext.class), anyCollection());
+ final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
- DistributedDataStore.class, "testCanCommitWithMultipleListEntries", "cars-1")) {
- final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(
+ try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testCanCommitWithMultipleListEntries",
+ "cars-1")) {
+
+ final var cohortReg = dataStore.registerCommitCohort(
new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH
.node(CarsModel.CAR_QNAME)), cohort);
assertNotNull(cohortReg);
kit.doCommit(writeTx.ready());
ArgumentCaptor<Collection> candidateCapture = ArgumentCaptor.forClass(Collection.class);
- verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+ verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapture.getValue().iterator().next(),
new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.WRITE,
- Optional.of(optimaNode), Optional.empty());
+ optimaNode, null);
// Write replace the cars container with 2 new car entries. The cohort should get invoked with 3
// DOMDataTreeCandidates: once for each of the 2 new car entries (WRITE mod) and once for the deleted prior
reset(cohort);
doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
writeTx = dataStore.newWriteOnlyTransaction();
final YangInstanceIdentifier sportagePath = CarsModel.newCarPath("sportage");
kit.doCommit(writeTx.ready());
candidateCapture = ArgumentCaptor.forClass(Collection.class);
- verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+ verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.WRITE,
- Optional.of(sportageNode), Optional.empty());
+ sportageNode, null);
assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.WRITE,
- Optional.of(soulNode), Optional.empty());
+ soulNode, null);
assertDataTreeCandidate(findCandidate(candidateCapture, optimaPath), new DOMDataTreeIdentifier(
LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.DELETE,
- Optional.empty(), Optional.of(optimaNode));
+ null, optimaNode);
// Delete the cars container - cohort should be invoked for the 2 deleted car entries.
reset(cohort);
doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
writeTx = dataStore.newWriteOnlyTransaction();
writeTx.delete(CarsModel.BASE_PATH);
kit.doCommit(writeTx.ready());
candidateCapture = ArgumentCaptor.forClass(Collection.class);
- verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+ verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.DELETE,
- Optional.empty(), Optional.of(sportageNode));
+ null, sportageNode);
assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.DELETE,
- Optional.empty(), Optional.of(soulNode));
+ null, soulNode);
}
}
* DataTreeCandidate) and since currently preCommit is a noop in the Shard backend (it is combined with commit),
* we can't actually test abort after canCommit.
*/
- @SuppressWarnings("unchecked")
@Test
@Ignore
public void testAbortAfterCanCommit() throws Exception {
- final DOMDataTreeCommitCohort cohortToAbort = mock(DOMDataTreeCommitCohort.class);
- final PostCanCommitStep stepToAbort = mock(PostCanCommitStep.class);
+ final var cohortToAbort = mock(DOMDataTreeCommitCohort.class);
+ final var stepToAbort = mock(PostCanCommitStep.class);
doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(stepToAbort).abort();
doReturn(PostPreCommitStep.NOOP_FUTURE).when(stepToAbort).preCommit();
doReturn(FluentFutures.immediateFluentFuture(stepToAbort)).when(cohortToAbort).canCommit(any(Object.class),
- any(SchemaContext.class), any(Collection.class));
+ any(EffectiveModelContext.class), anyCollection());
- IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
- DistributedDataStore.class, "testAbortAfterCanCommit", "test-1", "cars-1")) {
+ var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+ try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testAbortAfterCanCommit",
+ "test-1", "cars-1")) {
dataStore.registerCommitCohort(TEST_ID, cohortToAbort);
IntegrationTestKit.verifyShardState(dataStore, "test-1",
state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
- DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ var writeTx = dataStore.newWriteOnlyTransaction();
writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
- DOMStoreThreePhaseCommitCohort dsCohort = writeTx.ready();
+ var dsCohort = writeTx.ready();
dsCohort.canCommit().get(5, TimeUnit.SECONDS);
dsCohort.preCommit().get(5, TimeUnit.SECONDS);
private static void assertDataTreeCandidate(final DOMDataTreeCandidate candidate,
final DOMDataTreeIdentifier expTreeId, final ModificationType expType,
- final Optional<NormalizedNode<?, ?>> expDataAfter, final Optional<NormalizedNode<?, ?>> expDataBefore) {
+ final NormalizedNode expDataAfter, final NormalizedNode expDataBefore) {
assertNotNull("Expected candidate for path " + expTreeId.getRootIdentifier(), candidate);
assertEquals("rootPath", expTreeId, candidate.getRootPath());
- assertEquals("modificationType", expType, candidate.getRootNode().getModificationType());
-
- assertEquals("dataAfter present", expDataAfter.isPresent(), candidate.getRootNode().getDataAfter().isPresent());
- if (expDataAfter.isPresent()) {
- assertEquals("dataAfter", expDataAfter.get(), candidate.getRootNode().getDataAfter().get());
- }
-
- assertEquals("dataBefore present", expDataBefore.isPresent(),
- candidate.getRootNode().getDataBefore().isPresent());
- if (expDataBefore.isPresent()) {
- assertEquals("dataBefore", expDataBefore.get(), candidate.getRootNode().getDataBefore().get());
- }
+ assertEquals("modificationType", expType, candidate.getRootNode().modificationType());
+ assertEquals("dataAfter", expDataAfter, candidate.getRootNode().dataAfter());
+ assertEquals("dataBefore", expDataBefore, candidate.getRootNode().dataBefore());
}
}
return currProps.get(obj);
}
- private class DummyListenerImpl implements Listener {
-
+ private static final class DummyListenerImpl implements Listener {
private DatastoreContextFactory contextFactory;
@Override
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_MS;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
-import org.opendaylight.binding.runtime.spi.BindingRuntimeHelpers;
import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
/**
* Unit tests for DatastoreContextIntrospector.
properties.put("recovery-snapshot-interval-seconds", "360");
properties.put("shard-isolated-leader-check-interval-in-millis", "123");
properties.put("shard-snapshot-data-threshold-percentage", "100");
+ properties.put("shard-snapshot-data-threshold", "800");
properties.put("shard-election-timeout-factor", "21");
properties.put("shard-batched-modification-count", "901");
properties.put("transactionCreationInitialRateLimit", "200");
- properties.put("MaxShardDataChangeExecutorPoolSize", "41");
- properties.put("Max-Shard-Data-Change Executor-Queue Size", "1111");
- properties.put(" max shard data change listener queue size", "2222");
- properties.put("mAx-shaRd-data-STORE-executor-quEUe-size", "3333");
properties.put("persistent", "false");
properties.put("initial-payload-serialized-buffer-capacity", "600");
+ properties.put("export-on-recovery", "json");
+ properties.put("recovery-json-dump", "persistence-export");
boolean updated = introspector.update(properties);
assertTrue("updated", updated);
assertEquals(360, context.getShardRaftConfig().getRecoverySnapshotIntervalSeconds());
assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(800, context.getShardRaftConfig().getSnapshotDataThreshold());
assertEquals(21, context.getShardRaftConfig().getElectionTimeoutFactor());
assertEquals(901, context.getShardBatchedModificationCount());
assertEquals(200, context.getTransactionCreationInitialRateLimit());
assertEquals(600, context.getInitialPayloadSerializedBufferCapacity());
+ assertEquals("persistence-export", context.getRecoveryExportBaseDir());
+ assertEquals(ExportOnRecovery.Json, context.getExportOnRecovery());
assertFalse(context.isPersistent());
properties.put("shard-transaction-idle-timeout-in-minutes", "32");
assertEquals(6, context.getInitialSettleTimeoutMultiplier());
assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(800, context.getShardRaftConfig().getSnapshotDataThreshold());
assertEquals(22, context.getShardRaftConfig().getElectionTimeoutFactor());
assertEquals(200, context.getTransactionCreationInitialRateLimit());
assertTrue(context.isPersistent());
properties.put("shard-heartbeat-interval-in-millis", "99"); // bad - must be >= 100
properties.put("shard-transaction-commit-queue-capacity", "567"); // good
properties.put("shard-snapshot-data-threshold-percentage", "101"); // bad - must be 0-100
+ properties.put("shard-snapshot-data-threshold", "-1"); // bad - must be > 0
properties.put("shard-initialization-timeout-in-seconds", "-1"); // bad - must be > 0
- properties.put("max-shard-data-change-executor-pool-size", "bogus"); // bad - NaN
properties.put("unknownProperty", "1"); // bad - invalid property name
final boolean updated = introspector.update(properties);
assertEquals(567, context.getShardTransactionCommitQueueCapacity());
assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD, context.getShardRaftConfig().getSnapshotDataThreshold());
assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT, context.getShardInitializationTimeout());
}
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_MAX_MESSAGE_SLICE_SIZE;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_MS;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_PERSISTENT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_RECOVERY_EXPORT_BASE_DIR;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
-import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
public class DatastoreContextTest {
context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD,
+ context.getShardRaftConfig().getSnapshotDataThreshold());
assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, context.getShardRaftConfig().getElectionTimeoutFactor());
assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, context.getTransactionCreationInitialRateLimit());
assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT,
context.getShardBatchedModificationCount());
assertEquals(DEFAULT_MAX_MESSAGE_SLICE_SIZE, context.getMaximumMessageSliceSize());
+ assertEquals(DEFAULT_RECOVERY_EXPORT_BASE_DIR, context.getRecoveryExportBaseDir());
}
@Test
builder.persistent(!DEFAULT_PERSISTENT);
builder.shardIsolatedLeaderCheckIntervalInMillis(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1);
builder.shardSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1);
+ builder.shardSnapshotDataThreshold(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD + 1);
builder.shardElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1);
builder.transactionCreationInitialRateLimit(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1);
builder.shardBatchedModificationCount(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1);
- builder.maxShardDataChangeExecutorPoolSize(
- InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1);
- builder.maxShardDataChangeExecutorQueueSize(
- InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1);
- builder.maxShardDataChangeListenerQueueSize(
- InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1);
- builder.maxShardDataStoreExecutorQueueSize(
- InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1);
builder.maximumMessageSliceSize(DEFAULT_MAX_MESSAGE_SLICE_SIZE + 1);
builder.initialPayloadSerializedBufferCapacity(DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY + 1);
+ builder.exportOnRecovery(ExportOnRecovery.Json);
+ builder.recoveryExportBaseDir(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new");
DatastoreContext context = builder.build();
context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1,
context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD + 1,
+ context.getShardRaftConfig().getSnapshotDataThreshold());
assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1,
context.getShardRaftConfig().getElectionTimeoutFactor());
assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1, context.getTransactionCreationInitialRateLimit());
assertEquals(DEFAULT_MAX_MESSAGE_SLICE_SIZE + 1, context.getMaximumMessageSliceSize());
assertEquals(DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY + 1,
context.getInitialPayloadSerializedBufferCapacity());
+ assertEquals(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new",
+ context.getRecoveryExportBaseDir());
+ assertEquals(ExportOnRecovery.Json, context.getExportOnRecovery());
}
}
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
/**
* Unit tests for DatastoreSnapshotRestore.
}
private static ShardManagerSnapshot newShardManagerSnapshot(final String... shards) {
- return new ShardManagerSnapshot(Arrays.asList(shards), Collections.emptyMap());
+ return new ShardManagerSnapshot(Arrays.asList(shards));
}
- private static Snapshot newSnapshot(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
- throws Exception {
+ private static Snapshot newSnapshot(final YangInstanceIdentifier path, final NormalizedNode node) throws Exception {
DataTree dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
SchemaContextHelper.full());
AbstractShardTest.writeToStore(dataTree, path, node);
- NormalizedNode<?, ?> root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+ NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
return Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
Collections.<ReplicatedLogEntry>emptyList(), 2, 1, 2, 1, 1, "member-1", null);
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertSame;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.same;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import scala.concurrent.Future;
-
-/**
- * Unit tests for DebugThreePhaseCommitCohort.
- *
- * @author Thomas Pantelis
- */
-public class DebugThreePhaseCommitCohortTest {
- private final TransactionIdentifier transactionId = MockIdentifiers.transactionIdentifier(
- DebugThreePhaseCommitCohortTest.class, "mock");
-
- @Test
- public void test() {
- AbstractThreePhaseCommitCohort<?> mockDelegate = mock(AbstractThreePhaseCommitCohort.class);
- Exception failure = new Exception("mock failure");
- ListenableFuture<Object> expFailedFuture = Futures.immediateFailedFuture(failure);
- doReturn(expFailedFuture).when(mockDelegate).canCommit();
- doReturn(expFailedFuture).when(mockDelegate).preCommit();
- doReturn(expFailedFuture).when(mockDelegate).commit();
-
- ListenableFuture<Object> expAbortFuture = Futures.immediateFuture(null);
- doReturn(expAbortFuture).when(mockDelegate).abort();
-
- List<Future<Object>> expCohortFutures = new ArrayList<>();
- doReturn(expCohortFutures).when(mockDelegate).getCohortFutures();
-
- Throwable debugContext = new RuntimeException("mock");
- DebugThreePhaseCommitCohort cohort = new DebugThreePhaseCommitCohort(transactionId, mockDelegate, debugContext);
-
- Logger mockLogger = mock(Logger.class);
- cohort.setLogger(mockLogger);
-
- assertSame("canCommit", expFailedFuture, cohort.canCommit());
- verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
- reset(mockLogger);
- assertSame("preCommit", expFailedFuture, cohort.preCommit());
- verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
- reset(mockLogger);
- assertSame("commit", expFailedFuture, cohort.commit());
- verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
- assertSame("abort", expAbortFuture, cohort.abort());
-
- assertSame("getCohortFutures", expCohortFutures, cohort.getCohortFutures());
-
- reset(mockLogger);
- ListenableFuture<Boolean> expSuccessFuture = Futures.immediateFuture(Boolean.TRUE);
- doReturn(expSuccessFuture).when(mockDelegate).canCommit();
-
- assertSame("canCommit", expSuccessFuture, cohort.canCommit());
- verify(mockLogger, never()).warn(anyString(), any(TransactionIdentifier.class), any(Throwable.class),
- any(Throwable.class));
- }
-}
@Parameters(name = "{0}")
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {
- { TestDistributedDataStore.class }, { TestClientBackedDataStore.class }
+ { TestClientBackedDataStore.class }
});
}
final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, testName, false, shardName)) {
-
+ try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
// Create the write Tx
final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction()
: dataStore.newReadWriteTransaction();
// Verify the data in the store
final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
- Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ Optional<NormalizedNode> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, testName, false, shardName)) {
-
+ try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
// Create the read-write Tx
final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", readWriteTx);
// Do some reads on the Tx on a separate thread.
final AtomicReference<FluentFuture<Boolean>> txExistsFuture = new AtomicReference<>();
- final AtomicReference<FluentFuture<Optional<NormalizedNode<?, ?>>>> txReadFuture = new AtomicReference<>();
+ final AtomicReference<FluentFuture<Optional<NormalizedNode>>> txReadFuture = new AtomicReference<>();
final AtomicReference<Exception> caughtEx = new AtomicReference<>();
final CountDownLatch txReadsDone = new CountDownLatch(1);
final Thread txThread = new Thread(() -> {
InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
- final AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName);
+ final var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName);
// Create the write Tx
final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+ try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
// Create the read-write Tx
final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", readWriteTx);
// Do a read on the Tx on a separate thread.
- final AtomicReference<FluentFuture<Optional<NormalizedNode<?, ?>>>> txReadFuture = new AtomicReference<>();
+ final AtomicReference<FluentFuture<Optional<NormalizedNode>>> txReadFuture = new AtomicReference<>();
final AtomicReference<Exception> caughtEx = new AtomicReference<>();
final CountDownLatch txReadDone = new CountDownLatch(1);
final Thread txThread = new Thread(() -> {
package org.opendaylight.controller.cluster.datastore;
import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import com.google.common.base.Stopwatch;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.primitives.UnsignedLong;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
-import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Supplier;
import org.junit.After;
-import org.junit.Assume;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
-import org.mockito.Mockito;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
+import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
+import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.collection.Set;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
@Parameters(name = "{0}")
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {
- { TestDistributedDataStore.class, 7}, { TestClientBackedDataStore.class, 12 }
+ { TestClientBackedDataStore.class, 12 }
});
}
@Parameter(0)
- public Class<? extends AbstractDataStore> testParameter;
+ public Class<? extends ClientBackedDataStore> testParameter;
@Parameter(1)
public int commitTimeout;
private final TransactionIdentifier tx1 = nextTransactionId();
private final TransactionIdentifier tx2 = nextTransactionId();
- private AbstractDataStore followerDistributedDataStore;
- private AbstractDataStore leaderDistributedDataStore;
+ private ClientBackedDataStore followerDistributedDataStore;
+ private ClientBackedDataStore leaderDistributedDataStore;
private IntegrationTestKit followerTestKit;
private IntegrationTestKit leaderTestKit;
@After
public void tearDown() {
if (followerDistributedDataStore != null) {
- leaderDistributedDataStore.close();
+ followerDistributedDataStore.close();
}
if (leaderDistributedDataStore != null) {
leaderDistributedDataStore.close();
}
- TestKit.shutdownActorSystem(leaderSystem);
- TestKit.shutdownActorSystem(followerSystem);
- TestKit.shutdownActorSystem(follower2System);
+ TestKit.shutdownActorSystem(leaderSystem, true);
+ TestKit.shutdownActorSystem(followerSystem, true);
+ TestKit.shutdownActorSystem(follower2System,true);
InMemoryJournal.clear();
InMemorySnapshotStore.clear();
throws Exception {
leaderTestKit = new IntegrationTestKit(leaderSystem, leaderBuilder, commitTimeout);
- leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
- testParameter, type, moduleShardsConfig, false, shards);
+ leaderDistributedDataStore = leaderTestKit.setupDataStore(testParameter, type, moduleShardsConfig, false,
+ shards);
followerTestKit = new IntegrationTestKit(followerSystem, followerBuilder, commitTimeout);
- followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
+ followerDistributedDataStore = followerTestKit.setupDataStore(
testParameter, type, moduleShardsConfig, false, shards);
leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
throws Exception {
- final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
-
- final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
- CarsModel.CAR_QNAME);
- for (final NormalizedNode<?, ?> entry: entries) {
- listBuilder.withChild((MapEntryNode) entry);
- }
-
- assertEquals("Car list node", listBuilder.build(), optional.get());
+ assertEquals("Car list node",
+ Optional.of(ImmutableNodes.mapNodeBuilder(CarsModel.CAR_QNAME).withValue(Arrays.asList(entries)).build()),
+ readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS));
}
private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
- final NormalizedNode<?, ?> expNode) throws Exception {
- final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", expNode, optional.get());
+ final NormalizedNode expNode) throws Exception {
+ assertEquals(Optional.of(expNode), readTx.read(path).get(5, TimeUnit.SECONDS));
}
private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
throws Exception {
- final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
- assertEquals("exists", Boolean.TRUE, exists);
+ assertEquals("exists", Boolean.TRUE, readTx.exists(path).get(5, TimeUnit.SECONDS));
}
@Test
final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
- try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
- commitTimeout)
- .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
+ try (var member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder, commitTimeout)
+ .setupDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
}
}
@Test
public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
- final String testName = "testWriteTransactionWithSingleShard";
- initDatastoresWithCars(testName);
+ initDatastoresWithCars("testSingleTransactionsWritesInQuickSuccession");
final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
int numCars = 5;
for (int i = 0; i < numCars; i++) {
writeTx = txChain.newWriteOnlyTransaction();
- writeTx.write(CarsModel.newCarPath("car" + i),
- CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
-
+ writeTx.write(CarsModel.newCarPath("car" + i), CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
followerTestKit.doCommit(writeTx.ready());
- DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
- domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
-
- domStoreReadTransaction.close();
+ try (var tx = txChain.newReadOnlyTransaction()) {
+ tx.read(CarsModel.BASE_PATH).get();
+ }
}
// wait to let the shard catch up with purged
await("Range set leak test").atMost(5, TimeUnit.SECONDS)
- .pollInterval(500, TimeUnit.MILLISECONDS)
- .untilAsserted(() -> {
- Optional<ActorRef> localShard =
- leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
- FrontendShardDataTreeSnapshotMetadata frontendMetadata =
- (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
- .executeOperation(localShard.get(), new RequestFrontendMetadata());
-
- if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
- Iterator<FrontendHistoryMetadata> iterator =
- frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
- FrontendHistoryMetadata metadata = iterator.next();
- while (iterator.hasNext() && metadata.getHistoryId() != 1) {
- metadata = iterator.next();
- }
-
- assertEquals(0, metadata.getClosedTransactions().size());
- assertEquals(Range.closedOpen(UnsignedLong.valueOf(0), UnsignedLong.valueOf(11)),
- metadata.getPurgedTransactions().asRanges().iterator().next());
- } else {
- // ask based should track no metadata
- assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
- }
- });
-
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
- .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+ .pollInterval(500, TimeUnit.MILLISECONDS)
+ .untilAsserted(() -> {
+ final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+ final var frontendMetadata =
+ (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+ .executeOperation(localShard, new RequestFrontendMetadata());
+
+ assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+ });
+
+ try (var tx = txChain.newReadOnlyTransaction()) {
+ final var body = assertInstanceOf(Collection.class,
+ tx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS).orElseThrow().body());
+ assertEquals(numCars, ((Collection<?>) body).size());
+ }
+ }
+
+ private static void assertClientMetadata(final FrontendClientMetadata clientMeta, final long lastPurged) {
+ final var iterator = clientMeta.getCurrentHistories().iterator();
+ var metadata = iterator.next();
+ while (iterator.hasNext() && metadata.getHistoryId() != 1) {
+ metadata = iterator.next();
+ }
+
+ assertEquals(UnsignedLongBitmap.of(), metadata.getClosedTransactions());
+ assertEquals("[[0.." + lastPurged + "]]", metadata.getPurgedTransactions().ranges().toString());
}
@Test
- @Ignore("Flushes out tell based leak needs to be handled separately")
public void testCloseTransactionMetadataLeak() throws Exception {
- // Ask based frontend seems to have some issues with back to back close
- Assume.assumeTrue(testParameter.isAssignableFrom(TestClientBackedDataStore.class));
-
- final String testName = "testWriteTransactionWithSingleShard";
- initDatastoresWithCars(testName);
+ initDatastoresWithCars("testCloseTransactionMetadataLeak");
- final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
+ final var txChain = followerDistributedDataStore.createTransactionChain();
- DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+ var writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
followerTestKit.doCommit(writeTx.ready());
int numCars = 5;
for (int i = 0; i < numCars; i++) {
- writeTx = txChain.newWriteOnlyTransaction();
- writeTx.close();
-
- DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
- domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
+ try (var tx = txChain.newWriteOnlyTransaction()) {
+ // Empty on purpose
+ }
- domStoreReadTransaction.close();
+ try (var tx = txChain.newReadOnlyTransaction()) {
+ tx.read(CarsModel.BASE_PATH).get();
+ }
}
- writeTx = txChain.newWriteOnlyTransaction();
- writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
- writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
- followerTestKit.doCommit(writeTx.ready());
-
// wait to let the shard catch up with purged
- await("Close transaction purge leak test.").atMost(5, TimeUnit.SECONDS)
- .pollInterval(500, TimeUnit.MILLISECONDS)
- .untilAsserted(() -> {
- Optional<ActorRef> localShard =
- leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
- FrontendShardDataTreeSnapshotMetadata frontendMetadata =
- (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
- .executeOperation(localShard.get(), new RequestFrontendMetadata());
-
- if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
- Iterator<FrontendHistoryMetadata> iterator =
- frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
- FrontendHistoryMetadata metadata = iterator.next();
- while (iterator.hasNext() && metadata.getHistoryId() != 1) {
- metadata = iterator.next();
- }
-
- Set<Range<UnsignedLong>> ranges = metadata.getPurgedTransactions().asRanges();
-
- assertEquals(0, metadata.getClosedTransactions().size());
- assertEquals(1, ranges.size());
- } else {
- // ask based should track no metadata
- assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
- }
- });
-
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
- .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+ await("wait for purges to settle").atMost(5, TimeUnit.SECONDS)
+ .pollInterval(500, TimeUnit.MILLISECONDS)
+ .untilAsserted(() -> {
+ final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+ final var frontendMetadata =
+ (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+ .executeOperation(localShard, new RequestFrontendMetadata());
+
+ assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+ });
}
@Test
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
- final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
+ final NormalizedNode carsNode = CarsModel.emptyContainer();
writeTx.write(carsPath, carsNode);
final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
- final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
+ final NormalizedNode peopleNode = PeopleModel.emptyContainer();
writeTx.write(peoplePath, peopleNode);
followerTestKit.doCommit(writeTx.ready());
assertNotNull("newReadWriteTransaction returned null", rwTx);
final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
- final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
+ final NormalizedNode carsNode = CarsModel.emptyContainer();
rwTx.write(carsPath, carsNode);
final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
- final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
+ final NormalizedNode peopleNode = PeopleModel.emptyContainer();
rwTx.write(peoplePath, peopleNode);
followerTestKit.doCommit(rwTx.ready());
final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
readWriteTx.merge(personPath, person);
- Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", car, optional.get());
-
- optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", person, optional.get());
+ assertEquals(Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
+ assertEquals(Optional.of(person), readWriteTx.read(personPath).get(5, TimeUnit.SECONDS));
final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
verifyCars(readTx, car);
- optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
- assertFalse("isPresent", optional.isPresent());
+ assertEquals(Optional.empty(), readTx.read(personPath).get(5, TimeUnit.SECONDS));
}
@Test
LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
MoreExecutors.directExecutor());
- final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
- final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+ final var listener = mock(FutureCallback.class);
+ final DOMTransactionChain txChain = broker.createTransactionChain();
+ txChain.addCallback(listener);
final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
- final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+ .build());
- writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+ final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
+ assertInstanceOf(TransactionCommitFailedException.class, ex.getCause());
- try {
- writeTx.commit().get(5, TimeUnit.SECONDS);
- fail("Expected TransactionCommitFailedException");
- } catch (final ExecutionException e) {
- // Expected
- }
-
- verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
+ verify(listener, timeout(5000)).onFailure(any());
txChain.close();
broker.close();
public void testChainedTransactionFailureWithMultipleShards() throws Exception {
initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
- final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
- ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
- LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
- MoreExecutors.directExecutor());
+ try (var broker = new ConcurrentDOMDataBroker(
+ Map.of(LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore), MoreExecutors.directExecutor())) {
- final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
- final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+ final var listener = mock(FutureCallback.class);
+ final DOMTransactionChain txChain = broker.createTransactionChain();
+ txChain.addCallback(listener);
- final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+ final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
- writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
- final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+ // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
+ // done for put for performance reasons.
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+ .build());
- // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
- // done for put for performance reasons.
- writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+ final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS))
+ .getCause();
+ assertThat(ex, instanceOf(TransactionCommitFailedException.class));
- try {
- writeTx.commit().get(5, TimeUnit.SECONDS);
- fail("Expected TransactionCommitFailedException");
- } catch (final ExecutionException e) {
- // Expected
- }
-
- verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
+ verify(listener, timeout(5000)).onFailure(any());
- txChain.close();
- broker.close();
+ txChain.close();
+ }
}
@Test
.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
- try (AbstractDataStore ds =
- newMember1TestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
+ try (var ds = newMember1TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false,
+ CARS)) {
followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
}
}
- @SuppressWarnings("unchecked")
@Test
public void testReadyLocalTransactionForwardedToLeader() throws Exception {
initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());
- carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+ carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
Object resp = followerTestKit.expectMsgClass(Object.class);
if (resp instanceof akka.actor.Status.Failure) {
throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());
- carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+ carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
resp = followerTestKit.expectMsgClass(Object.class);
if (resp instanceof akka.actor.Status.Failure) {
throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
((ReadyTransactionReply)resp).getCohortPath());
- final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
- Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
- ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
- leaderDistributedDataStore.getActorUtils(), Arrays.asList(
- new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
+ ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(leaderDistributedDataStore.getActorUtils(),
+ List.of(new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor),
+ () -> DataStoreVersions.CURRENT_VERSION)), tx2);
cohort.canCommit().get(5, TimeUnit.SECONDS);
cohort.preCommit().get(5, TimeUnit.SECONDS);
cohort.commit().get(5, TimeUnit.SECONDS);
verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
}
- @SuppressWarnings("unchecked")
@Test
public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
followerDistributedDataStore.getActorUtils().findLocalShard("cars");
assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
- carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
+ carsFollowerShard.orElseThrow().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
// Send a tx with immediate commit.
final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
- ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
- DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
- Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
- Optional.empty());
+ ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1, DataStoreVersions.CURRENT_VERSION,
+ new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx1, modification),
+ true, Optional.empty());
- carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+ carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
Object resp = followerTestKit.expectMsgClass(Object.class);
if (resp instanceof akka.actor.Status.Failure) {
throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
- forwardedReady = new ForwardedReadyTransaction(tx2,
- DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
- Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
- Optional.empty());
+ forwardedReady = new ForwardedReadyTransaction(tx2, DataStoreVersions.CURRENT_VERSION,
+ new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx2, modification),
+ false, Optional.empty());
- carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+ carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
resp = followerTestKit.expectMsgClass(Object.class);
if (resp instanceof akka.actor.Status.Failure) {
throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
((ReadyTransactionReply)resp).getCohortPath());
- final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
- Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
- leaderDistributedDataStore.getActorUtils(), Arrays.asList(
- new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
+ leaderDistributedDataStore.getActorUtils(), List.of(
+ new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor),
+ () -> DataStoreVersions.CURRENT_VERSION)), tx2);
cohort.canCommit().get(5, TimeUnit.SECONDS);
cohort.preCommit().get(5, TimeUnit.SECONDS);
cohort.commit().get(5, TimeUnit.SECONDS);
@Test
public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
- // FIXME: remove when test passes also for ClientBackedDataStore
- Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
followerDatastoreContextBuilder.shardBatchedModificationCount(2);
leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
+ // Verify backend statistics on start
+ verifyCarsReadWriteTransactions(leaderDistributedDataStore, 0);
+ verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
// Do an initial write to get the primary shard info cached.
final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
carIndex++;
- NormalizedNode<?, ?> people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
+ NormalizedNode people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
.withChild(PeopleModel.newPersonEntry("Dude")).build();
writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
+ // At this point only leader should see the transactions
+ verifyCarsReadWriteTransactions(leaderDistributedDataStore, 2);
+ verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
// Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
- // tx writes 5 cars so 2 BatchedModidifications messages will be sent initially and cached in the
- // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModidifications will be
- // sent on ready.
+ // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the leader shard
+ // (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be sent on ready.
final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
for (int i = 1; i <= 5; i++, carIndex++) {
writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
}
- // Prepare another WO that writes to a single shard. This will send a single BatchedModidifications
- // message on ready.
+ // Prepare another WO that writes to a single shard. This will send a single BatchedModifications message
+ // on ready.
final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
carIndex++;
- // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaciton message to the
- // leader shard on ready.
+ // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the leader shard
+ // on ready.
final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
- readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
+ final YangInstanceIdentifier carPath = CarsModel.newCarPath("car" + carIndex);
+ readWriteTx.write(carPath, cars.getLast());
- IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
- stats -> assertEquals("getReadWriteTransactionCount", 5, stats.getReadWriteTransactionCount()));
+ // There is a difference here between implementations: tell-based protocol enforces batching on per-transaction
+ // level whereas ask-based protocol has a global limit towards a shard -- and hence flushes out last two
+ // transactions eagerly.
+ verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+ verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
// Disable elections on the leader so it switches to follower.
followerTestKit.doCommit(writeTx4Cohort);
followerTestKit.doCommit(rwTxCohort);
+ // At this point everything is committed and the follower datastore should see 5 transactions, but leader should
+ // only see the initial transactions
+ verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+ verifyCarsReadWriteTransactions(followerDistributedDataStore, 5);
+
DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
}
+ private static void verifyCarsReadWriteTransactions(final ClientBackedDataStore datastore, final int expected)
+ throws Exception {
+ IntegrationTestKit.verifyShardStats(datastore, "cars",
+ stats -> assertEquals("getReadWriteTransactionCount", expected, stats.getReadWriteTransactionCount()));
+ }
+
@Test
public void testLeadershipTransferOnShutdown() throws Exception {
- // FIXME: remove when test passes also for ClientBackedDataStore
- Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
final String testName = "testLeadershipTransferOnShutdown";
final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
commitTimeout);
- try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
+ try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+ MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
followerTestKit.waitForMembersUp("member-3");
follower2TestKit.waitForMembersUp("member-1", "member-2");
writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
- IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
- stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
+ // FIXME: this assertion should be made in an explicit Shard test
+ // IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+ // stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
writeTx.write(CarsModel.newCarPath("optima"), car);
final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
- IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
- stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
+ // FIXME: this assertion should be made in an explicit Shard test
+ // IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+ // stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
// Gracefully stop the leader via a Shutdown message.
@Test
public void testTransactionWithIsolatedLeader() throws Exception {
- // FIXME: remove when test passes also for ClientBackedDataStore
- Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
// Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
final String testName = "testTransactionWithIsolatedLeader";
MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
- try {
- leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
- fail("Expected NoShardLeaderException");
- } catch (final ExecutionException e) {
- assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
- }
+ final var noShardLeaderCohort = noShardLeaderWriteTx.ready();
+ // tell-based canCommit() does not have a real timeout and hence continues
+ final var canCommit = noShardLeaderCohort.canCommit();
+ Uninterruptibles.sleepUninterruptibly(commitTimeout, TimeUnit.SECONDS);
+ assertFalse(canCommit.isDone());
sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
.shardElectionTimeoutFactor(100));
final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
- followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
+ followerDistributedDataStore = followerTestKit.setupDataStore(testParameter, testName,
+ MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
leaderTestKit.doCommit(successTxCohort);
+
+ // continuation of canCommit(): readied transaction will complete commit, but will report an OLFE
+ final var ex = assertThrows(ExecutionException.class,
+ () -> canCommit.get(commitTimeout, TimeUnit.SECONDS)).getCause();
+ assertThat(ex, instanceOf(OptimisticLockFailedException.class));
+ assertEquals("Optimistic lock failed for path " + CarsModel.BASE_PATH, ex.getMessage());
+ final var cause = ex.getCause();
+ assertThat(cause, instanceOf(ConflictingModificationAppliedException.class));
+ final var cmae = (ConflictingModificationAppliedException) cause;
+ assertEquals("Node was created by other transaction.", cmae.getMessage());
+ assertEquals(CarsModel.BASE_PATH, cmae.getPath());
}
@Test
rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
- try {
- followerTestKit.doCommit(rwTx.ready());
- fail("Exception expected");
- } catch (final ExecutionException e) {
- final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
- if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
- assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
- || e.getCause() instanceof ShardLeaderNotRespondingException);
- } else {
- assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
- }
- }
+ final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
+ assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+ Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
}
@Test
rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
- try {
- followerTestKit.doCommit(rwTx.ready());
- fail("Exception expected");
- } catch (final ExecutionException e) {
- final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
- if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
- assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException);
- } else {
- assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
- }
- }
+ final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
+ assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+ Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
}
@Test
final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
follower2System, follower2DatastoreContextBuilder, commitTimeout);
- try (AbstractDataStore ds =
- follower2TestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
+ try (var ds = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
followerTestKit.waitForMembersUp("member-1", "member-3");
follower2TestKit.waitForMembersUp("member-1", "member-2");
final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
follower2System, follower2DatastoreContextBuilder, commitTimeout);
- final AbstractDataStore ds2 =
- follower2TestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
+ final var ds2 = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
followerTestKit.waitForMembersUp("member-1", "member-3");
follower2TestKit.waitForMembersUp("member-1", "member-2");
- TestKit.shutdownActorSystem(follower2System);
+ // behavior is controlled by akka.coordinated-shutdown.run-by-actor-system-terminate configuration option
+ TestKit.shutdownActorSystem(follower2System, true);
- ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
- OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
+ ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+ final OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
.executeOperation(cars, GetOnDemandRaftState.INSTANCE);
Cluster leaderCluster = Cluster.get(leaderSystem);
Member follower2Member = follower2Cluster.readView().self();
await().atMost(10, TimeUnit.SECONDS)
- .until(() -> leaderCluster.readView().unreachableMembers().contains(follower2Member));
+ .until(() -> containsUnreachable(leaderCluster, follower2Member));
await().atMost(10, TimeUnit.SECONDS)
- .until(() -> followerCluster.readView().unreachableMembers().contains(follower2Member));
+ .until(() -> containsUnreachable(followerCluster, follower2Member));
- ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").get();
+ ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
// to simulate a follower not being able to receive messages, but still being able to send messages and becoming
// candidate, we can just send a couple of RequestVotes to both leader and follower.
ds2.close();
}
+ private static Boolean containsUnreachable(final Cluster cluster, final Member member) {
+ // unreachableMembers() returns scala.collection.immutable.Set, but we are using scala.collection.Set to fix JDT
+ // see https://bugs.eclipse.org/bugs/show_bug.cgi?id=468276#c32
+ final Set<Member> members = cluster.readView().unreachableMembers();
+ return members.contains(member);
+ }
+
@Test
public void testInstallSnapshot() throws Exception {
final String testName = "testInstallSnapshot";
CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
- final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.empty());
+ final NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.of());
final Snapshot initialSnapshot = Snapshot.create(
new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
initDatastoresWithCars(testName);
- final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
- CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", readOptional.isPresent());
- assertEquals("Node", carsNode, readOptional.get());
+ assertEquals(Optional.of(carsNode), leaderDistributedDataStore.newReadOnlyTransaction().read(
+ CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS));
verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
initialSnapshot, snapshotRoot);
@Test
public void testReadWriteMessageSlicing() throws Exception {
- // The slicing is only implemented for tell-based protocol
- Assume.assumeTrue(ClientBackedDataStore.class.isAssignableFrom(testParameter));
-
leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
followerDatastoreContextBuilder.maximumMessageSliceSize(100);
initDatastoresWithCars("testLargeReadReplySlicing");
final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
- final NormalizedNode<?, ?> carsNode = CarsModel.create();
+ final NormalizedNode carsNode = CarsModel.create();
rwTx.write(CarsModel.BASE_PATH, carsNode);
verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
leaderTestKit.doCommit(initialWriteTx.ready());
- try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
- testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false)) {
+ try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+ MODULE_SHARDS_CARS_1_2_3, false)) {
final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
.getLocalShards().get("cars").getActor();
final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
.getLocalShards().get("cars").getActor();
- member2Cars.tell(new StartDropMessages(AppendEntries.class), null);
- member3Cars.tell(new StartDropMessages(AppendEntries.class), null);
+ member2Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
+ member3Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
"member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
-1), member3Cars);
- member2Cars.tell(new StopDropMessages(AppendEntries.class), null);
- member3Cars.tell(new StopDropMessages(AppendEntries.class), null);
+ member2Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
+ member3Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
await("Is tx stuck in COMMIT_PENDING")
.atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
@Test
public void testSnapshotOnRootOverwrite() throws Exception {
- if (!DistributedDataStore.class.isAssignableFrom(testParameter)) {
- // FIXME: ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate
- return;
- }
-
- final String testName = "testSnapshotOnRootOverwrite";
- final String[] shards = {"cars", "default"};
- initDatastores(testName, "module-shards-default-cars-member1-and-2.conf", shards,
- leaderDatastoreContextBuilder.snapshotOnRootOverwrite(true),
- followerDatastoreContextBuilder.snapshotOnRootOverwrite(true));
+ initDatastores("testSnapshotOnRootOverwrite", "module-shards-default-cars-member1-and-2.conf",
+ new String[] {"cars", "default"},
+ leaderDatastoreContextBuilder.snapshotOnRootOverwrite(true),
+ followerDatastoreContextBuilder.snapshotOnRootOverwrite(true));
leaderTestKit.waitForMembersUp("member-2");
- final ContainerNode rootNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(SchemaContext.NAME))
- .withChild((ContainerNode) CarsModel.create())
+ final ContainerNode rootNode = Builders.containerBuilder()
+ .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
+ .withChild(CarsModel.create())
.build();
- leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
+ leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
+ // FIXME: CONTROLLER-2020: ClientBackedDatastore does not have stable indexes/term,
+ // the snapshot index seems to fluctuate
+ assumeTrue(false);
IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
state -> assertEquals(1, state.getSnapshotIndex()));
verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
// root overwrite so expect a snapshot
- leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
+ leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
// this was a real snapshot so everything should be in it(1(DisableTrackingPayload) + 1 + 10 + 1)
IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 12);
}
- private void verifySnapshot(final String persistenceId, final long lastAppliedIndex) {
+ private static void verifySnapshot(final String persistenceId, final long lastAppliedIndex) {
await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
List<Snapshot> snap = InMemorySnapshotStore.getSnapshots(persistenceId, Snapshot.class);
assertEquals(1, snap.size());
}
private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
- final NormalizedNode<?, ?> expRoot) {
+ final NormalizedNode expRoot) {
assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
MetadataShardDataTreeSnapshot shardSnapshot =
(MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
- assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
+ assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().orElseThrow());
}
- private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
+ private static void sendDatastoreContextUpdate(final ClientBackedDataStore dataStore, final Builder builder) {
final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
- final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
+ final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
- Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
- Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
+ doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
+ doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(anyString());
dataStore.onDatastoreContextUpdated(mockContextFactory);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import akka.util.Timeout;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.FiniteDuration;
-
-public class DistributedDataStoreTest extends AbstractActorTest {
- private static final ClientIdentifier UNKNOWN_ID = ClientIdentifier.create(
- FrontendIdentifier.create(MemberName.forName("local"), FrontendType.forName("unknown")), 0);
-
- private static SchemaContext SCHEMA_CONTEXT;
-
- @Mock
- private ActorUtils actorUtils;
-
- @Mock
- private DatastoreContext datastoreContext;
-
- @Mock
- private Timeout shardElectionTimeout;
-
- @BeforeClass
- public static void beforeClass() {
- SCHEMA_CONTEXT = TestModel.createTestContext();
- }
-
- @AfterClass
- public static void afterClass() {
- SCHEMA_CONTEXT = null;
- }
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- doReturn(SCHEMA_CONTEXT).when(actorUtils).getSchemaContext();
- doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
- }
-
- @Test
- public void testRateLimitingUsedInReadWriteTxCreation() {
- try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
- distributedDataStore.newReadWriteTransaction();
-
- verify(actorUtils, times(1)).acquireTxCreationPermit();
- }
- }
-
- @Test
- public void testRateLimitingUsedInWriteOnlyTxCreation() {
- try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
- distributedDataStore.newWriteOnlyTransaction();
-
- verify(actorUtils, times(1)).acquireTxCreationPermit();
- }
- }
-
- @Test
- public void testRateLimitingNotUsedInReadOnlyTxCreation() {
- try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
- distributedDataStore.newReadOnlyTransaction();
- distributedDataStore.newReadOnlyTransaction();
- distributedDataStore.newReadOnlyTransaction();
-
- verify(actorUtils, times(0)).acquireTxCreationPermit();
- }
- }
-
- @Test
- public void testWaitTillReadyBlocking() {
- doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
- doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
- doReturn(1).when(datastoreContext).getInitialSettleTimeoutMultiplier();
- doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
- try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
- long start = System.currentTimeMillis();
-
- distributedDataStore.waitTillReady();
-
- long end = System.currentTimeMillis();
-
- assertTrue("Expected to be blocked for 50 millis", end - start >= 50);
- }
- }
-
- @Test
- public void testWaitTillReadyCountDown() {
- try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
- doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
- doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
- doReturn(FiniteDuration.apply(5000, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-
- Executors.newSingleThreadExecutor().submit(() -> {
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- distributedDataStore.readinessFuture().set(null);
- });
-
- long start = System.currentTimeMillis();
-
- distributedDataStore.waitTillReady();
-
- long end = System.currentTimeMillis();
-
- assertTrue("Expected to be released in 500 millis", end - start < 5000);
- }
- }
-}
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
@RunWith(Parameterized.class)
public class DistributedDataStoreWithSegmentedJournalIntegrationTest
@Parameters(name = "{0}")
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {
- { TestDistributedDataStore.class }, { TestClientBackedDataStore.class }
+ { TestClientBackedDataStore.class }
});
}
@Test
public void testManyWritesDeletes() throws Exception {
final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
- CollectionNodeBuilder<MapEntryNode, MapNode> carMapBuilder = ImmutableNodes.mapNodeBuilder(CAR_QNAME);
+ CollectionNodeBuilder<MapEntryNode, SystemMapNode> carMapBuilder = ImmutableNodes.mapNodeBuilder(CAR_QNAME);
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+ "module-shards-cars-member-1.conf", true, "cars")) {
DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
}
}
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+ final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
MapNode cars = carMapBuilder.build();
- assertEquals("cars not matching result", cars, optional.get());
+ assertEquals("cars not matching result", cars, optional.orElseThrow());
txChain.close();
}
// test restoration from journal and verify data matches
- try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
- testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+ try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+ "module-shards-cars-member-1.conf", true, "cars")) {
DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
MapNode cars = carMapBuilder.build();
- final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+ final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("restored cars do not match snapshot", cars, optional.get());
+ assertEquals("restored cars do not match snapshot", Optional.of(cars), optional);
txChain.close();
}
*/
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Mockito.mock;
+
import akka.actor.ActorRef;
-import java.util.Arrays;
-import java.util.Collection;
-import org.junit.Assert;
+import java.util.List;
import org.junit.Test;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
ForwardingDataTreeChangeListener forwardingListener = new ForwardingDataTreeChangeListener(
getSystem().actorSelection(actorRef.path()), ActorRef.noSender());
- Collection<DataTreeCandidate> expected = Arrays.asList(Mockito.mock(DataTreeCandidate.class));
+ List<DataTreeCandidate> expected = List.of(mock(DataTreeCandidate.class));
forwardingListener.onDataTreeChanged(expected);
DataTreeChanged actual = MessageCollectorActor.expectFirstMatching(actorRef, DataTreeChanged.class, 5000);
- Assert.assertSame(expected, actual.getChanges());
+ assertSame(expected, actual.getChanges());
}
}
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
public class FrontendReadWriteTransactionTest {
assertNotNull(handleRequest(readyReq));
verify(mockParent).finishTransaction(same(shardTransaction), eq(Optional.empty()));
- handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.empty(), true));
+ handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
}
@Test(expected = IllegalStateException.class)
assertNull(handleRequest(abortReq));
verify(mockParent).abortTransaction(same(shardTransaction), any(Runnable.class));
- handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.empty(), true));
+ handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.lang.reflect.Constructor;
import java.util.Optional;
import java.util.Set;
-import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
return datastoreContextBuilder;
}
- public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
- final boolean waitUntilLeader,
- final EffectiveModelContext schemaContext) throws Exception {
- return setupDistributedDataStore(typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, schemaContext);
+ public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+ final String typeName, final String... shardNames) throws Exception {
+ return setupDataStore(implementation, typeName, "module-shards.conf", true, SchemaContextHelper.full(),
+ shardNames);
}
- public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
- final String modulesConfig,
- final boolean waitUntilLeader,
- final EffectiveModelContext schemaContext,
- final String... shardNames) throws Exception {
- return (DistributedDataStore) setupAbstractDataStore(DistributedDataStore.class, typeName, moduleShardsConfig,
- modulesConfig, waitUntilLeader, schemaContext, shardNames);
+ public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+ final String typeName, final boolean waitUntilLeader, final String... shardNames) throws Exception {
+ return setupDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
+ SchemaContextHelper.full(), shardNames);
}
- public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
- final String typeName, final String... shardNames)
- throws Exception {
- return setupAbstractDataStore(implementation, typeName, "module-shards.conf", true,
- SchemaContextHelper.full(), shardNames);
- }
-
- public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
- final String typeName, final boolean waitUntilLeader,
- final String... shardNames) throws Exception {
- return setupAbstractDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
- SchemaContextHelper.full(), shardNames);
- }
-
- public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
- final String typeName, final String moduleShardsConfig,
- final boolean waitUntilLeader, final String... shardNames)
- throws Exception {
- return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
- SchemaContextHelper.full(), shardNames);
+ public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+ final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+ final String... shardNames) throws Exception {
+ return setupDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
+ SchemaContextHelper.full(), shardNames);
}
- public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
- final String typeName, final String moduleShardsConfig,
- final boolean waitUntilLeader,
- final EffectiveModelContext schemaContext,
- final String... shardNames) throws Exception {
- return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
- schemaContext, shardNames);
+ public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+ final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+ final EffectiveModelContext schemaContext, final String... shardNames) throws Exception {
+ return setupDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
+ schemaContext, shardNames);
}
- private AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
- final String typeName, final String moduleShardsConfig,
- final String modulesConfig, final boolean waitUntilLeader,
- final EffectiveModelContext schemaContext,
- final String... shardNames)
- throws Exception {
+ private ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+ final String typeName, final String moduleShardsConfig, final String modulesConfig,
+ final boolean waitUntilLeader, final EffectiveModelContext schemaContext, final String... shardNames)
+ throws Exception {
final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
final Configuration config = new ConfigurationImpl(moduleShardsConfig, modulesConfig);
setDataStoreName(typeName);
- // Make sure we set up datastore context correctly
- datastoreContextBuilder.useTellBasedProtocol(ClientBackedDataStore.class.isAssignableFrom(implementation));
-
final DatastoreContext datastoreContext = datastoreContextBuilder.build();
- final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
+ final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
+ doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
+ doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(anyString());
- final Constructor<? extends AbstractDataStore> constructor = implementation.getDeclaredConstructor(
- ActorSystem.class, ClusterWrapper.class, Configuration.class,
- DatastoreContextFactory.class, DatastoreSnapshot.class);
+ final var constructor = implementation.getDeclaredConstructor(ActorSystem.class, ClusterWrapper.class,
+ Configuration.class, DatastoreContextFactory.class, DatastoreSnapshot.class);
- final AbstractDataStore dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
+ final var dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
restoreFromSnapshot);
dataStore.onModelContextUpdated(schemaContext);
}
}
- public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
- final EffectiveModelContext schemaContext) {
- final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
- final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
- setDataStoreName(typeName);
-
- final DatastoreContext datastoreContext = getDatastoreContextBuilder().build();
-
- final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
- final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
- configuration, mockContextFactory, restoreFromSnapshot);
-
- dataStore.onModelContextUpdated(schemaContext);
-
- datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
- return dataStore;
- }
-
- public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
- final EffectiveModelContext schemaContext,
- final LogicalDatastoreType storeType) {
- final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
- final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
- setDataStoreName(typeName);
-
- final DatastoreContext datastoreContext =
- getDatastoreContextBuilder().logicalStoreType(storeType).build();
-
- final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
- final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
- configuration, mockContextFactory, restoreFromSnapshot);
-
- dataStore.onModelContextUpdated(schemaContext);
-
- datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
- return dataStore;
- }
-
public void waitUntilLeader(final ActorUtils actorUtils, final String... shardNames) {
for (String shardName: shardNames) {
ActorRef shard = findLocalShard(actorUtils, shardName);
}
public static ActorRef findLocalShard(final ActorUtils actorUtils, final String shardName) {
- ActorRef shard = null;
- for (int i = 0; i < 20 * 5 && shard == null; i++) {
+ for (int i = 0; i < 20 * 5; i++) {
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
if (shardReply.isPresent()) {
- shard = shardReply.get();
+ return shardReply.orElseThrow();
}
}
- return shard;
+ return null;
}
public static void waitUntilShardIsDown(final ActorUtils actorUtils, final String shardName) {
throw new IllegalStateException("Shard[" + shardName + " did not shutdown in time");
}
- public static void verifyShardStats(final AbstractDataStore datastore, final String shardName,
+ public static void verifyShardStats(final ClientBackedDataStore datastore, final String shardName,
final ShardStatsVerifier verifier) throws Exception {
ActorUtils actorUtils = datastore.getActorUtils();
throw lastError;
}
- public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
+ public static void verifyShardState(final ClientBackedDataStore datastore, final String shardName,
final Consumer<OnDemandShardState> verifier) throws Exception {
ActorUtils actorUtils = datastore.getActorUtils();
throw lastError;
}
- void testWriteTransaction(final AbstractDataStore dataStore, final YangInstanceIdentifier nodePath,
- final NormalizedNode<?, ?> nodeToWrite) throws Exception {
+ void testWriteTransaction(final ClientBackedDataStore dataStore, final YangInstanceIdentifier nodePath,
+ final NormalizedNode nodeToWrite) throws Exception {
// 1. Create a write-only Tx
// 5. Verify the data in the store
DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
-
- Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
- assertTrue("isPresent", optional.isPresent());
- assertEquals("Data node", nodeToWrite, optional.get());
+ assertEquals(Optional.of(nodeToWrite), readTx.read(nodePath).get(5, TimeUnit.SECONDS));
}
public void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
cohort.commit().get(5, TimeUnit.SECONDS);
}
- @SuppressWarnings("checkstyle:IllegalCatch")
- void assertExceptionOnCall(final Callable<Void> callable, final Class<? extends Exception> expType) {
- try {
- callable.call();
- fail("Expected " + expType.getSimpleName());
- } catch (Exception e) {
- assertEquals("Exception type", expType, e.getClass());
- }
- }
-
void assertExceptionOnTxChainCreates(final DOMStoreTransactionChain txChain,
final Class<? extends Exception> expType) {
- assertExceptionOnCall(() -> {
- txChain.newWriteOnlyTransaction();
- return null;
- }, expType);
-
- assertExceptionOnCall(() -> {
- txChain.newReadWriteTransaction();
- return null;
- }, expType);
-
- assertExceptionOnCall(() -> {
- txChain.newReadOnlyTransaction();
- return null;
- }, expType);
+ assertThrows(expType, () -> txChain.newWriteOnlyTransaction());
+ assertThrows(expType, () -> txChain.newReadWriteTransaction());
+ assertThrows(expType, () -> txChain.newReadOnlyTransaction());
}
public interface ShardStatsVerifier {
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+
+public class JsonExportTest extends AbstractShardTest {
+ private static final String DUMMY_DATA = "Dummy data as snapshot sequence number is set to 0 in "
+ + "InMemorySnapshotStore and journal recovery seq number will start from 1";
+ private static final String EXPECTED_JOURNAL_FILE = "expectedJournalExport.json";
+ private static final String EXPECTED_SNAPSHOT_FILE = "expectedSnapshotExport.json";
+ private static String actualJournalFilePath;
+ private static String actualSnapshotFilePath;
+ private DatastoreContext datastoreContext;
+
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ final var exportTmpFolder = temporaryFolder.newFolder("persistence-export");
+ actualJournalFilePath = exportTmpFolder.getAbsolutePath() + "/journals/"
+ + "member-1-shard-inventory-config" + nextShardNum + "-journal.json";
+ actualSnapshotFilePath = exportTmpFolder.getAbsolutePath() + "/snapshots/"
+ + "member-1-shard-inventory-config" + nextShardNum + "-snapshot.json";
+ datastoreContext = DatastoreContext.newBuilder().shardJournalRecoveryLogBatchSize(1)
+ .shardSnapshotBatchCount(5000).shardHeartbeatIntervalInMillis(HEARTBEAT_MILLIS).persistent(true)
+ .exportOnRecovery(ExportOnRecovery.Json)
+ .recoveryExportBaseDir(exportTmpFolder.getAbsolutePath()).build();
+ }
+
+ @Override
+ protected DatastoreContext newDatastoreContext() {
+ return datastoreContext;
+ }
+
+ @Test
+ public void testJsonExport() throws Exception {
+ // Set up the InMemorySnapshotStore.
+ final var source = setupInMemorySnapshotStore();
+
+ final var writeMod = source.takeSnapshot().newModification();
+ writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+ .build());
+ writeMod.ready();
+ InMemoryJournal.addEntry(shardID.toString(), 0, DUMMY_DATA);
+
+ // Set up the InMemoryJournal.
+ InMemoryJournal.addEntry(shardID.toString(), 1, new SimpleReplicatedLogEntry(0, 1,
+ payloadForModification(source, writeMod, nextTransactionId())));
+
+ final int nListEntries = 16;
+ final Set<Integer> listEntryKeys = new HashSet<>();
+
+ // Add some ModificationPayload entries
+ for (int i = 1; i <= nListEntries; i++) {
+ final Integer value = i;
+ listEntryKeys.add(value);
+
+ final var path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value).build();
+
+ final var mod = source.takeSnapshot().newModification();
+ mod.merge(path, ImmutableNodes.newMapEntryBuilder()
+ .withNodeIdentifier(
+ NodeIdentifierWithPredicates.of(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value))
+ .withChild(ImmutableNodes.leafNode(TestModel.ID_QNAME, value))
+ .build());
+ mod.ready();
+
+ InMemoryJournal.addEntry(shardID.toString(), i + 1, new SimpleReplicatedLogEntry(i, 1,
+ payloadForModification(source, mod, nextTransactionId())));
+ }
+
+ InMemoryJournal.addEntry(shardID.toString(), nListEntries + 2,
+ new ApplyJournalEntries(nListEntries));
+
+ testRecovery(listEntryKeys, false);
+
+ verifyJournalExport();
+ verifySnapshotExport();
+ }
+
+ private static void verifyJournalExport() throws IOException {
+ final String expectedJournalData = readExpectedFile(EXPECTED_JOURNAL_FILE);
+ final String actualJournalData = readActualFile(actualJournalFilePath);
+ assertEquals("Exported journal is not expected ", expectedJournalData, actualJournalData);
+ }
+
+ private static void verifySnapshotExport() throws IOException {
+ final String expectedSnapshotData = readExpectedFile(EXPECTED_SNAPSHOT_FILE);
+ final String actualSnapshotData = readActualFile(actualSnapshotFilePath);
+ assertEquals("Exported snapshot is not expected ", expectedSnapshotData, actualSnapshotData);
+ }
+
+ private static String readExpectedFile(final String filePath) throws IOException {
+ final File exportFile = new File(JsonExportTest.class.getClassLoader().getResource(filePath).getFile());
+ return new String(Files.readAllBytes(Path.of(exportFile.getPath())));
+ }
+
+ private static String readActualFile(final String filePath) throws IOException {
+ final File exportFile = new File(filePath);
+ await().atMost(10, TimeUnit.SECONDS).until(exportFile::exists);
+ return new String(Files.readAllBytes(Path.of(filePath)));
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-public class LocalTransactionContextTest {
-
- @Mock
- private OperationLimiter limiter;
-
- @Mock
- private DOMStoreReadWriteTransaction readWriteTransaction;
-
- @Mock
- private LocalTransactionReadySupport mockReadySupport;
-
- private LocalTransactionContext localTransactionContext;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
- localTransactionContext = new LocalTransactionContext(readWriteTransaction, limiter.getIdentifier(),
- mockReadySupport) {
- @Override
- protected DOMStoreWriteTransaction getWriteDelegate() {
- return readWriteTransaction;
- }
-
- @Override
- protected DOMStoreReadTransaction getReadDelegate() {
- return readWriteTransaction;
- }
- };
- }
-
- @Test
- public void testWrite() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
- localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
- verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
- }
-
- @Test
- public void testMerge() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
- localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
- verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
- }
-
- @Test
- public void testDelete() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- localTransactionContext.executeDelete(yangInstanceIdentifier, null);
- verify(readWriteTransaction).delete(yangInstanceIdentifier);
- }
-
- @Test
- public void testRead() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
- doReturn(FluentFutures.immediateFluentFuture(Optional.of(normalizedNode))).when(readWriteTransaction)
- .read(yangInstanceIdentifier);
- localTransactionContext.executeRead(new ReadData(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
- SettableFuture.create(), null);
- verify(readWriteTransaction).read(yangInstanceIdentifier);
- }
-
- @Test
- public void testExists() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- doReturn(FluentFutures.immediateTrueFluentFuture()).when(readWriteTransaction).exists(yangInstanceIdentifier);
- localTransactionContext.executeRead(new DataExists(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
- SettableFuture.create(), null);
- verify(readWriteTransaction).exists(yangInstanceIdentifier);
- }
-
- @Test
- public void testReady() {
- final LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
- doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(Optional.empty());
- doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, null);
-
- Future<ActorSelection> future = localTransactionContext.readyTransaction(null, Optional.empty());
- assertTrue(future.isCompleted());
-
- verify(mockReadySupport).onTransactionReady(readWriteTransaction, null);
- }
-
- @Test
- public void testReadyWithWriteError() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
- RuntimeException error = new RuntimeException("mock");
- doThrow(error).when(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
- localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
- localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
-
- verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
- doReadyWithExpectedError(error);
- }
-
- @Test
- public void testReadyWithMergeError() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
- RuntimeException error = new RuntimeException("mock");
- doThrow(error).when(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
- localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
- localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
-
- verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
- doReadyWithExpectedError(error);
- }
-
- @Test
- public void testReadyWithDeleteError() {
- YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
- RuntimeException error = new RuntimeException("mock");
- doThrow(error).when(readWriteTransaction).delete(yangInstanceIdentifier);
-
- localTransactionContext.executeDelete(yangInstanceIdentifier, null);
- localTransactionContext.executeDelete(yangInstanceIdentifier, null);
-
- verify(readWriteTransaction).delete(yangInstanceIdentifier);
-
- doReadyWithExpectedError(error);
- }
-
- private void doReadyWithExpectedError(final RuntimeException expError) {
- LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
- doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(Optional.empty());
- doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, expError);
-
- localTransactionContext.readyTransaction(null, Optional.empty());
- }
-}
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
private static final String MEMBER_1_ADDRESS = "akka://cluster-test@127.0.0.1:2558";
private IntegrationTestKit kit;
- private AbstractDataStore configDataStore;
- private AbstractDataStore operDataStore;
+ private ClientBackedDataStore configDataStore;
+ private ClientBackedDataStore operDataStore;
private DatastoreContext.Builder datastoreContextBuilder;
private boolean cleanedUp;
}
- public AbstractDataStore configDataStore() {
+ public ClientBackedDataStore configDataStore() {
return configDataStore;
}
- public AbstractDataStore operDataStore() {
+ public ClientBackedDataStore operDataStore() {
return operDataStore;
}
Stopwatch sw = Stopwatch.createStarted();
while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
CurrentClusterState state = Cluster.get(kit.getSystem()).state();
+
for (Member m : state.getUnreachable()) {
if (member.equals(m.getRoles().iterator().next())) {
return;
}
try {
- IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+ IntegrationTestKit.shutdownActorSystem(kit.getSystem(), true);
} catch (RuntimeException e) {
LoggerFactory.getLogger(MemberNode.class).warn("Failed to shutdown actor system", e);
}
}
}
- public static void verifyRaftState(final AbstractDataStore datastore, final String shardName,
+ public static void verifyRaftState(final ClientBackedDataStore datastore, final String shardName,
final RaftStateVerifier verifier) throws Exception {
ActorUtils actorUtils = datastore.getActorUtils();
throw lastError;
}
- public static void verifyRaftPeersPresent(final AbstractDataStore datastore, final String shardName,
+ public static void verifyRaftPeersPresent(final ClientBackedDataStore datastore, final String shardName,
final String... peerMemberNames) throws Exception {
final Set<String> peerIds = new HashSet<>();
for (String p: peerMemberNames) {
raftState.getPeerAddresses().keySet()));
}
- public static void verifyNoShardPresent(final AbstractDataStore datastore, final String shardName) {
+ public static void verifyNoShardPresent(final ClientBackedDataStore datastore, final String shardName) {
Stopwatch sw = Stopwatch.createStarted();
while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
Optional<ActorRef> shardReply = datastore.getActorUtils().findLocalShard(shardName);
* @return this Builder
*/
public Builder moduleShardsConfig(final String newModuleShardsConfig) {
- this.moduleShardsConfig = newModuleShardsConfig;
+ moduleShardsConfig = newModuleShardsConfig;
return this;
}
* @return this Builder
*/
public Builder akkaConfig(final String newAkkaConfig) {
- this.akkaConfig = newAkkaConfig;
+ akkaConfig = newAkkaConfig;
return this;
}
* @return this Builder
*/
public Builder useAkkaArtery(final boolean newUseAkkaArtery) {
- this.useAkkaArtery = newUseAkkaArtery;
+ useAkkaArtery = newUseAkkaArtery;
return this;
}
* @return this Builder
*/
public Builder testName(final String newTestName) {
- this.testName = newTestName;
+ testName = newTestName;
return this;
}
* @return this Builder
*/
public Builder waitForShardLeader(final String... shardNames) {
- this.waitForshardLeader = shardNames;
+ waitForshardLeader = shardNames;
return this;
}
* @return this Builder
*/
public Builder createOperDatastore(final boolean value) {
- this.createOperDatastore = value;
+ createOperDatastore = value;
return this;
}
* @return this Builder
*/
public Builder schemaContext(final EffectiveModelContext newSchemaContext) {
- this.schemaContext = newSchemaContext;
+ schemaContext = newSchemaContext;
return this;
}
String memberName = new ClusterWrapperImpl(system).getCurrentMemberName().getName();
node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-config-" + memberName);
- node.configDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
- "config_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
+ node.configDataStore = node.kit.setupDataStore(ClientBackedDataStore.class, "config_" + testName,
+ moduleShardsConfig, true, schemaContext, waitForshardLeader);
if (createOperDatastore) {
node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-oper-" + memberName);
- node.operDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
+ node.operDataStore = node.kit.setupDataStore(ClientBackedDataStore.class,
"oper_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore;
import java.util.concurrent.atomic.AtomicReference;
+@Deprecated(since = "9.0.0", forRemoval = true)
interface OperationCallback {
- OperationCallback NO_OP_CALLBACK = new OperationCallback() {
- @Override
- public void run() {
- }
-
- @Override
- public void success() {
- }
-
- @Override
- public void failure() {
- }
-
- @Override
- public void pause() {
- }
-
- @Override
- public void resume() {
- }
- };
-
class Reference extends AtomicReference<OperationCallback> {
private static final long serialVersionUID = 1L;
- Reference(OperationCallback initialValue) {
+ Reference(final OperationCallback initialValue) {
super(initialValue);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import akka.actor.ActorRef;
-import akka.actor.Status.Failure;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.OnComplete;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Test whether RmoteTransactionContext operates correctly.
- */
-public class RemoteTransactionContextTest extends AbstractActorTest {
- private static final TransactionIdentifier TX_ID = new TransactionIdentifier(new LocalHistoryIdentifier(
- ClientIdentifier.create(FrontendIdentifier.create(MemberName.forName("test"), FrontendType.forName("test")), 0),
- 0), 0);
-
- private OperationLimiter limiter;
- private RemoteTransactionContext txContext;
- private ActorUtils actorUtils;
- private TestKit kit;
-
- @Before
- public void before() {
- kit = new TestKit(getSystem());
- actorUtils = Mockito.spy(new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
- mock(Configuration.class)));
- limiter = new OperationLimiter(TX_ID, 4, 0);
- txContext = new RemoteTransactionContext(TX_ID, actorUtils.actorSelection(kit.getRef().path()), actorUtils,
- DataStoreVersions.CURRENT_VERSION, limiter);
- txContext.operationHandOffComplete();
- }
-
- /**
- * OperationLimiter should be correctly released when a failure, like AskTimeoutException occurs. Future reads
- * need to complete immediately with the failure and modifications should not be throttled and thrown away
- * immediately.
- */
- @Test
- public void testLimiterOnFailure() throws TimeoutException, InterruptedException {
- txContext.executeDelete(null, null);
- txContext.executeDelete(null, null);
- assertEquals(2, limiter.availablePermits());
-
- final Future<Object> sendFuture = txContext.sendBatchedModifications();
- assertEquals(2, limiter.availablePermits());
-
- BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
- assertEquals(2, msg.getModifications().size());
- assertEquals(1, msg.getTotalMessagesSent());
- sendReply(new Failure(new NullPointerException()));
- assertFuture(sendFuture, new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object success) {
- assertTrue(failure instanceof NullPointerException);
- assertEquals(4, limiter.availablePermits());
-
- // The transaction has failed, no throttling should occur
- txContext.executeDelete(null, null);
- assertEquals(4, limiter.availablePermits());
-
- // Executing a read should result in immediate failure
- final SettableFuture<Boolean> readFuture = SettableFuture.create();
- txContext.executeRead(new DataExists(), readFuture, null);
- assertTrue(readFuture.isDone());
- try {
- readFuture.get();
- fail("Read future did not fail");
- } catch (ExecutionException | InterruptedException e) {
- assertTrue(e.getCause() instanceof NullPointerException);
- }
- }
- });
-
- final Future<Object> commitFuture = txContext.directCommit(null);
-
- msg = kit.expectMsgClass(BatchedModifications.class);
- // Modification should have been thrown away by the dropped transmit induced by executeRead()
- assertEquals(0, msg.getModifications().size());
- assertTrue(msg.isDoCommitOnReady());
- assertTrue(msg.isReady());
- assertEquals(2, msg.getTotalMessagesSent());
- sendReply(new Failure(new IllegalStateException()));
- assertFuture(commitFuture, new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object success) {
- assertTrue(failure instanceof IllegalStateException);
- }
- });
-
- kit.expectNoMessage();
- }
-
- /**
- * OperationLimiter gives up throttling at some point -- {@link RemoteTransactionContext} needs to deal with that
- * case, too.
- */
- @Test
- public void testLimiterOnOverflowFailure() throws TimeoutException, InterruptedException {
- txContext.executeDelete(null, null);
- txContext.executeDelete(null, null);
- txContext.executeDelete(null, null);
- txContext.executeDelete(null, null);
- assertEquals(0, limiter.availablePermits());
- txContext.executeDelete(null, null);
- // Last acquire should have failed ...
- assertEquals(0, limiter.availablePermits());
-
- final Future<Object> future = txContext.sendBatchedModifications();
- assertEquals(0, limiter.availablePermits());
-
- BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
- // ... so we are sending 5 modifications ...
- assertEquals(5, msg.getModifications().size());
- assertEquals(1, msg.getTotalMessagesSent());
- sendReply(new Failure(new NullPointerException()));
-
- assertFuture(future, new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object success) {
- assertTrue(failure instanceof NullPointerException);
- // ... but they account for only 4 permits.
- assertEquals(4, limiter.availablePermits());
- }
- });
-
- kit.expectNoMessage();
- }
-
- private void sendReply(final Object message) {
- final ActorRef askActor = kit.getLastSender();
- kit.watch(askActor);
- kit.reply(new Failure(new IllegalStateException()));
- kit.expectTerminated(askActor);
- }
-
- private static void assertFuture(final Future<Object> future, final OnComplete<Object> complete)
- throws TimeoutException, InterruptedException {
- Await.ready(future, FiniteDuration.apply(3, TimeUnit.SECONDS));
- future.onComplete(complete, ExecutionContexts.fromExecutor(MoreExecutors.directExecutor()));
- }
-}
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
-import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
import akka.testkit.javadsl.TestKit;
import com.google.common.collect.ImmutableList;
import java.time.Duration;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
public class RootDataTreeChangeListenerProxyTest extends AbstractActorTest {
ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
ClusteredDOMDataTreeChangeListener.class);
- final YangInstanceIdentifier path = YangInstanceIdentifier.empty();
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of();
final RootDataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> rootListenerProxy =
new RootDataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener,
Set.of("shard-1", "shard-2"));
assertEquals(registerForShard1.getListenerActorPath(), registerForShard2.getListenerActorPath());
final TestKit kit2 = new TestKit(getSystem());
- final ActorRef rootListenerActor = getSystem().actorFor(registerForShard1.getListenerActorPath());
+ final ActorSelection rootListenerActor = getSystem().actorSelection(registerForShard1.getListenerActorPath());
rootListenerActor.tell(new EnableNotification(true, "test"), kit.getRef());
- final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(),
+ final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(),
PeopleModel.create());
rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit.getRef());
rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit2.getRef());
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ShardCommitCoordinationTest extends AbstractShardTest {
private static final Logger LOG = LoggerFactory.getLogger(ShardCommitCoordinationTest.class);
LOG.info("{} ending", testName);
}
- static void verifyInnerListEntry(TestActorRef<Shard> shard, int outerID, String innerID) {
+ static void verifyInnerListEntry(final TestActorRef<Shard> shard, final int outerID, final String innerID) {
final YangInstanceIdentifier path = innerEntryPath(outerID, innerID);
- final NormalizedNode<?, ?> innerListEntry = readStore(shard, path);
+ final NormalizedNode innerListEntry = readStore(shard, path);
assertNotNull(path + " not found", innerListEntry);
}
}
import org.mockito.invocation.InvocationOnMock;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
public final class ShardDataTreeMocking {
}
public static ShardDataTreeCohort immediateCanCommit(final ShardDataTreeCohort cohort) {
- final FutureCallback<Void> callback = mockCallback();
- doNothing().when(callback).onSuccess(null);
+ final FutureCallback<Empty> callback = mockCallback();
+ doNothing().when(callback).onSuccess(Empty.value());
cohort.canCommit(callback);
- verify(callback).onSuccess(null);
+ verify(callback).onSuccess(Empty.value());
verifyNoMoreInteractions(callback);
return cohort;
}
- public static FutureCallback<Void> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
- final FutureCallback<Void> callback = mockCallback();
- doNothing().when(callback).onSuccess(null);
+ public static FutureCallback<Empty> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
+ final FutureCallback<Empty> callback = mockCallback();
+ doNothing().when(callback).onSuccess(Empty.value());
doNothing().when(callback).onFailure(any(Throwable.class));
cohort.canCommit(callback);
return callback;
}).when(preCommitCallback).onSuccess(any(DataTreeCandidate.class));
doNothing().when(preCommitCallback).onFailure(any(Throwable.class));
- final FutureCallback<Void> canCommit = mockCallback();
+ final FutureCallback<Empty> canCommit = mockCallback();
doAnswer(invocation -> {
cohort.preCommit(preCommitCallback);
return null;
- }).when(canCommit).onSuccess(null);
+ }).when(canCommit).onSuccess(Empty.value());
doNothing().when(canCommit).onFailure(any(Throwable.class));
cohort.canCommit(canCommit);
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import java.io.IOException;
-import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
@Before
public void setUp() {
doReturn(Ticker.systemTicker()).when(mockShard).ticker();
- doReturn(mock(ShardStats.class)).when(mockShard).getShardMBean();
+ doReturn(new ShardStats("shardName", "mxBeanType", mockShard)).when(mockShard).getShardMBean();
doReturn(DATASTORE_CONTEXT).when(mockShard).getDatastoreContext();
fullSchema = SchemaContextHelper.full();
final DataTreeSnapshot snapshot1 = readOnlyShardDataTreeTransaction.getSnapshot();
- final Optional<NormalizedNode<?, ?>> optional = snapshot1.readNode(CarsModel.BASE_PATH);
+ final Optional<NormalizedNode> optional = snapshot1.readNode(CarsModel.BASE_PATH);
assertEquals(expectedCarsPresent, optional.isPresent());
- final Optional<NormalizedNode<?, ?>> optional1 = snapshot1.readNode(PeopleModel.BASE_PATH);
+ final Optional<NormalizedNode> optional1 = snapshot1.readNode(PeopleModel.BASE_PATH);
assertEquals(expectedPeoplePresent, optional1.isPresent());
}
candidates.add(addCar(shardDataTree));
candidates.add(removeCar(shardDataTree));
- final NormalizedNode<?, ?> expected = getCars(shardDataTree);
+ final NormalizedNode expected = getCars(shardDataTree);
applyCandidates(shardDataTree, candidates);
- final NormalizedNode<?, ?> actual = getCars(shardDataTree);
+ final NormalizedNode actual = getCars(shardDataTree);
assertEquals(expected, actual);
}
candidates.add(addCar(shardDataTree));
candidates.add(removeCar(shardDataTree));
- final NormalizedNode<?, ?> expected = getCars(shardDataTree);
+ final NormalizedNode expected = getCars(shardDataTree);
applyCandidates(shardDataTree, candidates);
- final NormalizedNode<?, ?> actual = getCars(shardDataTree);
+ final NormalizedNode actual = getCars(shardDataTree);
assertEquals(expected, actual);
}
addCar(shardDataTree, "optima");
verifyOnDataTreeChanged(listener, dtc -> {
- assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+ assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
assertEquals("getRootPath", CarsModel.newCarPath("optima"), dtc.getRootPath());
});
addCar(shardDataTree, "sportage");
verifyOnDataTreeChanged(listener, dtc -> {
- assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+ assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
assertEquals("getRootPath", CarsModel.newCarPath("sportage"), dtc.getRootPath());
});
verifyOnDataTreeChanged(listener, dtc -> {
ModificationType expType = expChanges.remove(dtc.getRootPath());
assertNotNull("Got unexpected change for " + dtc.getRootPath(), expType);
- assertEquals("getModificationType", expType, dtc.getRootNode().getModificationType());
+ assertEquals("getModificationType", expType, dtc.getRootNode().modificationType());
});
if (!expChanges.isEmpty()) {
final ShardDataTreeCohort cohort2 = newShardDataTreeCohort(snapshot ->
snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
- NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+ NormalizedNode peopleNode = PeopleModel.create();
final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot ->
snapshot.write(PeopleModel.BASE_PATH, peopleNode));
final ShardDataTreeCohort cohort4 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
immediateCanCommit(cohort1);
- final FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
- final FutureCallback<Void> canCommitCallback3 = coordinatedCanCommit(cohort3);
- final FutureCallback<Void> canCommitCallback4 = coordinatedCanCommit(cohort4);
+ final FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
+ final FutureCallback<Empty> canCommitCallback3 = coordinatedCanCommit(cohort3);
+ final FutureCallback<Empty> canCommitCallback4 = coordinatedCanCommit(cohort4);
final FutureCallback<DataTreeCandidate> preCommitCallback1 = coordinatedPreCommit(cohort1);
verify(preCommitCallback1).onSuccess(cohort1.getCandidate());
- verify(canCommitCallback2).onSuccess(null);
+ verify(canCommitCallback2).onSuccess(Empty.value());
final FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
verify(preCommitCallback2).onSuccess(cohort2.getCandidate());
- verify(canCommitCallback3).onSuccess(null);
+ verify(canCommitCallback3).onSuccess(Empty.value());
final FutureCallback<DataTreeCandidate> preCommitCallback3 = coordinatedPreCommit(cohort3);
verify(preCommitCallback3).onSuccess(cohort3.getCandidate());
- verify(canCommitCallback4).onSuccess(null);
+ verify(canCommitCallback4).onSuccess(Empty.value());
final FutureCallback<DataTreeCandidate> preCommitCallback4 = coordinatedPreCommit(cohort4);
verify(preCommitCallback4).onSuccess(cohort4.getCandidate());
final FutureCallback<UnsignedLong> commitCallback2 = coordinatedCommit(cohort2);
- verify(mockShard, never()).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+ verify(mockShard, never()).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
anyBoolean());
verifyNoMoreInteractions(commitCallback2);
final FutureCallback<UnsignedLong> commitCallback4 = coordinatedCommit(cohort4);
- verify(mockShard, never()).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+ verify(mockShard, never()).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
anyBoolean());
verifyNoMoreInteractions(commitCallback4);
final FutureCallback<UnsignedLong> commitCallback1 = coordinatedCommit(cohort1);
InOrder inOrder = inOrder(mockShard);
- inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
eq(true));
- inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
eq(false));
verifyNoMoreInteractions(commitCallback1);
verifyNoMoreInteractions(commitCallback2);
final FutureCallback<UnsignedLong> commitCallback3 = coordinatedCommit(cohort3);
inOrder = inOrder(mockShard);
- inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
eq(true));
- inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
eq(false));
verifyNoMoreInteractions(commitCallback3);
verifyNoMoreInteractions(commitCallback4);
final ShardDataTreeCohort cohort5 = newShardDataTreeCohort(snapshot ->
snapshot.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer()));
- final FutureCallback<Void> canCommitCallback5 = coordinatedCanCommit(cohort5);
+ final FutureCallback<Empty> canCommitCallback5 = coordinatedCanCommit(cohort5);
// The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
cohort1.getCandidate());
- shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3, commitCallback4);
inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
inOrder.verify(commitCallback3).onSuccess(any(UnsignedLong.class));
inOrder.verify(commitCallback4).onSuccess(any(UnsignedLong.class));
- verify(canCommitCallback5).onSuccess(null);
+ verify(canCommitCallback5).onSuccess(Empty.value());
final DataTreeSnapshot snapshot =
shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
- assertTrue("Car node present", optional.isPresent());
- assertEquals("Car node", carNode, optional.get());
-
- optional = snapshot.readNode(PeopleModel.BASE_PATH);
- assertTrue("People node present", optional.isPresent());
- assertEquals("People node", peopleNode, optional.get());
+ assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
+ assertEquals("People node", Optional.of(peopleNode), snapshot.readNode(PeopleModel.BASE_PATH));
}
@Test
final FutureCallback<UnsignedLong> commitCallback1 = immediate3PhaseCommit(cohort1);
InOrder inOrder = inOrder(mockShard);
- inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
eq(true));
- inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
eq(true));
- inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
eq(false));
// The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
cohort1.getCandidate());
- shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3);
inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
final DataTreeSnapshot snapshot =
shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
- assertTrue("Car node present", optional.isPresent());
- assertEquals("Car node", carNode, optional.get());
+ assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
}
@Test
inOrder.verify(commitCallback3).onSuccess(any(UnsignedLong.class));
final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(CarsModel.BASE_PATH);
+ Optional<NormalizedNode> optional = snapshot.readNode(CarsModel.BASE_PATH);
assertTrue("Car node present", optional.isPresent());
}
coordinatedPreCommit(cohort2);
coordinatedPreCommit(cohort3);
- FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
- doNothing().when(mockAbortCallback).onSuccess(null);
+ FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+ doNothing().when(mockAbortCallback).onSuccess(Empty.value());
cohort2.abort(mockAbortCallback);
- verify(mockAbortCallback).onSuccess(null);
+ verify(mockAbortCallback).onSuccess(Empty.value());
coordinatedPreCommit(cohort4);
coordinatedCommit(cohort1);
coordinatedCommit(cohort4);
InOrder inOrder = inOrder(mockShard);
- inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
eq(false));
- inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
eq(false));
- inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+ inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
eq(false));
// The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
cohort1.getCandidate());
- shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
- shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+ shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
final DataTreeSnapshot snapshot =
shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
- assertTrue("Car node present", optional.isPresent());
- assertEquals("Car node", carNode, optional.get());
+ Optional<NormalizedNode> optional = snapshot.readNode(carPath);
+ assertEquals("Car node", Optional.of(carNode), optional);
}
@SuppressWarnings("unchecked")
final ShardDataTreeCohort cohort2 = newShardDataTreeCohort(snapshot ->
snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
- NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+ NormalizedNode peopleNode = PeopleModel.create();
final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot ->
snapshot.write(PeopleModel.BASE_PATH, peopleNode));
immediateCanCommit(cohort1);
- FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
+ FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
coordinatedPreCommit(cohort1);
- verify(canCommitCallback2).onSuccess(null);
+ verify(canCommitCallback2).onSuccess(Empty.value());
- FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
- doNothing().when(mockAbortCallback).onSuccess(null);
+ FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+ doNothing().when(mockAbortCallback).onSuccess(Empty.value());
cohort1.abort(mockAbortCallback);
- verify(mockAbortCallback).onSuccess(null);
+ verify(mockAbortCallback).onSuccess(Empty.value());
FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
verify(preCommitCallback2).onFailure(any(Throwable.class));
final DataTreeSnapshot snapshot =
shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(PeopleModel.BASE_PATH);
- assertTrue("People node present", optional.isPresent());
- assertEquals("People node", peopleNode, optional.get());
+ Optional<NormalizedNode> optional = snapshot.readNode(PeopleModel.BASE_PATH);
+ assertEquals("People node", Optional.of(peopleNode), optional);
}
@Test
public void testUintCommitPayload() throws IOException {
shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(),
- DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), bigIntegerRoot()),
- PayloadVersion.SODIUM_SR1));
+ DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), bigIntegerRoot()),
+ PayloadVersion.POTASSIUM));
assertCarsUint64();
}
.withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
.withChild(Builders.mapBuilder()
.withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
- .withChild(createCar("one", BigInteger.ONE))
+ .withChild(createCar("one", Uint64.ONE))
.build())
.build());
mod.ready();
dataTree.commit(first);
mod = dataTree.takeSnapshot().newModification();
- mod.write(CarsModel.newCarPath("two"), createCar("two", BigInteger.TWO));
+ mod.write(CarsModel.newCarPath("two"), createCar("two", Uint64.TWO));
mod.ready();
dataTree.validate(mod);
final DataTreeCandidate second = dataTree.prepare(mod);
mod = dataTree.takeSnapshot().newModification();
mod.merge(CarsModel.CAR_LIST_PATH, Builders.mapBuilder()
.withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
- .withChild(createCar("three", BigInteger.TEN))
+ .withChild(createCar("three", Uint64.TEN))
.build());
mod.ready();
dataTree.validate(mod);
dataTree.commit(third);
// Apply first candidate as a snapshot
- shardDataTree.applyRecoverySnapshot(
- new ShardSnapshotState(new MetadataShardDataTreeSnapshot(first.getRootNode().getDataAfter().get()), true));
+ shardDataTree.applyRecoverySnapshot(new ShardSnapshotState(
+ new MetadataShardDataTreeSnapshot(first.getRootNode().getDataAfter()), true));
// Apply the other two snapshots as transactions
shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), second,
- PayloadVersion.SODIUM_SR1));
+ PayloadVersion.POTASSIUM));
shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), third,
- PayloadVersion.SODIUM_SR1));
+ PayloadVersion.POTASSIUM));
// Verify uint translation
final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- final NormalizedNode<?, ?> cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).get();
assertEquals(Builders.mapBuilder()
.withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
.withChild(createCar("one", Uint64.ONE))
.withChild(createCar("two", Uint64.TWO))
.withChild(createCar("three", Uint64.TEN))
- .build(), cars);
+ .build(), snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow());
}
private void assertCarsUint64() {
final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
- final NormalizedNode<?, ?> cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).get();
+ final NormalizedNode cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow();
assertEquals(Builders.mapBuilder()
.withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
private static ContainerNode bigIntegerRoot() {
return Builders.containerBuilder()
- .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME))
- .withChild(Builders.containerBuilder()
- .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
- .withChild(Builders.mapBuilder()
- .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
- // Note: BigInteger
- .withChild(createCar("foo", BigInteger.ONE))
- .build())
+ .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME))
+ .withChild(Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+ .withChild(Builders.mapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+ .withChild(createCar("foo", Uint64.ONE))
.build())
- .build();
+ .build())
+ .build();
}
private static MapEntryNode createCar(final String name, final Object value) {
return Builders.mapEntryBuilder()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(CarsModel.CAR_QNAME,CarsModel.CAR_NAME_QNAME, name))
- .withChild(ImmutableNodes.leafNode(CarsModel.CAR_NAME_QNAME, name))
- // Note: old BigInteger
- .withChild(ImmutableNodes.leafNode(CarsModel.CAR_PRICE_QNAME, value))
- .build();
+ .withNodeIdentifier(NodeIdentifierWithPredicates.of(CarsModel.CAR_QNAME, CarsModel.CAR_NAME_QNAME, name))
+ .withChild(ImmutableNodes.leafNode(CarsModel.CAR_NAME_QNAME, name))
+ // Note: old BigInteger
+ .withChild(ImmutableNodes.leafNode(CarsModel.CAR_PRICE_QNAME, value))
+ .build();
}
private ShardDataTreeCohort newShardDataTreeCohort(final DataTreeOperation operation) {
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void verifyOnDataTreeChanged(final DOMDataTreeChangeListener listener,
final Consumer<DataTreeCandidate> callback) {
- ArgumentCaptor<Collection> changes = ArgumentCaptor.forClass(Collection.class);
+ ArgumentCaptor<List> changes = ArgumentCaptor.forClass(List.class);
verify(listener, atLeastOnce()).onDataTreeChanged(changes.capture());
for (Collection list : changes.getAllValues()) {
for (Object dtc : list) {
reset(listener);
}
- private static NormalizedNode<?, ?> getCars(final ShardDataTree shardDataTree) {
+ private static NormalizedNode getCars(final ShardDataTree shardDataTree) {
final ReadOnlyShardDataTreeTransaction readOnlyShardDataTreeTransaction =
shardDataTree.newReadOnlyTransaction(nextTransactionId());
final DataTreeSnapshot snapshot1 = readOnlyShardDataTreeTransaction.getSnapshot();
- final Optional<NormalizedNode<?, ?>> optional = snapshot1.readNode(CarsModel.BASE_PATH);
+ final Optional<NormalizedNode> optional = snapshot1.readNode(CarsModel.BASE_PATH);
assertTrue(optional.isPresent());
- return optional.get();
+ return optional.orElseThrow();
}
private static DataTreeCandidate addCar(final ShardDataTree shardDataTree) {
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
return dataTree.prepare(modification);
}
- private Optional<NormalizedNode<?,?>> readCars(final ShardDataTree shardDataTree) {
+ private Optional<NormalizedNode> readCars(final ShardDataTree shardDataTree) {
final DataTree dataTree = shardDataTree.getDataTree();
// FIXME: this should not be called here
dataTree.setEffectiveModelContext(peopleSchemaContext);
return shardDataTree.readNode(CarsModel.BASE_PATH);
}
- private Optional<NormalizedNode<?,?>> readPeople(final ShardDataTree shardDataTree) {
+ private Optional<NormalizedNode> readPeople(final ShardDataTree shardDataTree) {
final DataTree dataTree = shardDataTree.getDataTree();
// FIXME: this should not be called here
dataTree.setEffectiveModelContext(peopleSchemaContext);
dataTree.commit(dataTree.prepare(modification));
return new ShardSnapshotState(new MetadataShardDataTreeSnapshot(dataTree.takeSnapshot().readNode(
- YangInstanceIdentifier.empty()).get()));
+ YangInstanceIdentifier.of()).orElseThrow()));
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import java.lang.management.ManagementFactory;
import java.text.SimpleDateFormat;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
@Before
public void setUp() throws Exception {
-
shardStats = new ShardStats("shard-1", "DataStore", null);
shardStats.registerMBean();
mbeanServer = ManagementFactory.getPlatformMBeanServer();
@Test
public void testGetShardName() throws Exception {
-
- Object attribute = mbeanServer.getAttribute(testMBeanName, "ShardName");
- Assert.assertEquals(attribute, "shard-1");
-
+ assertEquals("shard-1", mbeanServer.getAttribute(testMBeanName, "ShardName"));
}
@Test
shardStats.incrementCommittedTransactionCount();
//now let us get from MBeanServer what is the transaction count.
- Object attribute = mbeanServer.getAttribute(testMBeanName,
- "CommittedTransactionsCount");
- Assert.assertEquals(attribute, 3L);
-
-
+ assertEquals(3L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
}
@Test
public void testGetLastCommittedTransactionTime() throws Exception {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
- Assert.assertEquals(shardStats.getLastCommittedTransactionTime(),
- sdf.format(new Date(0L)));
+ assertEquals(sdf.format(new Date(0L)), shardStats.getLastCommittedTransactionTime());
long millis = System.currentTimeMillis();
shardStats.setLastCommittedTransactionTime(millis);
//now let us get from MBeanServer what is the transaction count.
Object attribute = mbeanServer.getAttribute(testMBeanName,
"LastCommittedTransactionTime");
- Assert.assertEquals(attribute, sdf.format(new Date(millis)));
- Assert.assertNotEquals(attribute,
- sdf.format(new Date(millis - 1)));
-
+ assertEquals(sdf.format(new Date(millis)), attribute);
+ assertNotEquals(attribute, sdf.format(new Date(millis - 1)));
}
@Test
shardStats.incrementFailedTransactionsCount();
shardStats.incrementFailedTransactionsCount();
-
//now let us get from MBeanServer what is the transaction count.
- Object attribute =
- mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount");
- Assert.assertEquals(attribute, 2L);
+ assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount"));
}
@Test
shardStats.incrementAbortTransactionsCount();
shardStats.incrementAbortTransactionsCount();
-
//now let us get from MBeanServer what is the transaction count.
- Object attribute =
- mbeanServer.getAttribute(testMBeanName, "AbortTransactionsCount");
- Assert.assertEquals(attribute, 2L);
+ assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "AbortTransactionsCount"));
}
@Test
shardStats.incrementFailedReadTransactionsCount();
shardStats.incrementFailedReadTransactionsCount();
-
//now let us get from MBeanServer what is the transaction count.
- Object attribute =
- mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals(attribute, 2L);
+ assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
}
@Test
public void testResetTransactionCounters() throws Exception {
-
//let us increment committed transactions count and then check
shardStats.incrementCommittedTransactionCount();
shardStats.incrementCommittedTransactionCount();
shardStats.incrementCommittedTransactionCount();
//now let us get from MBeanServer what is the transaction count.
- Object attribute = mbeanServer.getAttribute(testMBeanName,
- "CommittedTransactionsCount");
- Assert.assertEquals(attribute, 3L);
+ assertEquals(3L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
//let us increment FailedReadTransactions count and then check
shardStats.incrementFailedReadTransactionsCount();
shardStats.incrementFailedReadTransactionsCount();
-
//now let us get from MBeanServer what is the transaction count.
- attribute =
- mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals(attribute, 2L);
-
+ assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
//here we will reset the counters and check the above ones are 0 after reset
mbeanServer.invoke(testMBeanName, "resetTransactionCounters", null, null);
//now let us get from MBeanServer what is the transaction count.
- attribute = mbeanServer.getAttribute(testMBeanName,
- "CommittedTransactionsCount");
- Assert.assertEquals(attribute, 0L);
-
- attribute =
- mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals(attribute, 0L);
-
-
+ assertEquals(0L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
+ assertEquals(0L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
}
}
*/
package org.opendaylight.controller.cluster.datastore;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
CreateTransactionReply.class);
final String path = reply.getTransactionPath().toString();
- assertTrue("Unexpected transaction path " + path, path.startsWith(String.format(
- "akka://test/user/testCreateTransaction/shard-%s-%s:ShardTransactionTest@0:",
+ assertThat(path, containsString(String.format("/user/testCreateTransaction/shard-%s-%s:ShardTransactionTest@0:",
shardID.getShardName(), shardID.getMemberName().getName())));
}
CreateTransactionReply.class);
final String path = reply.getTransactionPath().toString();
- assertTrue("Unexpected transaction path " + path, path.startsWith(String.format(
- "akka://test/user/testCreateTransactionOnChain/shard-%s-%s:ShardTransactionTest@0:",
+ assertThat(path, containsString(String.format(
+ "/user/testCreateTransactionOnChain/shard-%s-%s:ShardTransactionTest@0:",
shardID.getShardName(), shardID.getMemberName().getName())));
}
final DataTree store = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
SCHEMA_CONTEXT);
- final ContainerNode container = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).addChild(
- ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build()).build();
+ final ContainerNode container = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
+ .build())
+ .build();
writeToStore(store, TestModel.TEST_PATH, container);
- final YangInstanceIdentifier root = YangInstanceIdentifier.empty();
- final NormalizedNode<?,?> expected = readStore(store, root);
+ final YangInstanceIdentifier root = YangInstanceIdentifier.of();
+ final NormalizedNode expected = readStore(store, root);
final Snapshot snapshot = Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expected)),
Collections.emptyList(), 1, 2, 3, 4, -1, null, null);
while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
Uninterruptibles.sleepUninterruptibly(75, TimeUnit.MILLISECONDS);
- final NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
+ final NormalizedNode actual = readStore(shard, TestModel.TEST_PATH);
if (actual != null) {
assertEquals("Applied state", node, actual);
return;
// Add some ModificationPayload entries
for (int i = 1; i <= nListEntries; i++) {
- listEntryKeys.add(Integer.valueOf(i));
+ listEntryKeys.add(i);
final YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
InMemoryJournal.addEntry(shardID.toString(), nListEntries + 2,
new ApplyJournalEntries(nListEntries));
- testRecovery(listEntryKeys);
+ testRecovery(listEntryKeys, true);
}
@Test
ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), testKit.getRef());
final ReadyTransactionReply readyReply = ReadyTransactionReply
.fromSerializable(testKit.expectMsgClass(duration, ReadyTransactionReply.class));
- assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
+
+ String pathSuffix = shard.path().toString().replaceFirst("akka://test", "");
+ assertThat(readyReply.getCohortPath(), endsWith(pathSuffix));
// Send the CanCommitTransaction message for the first Tx.
shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), testKit.getRef());
verifyOuterListEntry(shard, 1);
}
+ @Deprecated(since = "9.0.0", forRemoval = true)
@Test(expected = IllegalStateException.class)
public void testBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Exception {
final ShardTestKit testKit = new ShardTestKit(getSystem());
}
@Test
+ @Deprecated(since = "9.0.0", forRemoval = true)
public void testBatchedModificationsWithOperationFailure() {
final ShardTestKit testKit = new ShardTestKit(getSystem());
final TestActorRef<Shard> shard = actorFactory.createTestActor(
final TransactionIdentifier transactionID = nextTransactionId();
- final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+ final ContainerNode invalidData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+ .build();
BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
batched.addModification(new MergeModification(TestModel.TEST_PATH, invalidData));
// Verify data in the data store.
- final NormalizedNode<?, ?> actualNode = readStore(shard, path);
+ final NormalizedNode actualNode = readStore(shard, path);
assertEquals("Stored node", containerNode, actualNode);
}
@Test
+ @Deprecated(since = "9.0.0", forRemoval = true)
public void testOnBatchedModificationsWhenNotLeader() {
final AtomicBoolean overrideLeaderCalls = new AtomicBoolean();
final ShardTestKit testKit = new ShardTestKit(getSystem());
}
@Test
+ @Deprecated(since = "9.0.0", forRemoval = true)
public void testTransactionMessagesWithNoLeader() {
final ShardTestKit testKit = new ShardTestKit(getSystem());
dataStoreContextBuilder.customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
ShardTestKit.waitUntilLeader(shard);
final TransactionIdentifier transactionID = nextTransactionId();
- final NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ final NormalizedNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
if (readWrite) {
shard.tell(prepareForwardedReadyTransaction(shard, transactionID, TestModel.TEST_PATH, containerNode, true),
testKit.getRef());
testKit.expectMsgClass(Duration.ofSeconds(5), CommitTransactionReply.class);
- final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ final NormalizedNode actualNode = readStore(shard, TestModel.TEST_PATH);
assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
}
testKit.expectMsgClass(CommitTransactionReply.class);
- final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
+ final NormalizedNode actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
assertEquals(TestModel.OUTER_LIST_QNAME.getLocalName(), mergeData, actualNode);
}
shard.tell(new CommitTransaction(txId, CURRENT_VERSION).toSerializable(), testKit.getRef());
testKit.expectMsgClass(CommitTransactionReply.class);
- final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
+ final NormalizedNode actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
assertEquals(TestModel.OUTER_LIST_QNAME.getLocalName(), mergeData, actualNode);
}
final Duration duration = Duration.ofSeconds(5);
final TransactionIdentifier transactionID = nextTransactionId();
- final NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ final NormalizedNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
shard.tell(prepareBatchedModifications(transactionID, TestModel.TEST_PATH, containerNode, false),
testKit.getRef());
testKit.expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), testKit.getRef());
testKit.expectMsgClass(duration, CommitTransactionReply.class);
- final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ final NormalizedNode actualNode = readStore(shard, TestModel.TEST_PATH);
assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
}
final Duration duration = Duration.ofSeconds(5);
final TransactionIdentifier transactionID1 = nextTransactionId();
- doThrow(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock canCommit failure"))
+ doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
.doNothing().when(dataTree).validate(any(DataTreeModification.class));
shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
ShardTestKit.waitUntilLeader(shard);
- doThrow(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock canCommit failure"))
+ doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
.doNothing().when(dataTree).validate(any(DataTreeModification.class));
final Duration duration = Duration.ofSeconds(5);
final ShardTestKit testKit = new ShardTestKit(getSystem());
final Creator<Shard> creator = () -> new Shard(newShardBuilder()) {
@Override
- void persistPayload(final Identifier id, final Payload payload,
- final boolean batchHint) {
+ void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
// Simulate an AbortTransaction message occurring during
// replication, after
// persisting and before finishing the commit to the
shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), testKit.getRef());
testKit.expectMsgClass(duration, CommitTransactionReply.class);
- final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST_PATH);
+ final NormalizedNode node = readStore(shard, TestModel.TEST_PATH);
// Since we're simulating an abort occurring during replication
// and before finish commit,
shard.tell(new CommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), testKit.getRef());
testKit.expectMsgClass(duration, CommitTransactionReply.class);
- final NormalizedNode<?, ?> node = readStore(shard, listNodePath);
+ final NormalizedNode node = readStore(shard, listNodePath);
assertNotNull(listNodePath + " not found", node);
}
testKit.expectMsgClass(duration, CommitTransactionReply.class);
- final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST2_PATH);
+ final NormalizedNode node = readStore(shard, TestModel.TEST2_PATH);
assertNotNull(TestModel.TEST2_PATH + " not found", node);
}
dataStoreContextBuilder.persistent(persistent);
- class TestShard extends Shard {
+ final class TestShard extends Shard {
- protected TestShard(final AbstractBuilder<?, ?> builder) {
+ TestShard(final AbstractBuilder<?, ?> builder) {
super(builder);
setPersistence(new TestPersistentDataProvider(super.persistence()));
}
ShardTestKit.waitUntilLeader(shard);
writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- final NormalizedNode<?, ?> expectedRoot = readStore(shard, YangInstanceIdentifier.empty());
+ final NormalizedNode expectedRoot = readStore(shard, YangInstanceIdentifier.of());
// Trigger creation of a snapshot by ensuring
final RaftActorContext raftActorContext = ((TestShard) shard.underlyingActor()).getRaftActorContext();
}
private static void awaitAndValidateSnapshot(final AtomicReference<CountDownLatch> latch,
- final AtomicReference<Object> savedSnapshot, final NormalizedNode<?, ?> expectedRoot)
+ final AtomicReference<Object> savedSnapshot, final NormalizedNode expectedRoot)
throws InterruptedException {
assertTrue("Snapshot saved", latch.get().await(5, TimeUnit.SECONDS));
savedSnapshot.set(null);
}
- private static void verifySnapshot(final Snapshot snapshot, final NormalizedNode<?, ?> expectedRoot) {
- final NormalizedNode<?, ?> actual = ((ShardSnapshotState)snapshot.getState()).getSnapshot().getRootNode().get();
- assertEquals("Root node", expectedRoot, actual);
+ private static void verifySnapshot(final Snapshot snapshot, final NormalizedNode expectedRoot) {
+ assertEquals("Root node", expectedRoot,
+ ((ShardSnapshotState)snapshot.getState()).getSnapshot().getRootNode().orElseThrow());
}
/**
commitTransaction(store, putTransaction);
- final NormalizedNode<?, ?> expected = readStore(store, YangInstanceIdentifier.empty());
+ final NormalizedNode expected = readStore(store, YangInstanceIdentifier.of());
final DataTreeModification writeTransaction = store.takeSnapshot().newModification();
- writeTransaction.delete(YangInstanceIdentifier.empty());
- writeTransaction.write(YangInstanceIdentifier.empty(), expected);
+ writeTransaction.delete(YangInstanceIdentifier.of());
+ writeTransaction.write(YangInstanceIdentifier.of(), expected);
commitTransaction(store, writeTransaction);
- final NormalizedNode<?, ?> actual = readStore(store, YangInstanceIdentifier.empty());
+ final NormalizedNode actual = readStore(store, YangInstanceIdentifier.of());
assertEquals(expected, actual);
}
ShardLeaderStateChanged leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener,
ShardLeaderStateChanged.class);
- assertTrue("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
- assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(),
- leaderStateChanged.getLocalShardDataTree().get());
+ final var dataTree = leaderStateChanged.localShardDataTree();
+ assertNotNull("getLocalShardDataTree present", dataTree);
+ assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(), dataTree);
MessageCollectorActor.clearMessages(listener);
shard.tell(new RequestVote(10000, "member2", 50, 50), testKit.getRef());
leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener, ShardLeaderStateChanged.class);
- assertFalse("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
+ assertNull("getLocalShardDataTree present", leaderStateChanged.localShardDataTree());
}
@Test
import akka.testkit.javadsl.TestKit;
import akka.util.Timeout;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
for (int i = 0; i < 20 * 5; i++) {
Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
try {
- final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
- .getLeaderActor();
+ final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
if (maybeLeader.isPresent()) {
- return maybeLeader.get();
+ return maybeLeader.orElseThrow();
}
} catch (TimeoutException e) {
LOG.trace("FindLeader timed out", e);
for (int i = 0; i < 20 * 5; i++) {
Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
try {
- final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
- .getLeaderActor();
+ final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
if (!maybeLeader.isPresent()) {
return;
}
- lastResponse = maybeLeader.get();
+ lastResponse = maybeLeader.orElseThrow();
} catch (TimeoutException e) {
lastResponse = e;
} catch (Exception e) {
*/
package org.opendaylight.controller.cluster.datastore;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.Props;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.common.api.ReadFailedException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import scala.concurrent.Await;
import scala.concurrent.Future;
*
* @author Basheeruddin Ahmed
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ShardTransactionFailureTest extends AbstractActorTest {
private static final EffectiveModelContext TEST_SCHEMA_CONTEXT = TestModel.createTestContext();
private static final TransactionType RO = TransactionType.READ_ONLY;
@Before
public void setup() {
- ShardStats stats = mock(ShardStats.class);
- when(MOCK_SHARD.getShardMBean()).thenReturn(stats);
+ doReturn(new ShardStats("inventory", "mxBeanType", MOCK_SHARD)).when(MOCK_SHARD).getShardMBean();
}
@Test(expected = ReadFailedException.class)
"testNegativeReadWithReadOnlyTransactionClosed");
Future<Object> future = akka.pattern.Patterns.ask(subject,
- new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+ new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
- future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.empty(),
+ future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
}
"testNegativeReadWithReadWriteTransactionClosed");
Future<Object> future = akka.pattern.Patterns.ask(subject,
- new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+ new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
- future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.empty(),
+ future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
}
"testNegativeExistsWithReadWriteTransactionClosed");
Future<Object> future = akka.pattern.Patterns.ask(subject,
- new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+ new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
future = akka.pattern.Patterns.ask(subject,
- new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+ new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
}
}
import org.opendaylight.controller.cluster.raft.TestActorFactory;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ShardTransactionTest extends AbstractActorTest {
private static final TransactionType RO = TransactionType.READ_ONLY;
}
private void testOnReceiveReadData(final ActorRef transaction) {
- transaction.tell(new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION),
+ transaction.tell(new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
testKit.getRef());
ReadDataReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ReadDataReply.class);
}
private void testOnReceiveDataExistsPositive(final ActorRef transaction) {
- transaction.tell(new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION),
+ transaction.tell(new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
testKit.getRef());
DataExistsReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), DataExistsReply.class);
final ActorRef transaction = newTransactionActor(RW, mockWriteTx, "testOnReceiveBatchedModifications");
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ NormalizedNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME))
- .build();
+ NormalizedNode mergeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+ .build();
YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
watcher.watch(transaction);
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ NormalizedNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
final TransactionIdentifier tx1 = nextTransactionId();
BatchedModifications batched = new BatchedModifications(tx1, DataStoreVersions.CURRENT_VERSION);
watcher.watch(transaction);
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ NormalizedNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
BatchedModifications batched = new BatchedModifications(nextTransactionId(),
DataStoreVersions.CURRENT_VERSION);
import java.util.concurrent.Future;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
/**
* Unit tests for SimpleShardDataTreeCohort.
*
* @author Thomas Pantelis
*/
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class SimpleShardDataTreeCohortTest extends AbstractTest {
@Mock
private ShardDataTree mockShardDataTree;
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
-
doReturn(Optional.empty()).when(mockUserCohorts).commit();
doReturn(Optional.empty()).when(mockUserCohorts).abort();
}).when(mockShardDataTree).startCanCommit(cohort);
@SuppressWarnings("unchecked")
- final FutureCallback<Void> callback = mock(FutureCallback.class);
+ final FutureCallback<Empty> callback = mock(FutureCallback.class);
cohort.canCommit(callback);
- verify(callback).onSuccess(null);
+ verify(callback).onSuccess(Empty.value());
verifyNoMoreInteractions(callback);
}
}).when(mockShardDataTree).startCanCommit(cohort);
@SuppressWarnings("unchecked")
- final FutureCallback<Void> callback = mock(FutureCallback.class);
+ final FutureCallback<Empty> callback = mock(FutureCallback.class);
cohort.canCommit(callback);
verify(callback).onFailure(cause);
@Test
public void testCanCommitWithConflictingModEx() {
- testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.empty(), "mock"));
+ testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.of(), "mock"));
}
@Test
public void testCanCommitWithDataValidationEx() {
- testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock"));
+ testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock"));
}
@Test
}
private static Future<?> abort(final ShardDataTreeCohort cohort) {
- final CompletableFuture<Void> f = new CompletableFuture<>();
- cohort.abort(new FutureCallback<Void>() {
+ final CompletableFuture<Empty> f = new CompletableFuture<>();
+ cohort.abort(new FutureCallback<>() {
@Override
- public void onSuccess(final Void result) {
- f.complete(null);
+ public void onSuccess(final Empty result) {
+ f.complete(result);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
-import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-public class TestDistributedDataStore extends DistributedDataStore implements LocalShardStore {
-
- public TestDistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
- final Configuration configuration,
- final DatastoreContextFactory datastoreContextFactory,
- final DatastoreSnapshot restoreFromSnapshot) {
- super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
- }
-
- TestDistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
- super(actorUtils, identifier);
- }
-
- @Override
- protected AbstractShardManagerCreator<?> getShardManagerCreator() {
- return new TestShardManager.TestShardManagerCreator();
- }
-
- @Override
- public TestShardManager.GetLocalShardsReply getLocalShards() {
- TestShardManager.GetLocalShardsReply reply =
- (TestShardManager.GetLocalShardsReply) getActorUtils()
- .executeOperation(getActorUtils().getShardManager(), TestShardManager.GetLocalShards.INSTANCE);
-
- return reply;
- }
-}
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
/**
* ThreePhaseCommitCohortProxy represents a set of remote cohort proxies.
*/
-public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<ActorSelection> {
-
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort {
private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
+ private static final @NonNull ListenableFuture<Empty> IMMEDIATE_EMPTY_SUCCESS =
+ Futures.immediateFuture(Empty.value());
private static final MessageSupplier COMMIT_MESSAGE_SUPPLIER = new MessageSupplier() {
@Override
}
};
+ private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+ @Override
+ public void run() {
+ }
+
+ @Override
+ public void success() {
+ }
+
+ @Override
+ public void failure() {
+ }
+
+ @Override
+ public void pause() {
+ }
+
+ @Override
+ public void resume() {
+ }
+ };
+
+
private final ActorUtils actorUtils;
private final List<CohortInfo> cohorts;
- private final SettableFuture<Void> cohortsResolvedFuture = SettableFuture.create();
+ private final SettableFuture<Empty> cohortsResolvedFuture = SettableFuture.create();
private final TransactionIdentifier transactionId;
private volatile OperationCallback commitOperationCallback;
- public ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
+ ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
final TransactionIdentifier transactionId) {
this.actorUtils = actorUtils;
this.cohorts = cohorts;
this.transactionId = requireNonNull(transactionId);
if (cohorts.isEmpty()) {
- cohortsResolvedFuture.set(null);
+ cohortsResolvedFuture.set(Empty.value());
}
}
- private ListenableFuture<Void> resolveCohorts() {
+ private ListenableFuture<Empty> resolveCohorts() {
if (cohortsResolvedFuture.isDone()) {
return cohortsResolvedFuture;
}
info.setResolvedActor(actor);
if (done) {
LOG.debug("Tx {}: successfully resolved all cohort actors", transactionId);
- cohortsResolvedFuture.set(null);
+ cohortsResolvedFuture.set(Empty.value());
}
}
}
// extracted from ReadyTransactionReply messages by the Futures that were obtained earlier
// and passed to us from upstream processing. If any one fails then we'll fail canCommit.
- Futures.addCallback(resolveCohorts(), new FutureCallback<Void>() {
+ Futures.addCallback(resolveCohorts(), new FutureCallback<>() {
@Override
- public void onSuccess(final Void notUsed) {
+ public void onSuccess(final Empty result) {
finishCanCommit(returnFuture);
}
return returnFuture;
}
- @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
- justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
LOG.debug("Tx {} finishCanCommit", transactionId);
final Iterator<CohortInfo> iterator = cohorts.iterator();
- final OnComplete<Object> onComplete = new OnComplete<Object>() {
+ final OnComplete<Object> onComplete = new OnComplete<>() {
@Override
public void onComplete(final Throwable failure, final Object response) {
if (failure != null) {
sendCanCommitTransaction(iterator.next(), this);
} else {
LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
- returnFuture.set(Boolean.valueOf(result));
+ returnFuture.set(result);
}
}
}
@Override
- public ListenableFuture<Void> preCommit() {
- // We don't need to do anything here - preCommit is done atomically with the commit phase
- // by the shard.
- return IMMEDIATE_VOID_SUCCESS;
+ public ListenableFuture<Empty> preCommit() {
+ // We don't need to do anything here - preCommit is done atomically with the commit phase by the shard.
+ return IMMEDIATE_EMPTY_SUCCESS;
}
@Override
- public ListenableFuture<Void> abort() {
+ public ListenableFuture<Empty> abort() {
// Note - we pass false for propagateException. In the front-end data broker, this method
// is called when one of the 3 phases fails with an exception. We'd rather have that
// original exception propagated to the client. If our abort fails and we propagate the
// exception then that exception will supersede and suppress the original exception. But
// it's the original exception that is the root cause and of more interest to the client.
- return voidOperation("abort", ABORT_MESSAGE_SUPPLIER,
- AbortTransactionReply.class, false, OperationCallback.NO_OP_CALLBACK);
+ return operation("abort", Empty.value(), ABORT_MESSAGE_SUPPLIER, AbortTransactionReply.class, false,
+ NO_OP_CALLBACK);
}
@Override
- public ListenableFuture<Void> commit() {
+ public ListenableFuture<? extends CommitInfo> commit() {
OperationCallback operationCallback = commitOperationCallback != null ? commitOperationCallback :
- OperationCallback.NO_OP_CALLBACK;
+ NO_OP_CALLBACK;
- return voidOperation("commit", COMMIT_MESSAGE_SUPPLIER,
- CommitTransactionReply.class, true, operationCallback);
+ return operation("commit", CommitInfo.empty(), COMMIT_MESSAGE_SUPPLIER, CommitTransactionReply.class, true,
+ operationCallback);
}
@SuppressWarnings("checkstyle:IllegalCatch")
- private static boolean successfulFuture(final ListenableFuture<Void> future) {
+ private static boolean successfulFuture(final ListenableFuture<?> future) {
if (!future.isDone()) {
return false;
}
}
}
- private ListenableFuture<Void> voidOperation(final String operationName,
+ private <T> ListenableFuture<T> operation(final String operationName, final T futureValue,
final MessageSupplier messageSupplier, final Class<?> expectedResponseClass,
final boolean propagateException, final OperationCallback callback) {
LOG.debug("Tx {} {}", transactionId, operationName);
- final SettableFuture<Void> returnFuture = SettableFuture.create();
+ final SettableFuture<T> returnFuture = SettableFuture.create();
// The cohort actor list should already be built at this point by the canCommit phase but,
// if not for some reason, we'll try to build it here.
- ListenableFuture<Void> future = resolveCohorts();
+ ListenableFuture<Empty> future = resolveCohorts();
if (successfulFuture(future)) {
- finishVoidOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
- returnFuture, callback);
+ finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException, returnFuture,
+ futureValue, callback);
} else {
- Futures.addCallback(future, new FutureCallback<Void>() {
+ Futures.addCallback(future, new FutureCallback<>() {
@Override
- public void onSuccess(final Void notUsed) {
- finishVoidOperation(operationName, messageSupplier, expectedResponseClass,
- propagateException, returnFuture, callback);
+ public void onSuccess(final Empty result) {
+ finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
+ returnFuture, futureValue, callback);
}
@Override
if (propagateException) {
returnFuture.setException(failure);
} else {
- returnFuture.set(null);
+ returnFuture.set(futureValue);
}
}
}, MoreExecutors.directExecutor());
return returnFuture;
}
- private void finishVoidOperation(final String operationName, final MessageSupplier messageSupplier,
+ private <T> void finishOperation(final String operationName, final MessageSupplier messageSupplier,
final Class<?> expectedResponseClass, final boolean propagateException,
- final SettableFuture<Void> returnFuture, final OperationCallback callback) {
+ final SettableFuture<T> returnFuture, final T futureValue,
+ final OperationCallback callback) {
LOG.debug("Tx {} finish {}", transactionId, operationName);
callback.resume();
// Since the caller doesn't want us to propagate the exception we'll also
// not log it normally. But it's usually not good to totally silence
// exceptions so we'll log it to debug level.
- returnFuture.set(null);
+ returnFuture.set(futureValue);
}
callback.failure();
} else {
LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
- returnFuture.set(null);
+ returnFuture.set(futureValue);
callback.success();
}
}, actorUtils.getClientDispatcher());
}
- @Override
- List<Future<ActorSelection>> getCohortFutures() {
- List<Future<ActorSelection>> cohortFutures = new ArrayList<>(cohorts.size());
- for (CohortInfo info: cohorts) {
- cohortFutures.add(info.getActorFuture());
- }
-
- return cohortFutures;
- }
-
static class CohortInfo {
private final Future<ActorSelection> actorFuture;
private final Supplier<Short> actorVersionSupplier;
package org.opendaylight.controller.cluster.datastore;
import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.lenient;
import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
import akka.actor.ActorSelection;
import akka.testkit.TestActorRef;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
-import com.google.common.base.Throwables;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.ThreePhaseCommitCohortProxy.CohortInfo;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.raft.TestActorFactory;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+@Deprecated(since = "9.0.0", forRemoval = true)
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
-
- @SuppressWarnings("serial")
static class TestException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
}
private ActorUtils actorUtils;
@Mock
private Timer commitTimer;
-
@Mock
private Timer.Context commitTimerContext;
-
@Mock
private Snapshot commitSnapshot;
private final List<TestActorRef<CohortActor>> cohortActors = new ArrayList<>();
private final TransactionIdentifier tx = nextTransactionId();
-
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
-
actorUtils = new ActorUtils(getSystem(), actorFactory.createActor(Props.create(DoNothingActor.class)),
new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build(),
new PrimaryShardInfoFutureCache()) {
}
};
- doReturn(commitTimerContext).when(commitTimer).time();
- doReturn(commitSnapshot).when(commitTimer).getSnapshot();
+ lenient().doReturn(commitTimerContext).when(commitTimer).time();
+ lenient().doReturn(commitSnapshot).when(commitTimer).getSnapshot();
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
- doReturn(TimeUnit.MILLISECONDS.toNanos(i) * 1D).when(commitSnapshot).getValue(i * 0.1);
+ lenient().doReturn(TimeUnit.MILLISECONDS.toNanos(i) * 1D).when(commitSnapshot).getValue(i * 0.1);
}
}
@Test
public void testCanCommitYesWithOneCohort() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.yes(CURRENT_VERSION)))), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)))),
+ tx);
verifyCanCommit(proxy.canCommit(), true);
verifyCohortActors();
@Test
public void testCanCommitNoWithOneCohort() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.no(CURRENT_VERSION)))), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.no(CURRENT_VERSION)))),
+ tx);
verifyCanCommit(proxy.canCommit(), false);
verifyCohortActors();
@Test
public void testCanCommitYesWithTwoCohorts() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.yes(CURRENT_VERSION))),
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.yes(CURRENT_VERSION))));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))),
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)))),
+ tx);
verifyCanCommit(proxy.canCommit(), true);
verifyCohortActors();
@Test
public void testCanCommitNoWithThreeCohorts() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.yes(CURRENT_VERSION))),
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
- CanCommitTransactionReply.no(CURRENT_VERSION))),
- newCohortInfo(new CohortActor.Builder(tx)));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))),
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.no(CURRENT_VERSION))),
+ newCohortInfo(new CohortActor.Builder(tx))), tx);
verifyCanCommit(proxy.canCommit(), false);
verifyCohortActors();
}
- @Test(expected = TestException.class)
- public void testCanCommitWithExceptionFailure() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(new TestException()))), tx);
+ @Test
+ public void testCanCommitWithExceptionFailure() {
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+ List.of(newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(new TestException()))), tx);
- propagateExecutionExceptionCause(proxy.canCommit());
+ propagateExecutionExceptionCause(proxy.canCommit(), TestException.class);
}
- @Test(expected = IllegalArgumentException.class)
- public void testCanCommitWithInvalidResponseType() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectCanCommit("invalid"))), tx);
+ @Test
+ public void testCanCommitWithInvalidResponseType() {
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+ List.of(newCohortInfo(new CohortActor.Builder(tx).expectCanCommit("invalid"))), tx);
- propagateExecutionExceptionCause(proxy.canCommit());
+ assertEquals("Unexpected response type class java.lang.String",
+ propagateExecutionExceptionCause(proxy.canCommit(), IllegalArgumentException.class));
}
- @Test(expected = TestException.class)
+ @Test
public void testCanCommitWithFailedCohortFuture() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx)),
- newCohortInfoWithFailedFuture(new TestException()),
- newCohortInfo(new CohortActor.Builder(tx)));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx)),
+ newCohortInfoWithFailedFuture(new TestException()),
+ newCohortInfo(new CohortActor.Builder(tx))), tx);
- propagateExecutionExceptionCause(proxy.canCommit());
+ propagateExecutionExceptionCause(proxy.canCommit(), TestException.class);
}
@Test
public void testAllThreePhasesSuccessful() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfo(
- new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
- .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
- newCohortInfo(
- new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
- .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+ .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+ .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION)))), tx);
verifyCanCommit(proxy.canCommit(), true);
verifySuccessfulFuture(proxy.preCommit());
verifyCohortActors();
}
- @Test(expected = TestException.class)
+ @Test
public void testCommitWithExceptionFailure() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfo(
- new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
- .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
- newCohortInfo(
- new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
- .expectCommit(new TestException())));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+ .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+ .expectCommit(new TestException()))), tx);
verifyCanCommit(proxy.canCommit(), true);
verifySuccessfulFuture(proxy.preCommit());
- propagateExecutionExceptionCause(proxy.commit());
+ propagateExecutionExceptionCause(proxy.commit(), TestException.class);
}
- @Test(expected = IllegalArgumentException.class)
+ @Test
public void testCommitWithInvalidResponseType() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
- Arrays.asList(newCohortInfo(new CohortActor.Builder(tx)
- .expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)).expectCommit("invalid"))), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,List.of(
+ newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+ .expectCommit("invalid"))),
+ tx);
verifyCanCommit(proxy.canCommit(), true);
verifySuccessfulFuture(proxy.preCommit());
- propagateExecutionExceptionCause(proxy.commit());
+ assertEquals("Unexpected response type class java.lang.String",
+ propagateExecutionExceptionCause(proxy.commit(), IllegalArgumentException.class));
}
@Test
public void testAbort() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectAbort(
- AbortTransactionReply.instance(CURRENT_VERSION)))), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+ List.of(newCohortInfo(new CohortActor.Builder(tx).expectAbort(
+ AbortTransactionReply.instance(CURRENT_VERSION)))),
+ tx);
verifySuccessfulFuture(proxy.abort());
verifyCohortActors();
@Test
public void testAbortWithFailure() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
- newCohortInfo(new CohortActor.Builder(tx).expectAbort(new RuntimeException("mock")))), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+ List.of(newCohortInfo(new CohortActor.Builder(tx).expectAbort(new RuntimeException("mock")))), tx);
// The exception should not get propagated.
verifySuccessfulFuture(proxy.abort());
@Test
public void testAbortWithFailedCohortFuture() throws Exception {
- List<CohortInfo> cohorts = Arrays.asList(
- newCohortInfoWithFailedFuture(new TestException()), newCohortInfo(new CohortActor.Builder(tx)));
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+ newCohortInfoWithFailedFuture(new TestException()), newCohortInfo(new CohortActor.Builder(tx))), tx);
verifySuccessfulFuture(proxy.abort());
verifyCohortActors();
@Test
public void testWithNoCohorts() throws Exception {
- ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
- Collections.<CohortInfo>emptyList(), tx);
+ ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(), tx);
verifyCanCommit(proxy.canCommit(), true);
verifySuccessfulFuture(proxy.preCommit());
verifyCohortActors();
}
- @SuppressWarnings("checkstyle:avoidHidingCauseException")
- private void propagateExecutionExceptionCause(final ListenableFuture<?> future) throws Exception {
- try {
- future.get(5, TimeUnit.SECONDS);
- fail("Expected ExecutionException");
- } catch (ExecutionException e) {
- verifyCohortActors();
- Throwables.propagateIfPossible(e.getCause(), Exception.class);
- throw new RuntimeException(e.getCause());
- }
+ private String propagateExecutionExceptionCause(final ListenableFuture<?> future,
+ final Class<? extends Exception> expected) {
+ final var ex = assertThrows(ExecutionException.class, () -> future.get(5, TimeUnit.SECONDS)).getCause();
+ verifyCohortActors();
+ assertThat(ex, instanceOf(expected));
+ return ex.getMessage();
}
private CohortInfo newCohortInfo(final CohortActor.Builder builder, final short version) {
}
Builder expectCanCommit(final Class<?> newExpCanCommitType, final Object newCanCommitReply) {
- this.expCanCommitType = newExpCanCommitType;
- this.canCommitReply = newCanCommitReply;
+ expCanCommitType = newExpCanCommitType;
+ canCommitReply = newCanCommitReply;
return this;
}
}
Builder expectCommit(final Class<?> newExpCommitType, final Object newCommitReply) {
- this.expCommitType = newExpCommitType;
- this.commitReply = newCommitReply;
+ expCommitType = newExpCommitType;
+ commitReply = newCommitReply;
return this;
}
}
Builder expectAbort(final Class<?> newExpAbortType, final Object newAbortReply) {
- this.expAbortType = newExpAbortType;
- this.abortReply = newAbortReply;
+ expAbortType = newExpAbortType;
+ abortReply = newAbortReply;
return this;
}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.util.Timeout;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Promise;
-
-public class TransactionChainProxyTest extends AbstractTransactionProxyTest {
- private LocalHistoryIdentifier historyId;
-
- @Override
- public void setUp() {
- super.setUp();
- historyId = MockIdentifiers.historyIdentifier(TransactionChainProxyTest.class, memberName);
- }
-
- @SuppressWarnings("resource")
- @Test
- public void testNewReadOnlyTransaction() {
-
- DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadOnlyTransaction();
- Assert.assertTrue(dst instanceof DOMStoreReadTransaction);
-
- }
-
- @SuppressWarnings("resource")
- @Test
- public void testNewReadWriteTransaction() {
- DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadWriteTransaction();
- Assert.assertTrue(dst instanceof DOMStoreReadWriteTransaction);
-
- }
-
- @SuppressWarnings("resource")
- @Test
- public void testNewWriteOnlyTransaction() {
- DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newWriteOnlyTransaction();
- Assert.assertTrue(dst instanceof DOMStoreWriteTransaction);
-
- }
-
- @SuppressWarnings("unchecked")
- @Test
- public void testClose() {
- new TransactionChainProxy(mockComponentFactory, historyId).close();
-
- verify(mockActorContext, times(1)).broadcast(any(Function.class), any(Class.class));
- }
-
- @Test
- public void testRateLimitingUsedInReadWriteTxCreation() {
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- txChainProxy.newReadWriteTransaction();
-
- verify(mockActorContext, times(1)).acquireTxCreationPermit();
- }
- }
-
- @Test
- public void testRateLimitingUsedInWriteOnlyTxCreation() {
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- txChainProxy.newWriteOnlyTransaction();
-
- verify(mockActorContext, times(1)).acquireTxCreationPermit();
- }
- }
-
- @Test
- public void testRateLimitingNotUsedInReadOnlyTxCreation() {
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- txChainProxy.newReadOnlyTransaction();
-
- verify(mockActorContext, times(0)).acquireTxCreationPermit();
- }
- }
-
- /**
- * Tests 2 successive chained write-only transactions and verifies the second transaction isn't
- * initiated until the first one completes its read future.
- */
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testChainedWriteOnlyTransactions() throws Exception {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- ActorRef txActorRef1 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
- Promise<Object> batchedReplyPromise1 = akka.dispatch.Futures.promise();
- doReturn(batchedReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
- DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
- writeTx1.ready();
-
- verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
- ActorRef txActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
- expectBatchedModifications(txActorRef2, 1);
-
- final NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
- final DOMStoreWriteTransaction writeTx2 = txChainProxy.newWriteOnlyTransaction();
-
- final AtomicReference<Exception> caughtEx = new AtomicReference<>();
- final CountDownLatch write2Complete = new CountDownLatch(1);
- new Thread(() -> {
- try {
- writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
- } catch (Exception e) {
- caughtEx.set(e);
- } finally {
- write2Complete.countDown();
- }
- }).start();
-
- assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
- if (caughtEx.get() != null) {
- throw caughtEx.get();
- }
-
- try {
- verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- } catch (AssertionError e) {
- fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
- }
-
- batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
- // Tx 2 should've proceeded to find the primary shard.
- verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(
- eq(DefaultShardStrategy.DEFAULT_SHARD));
- }
- }
-
- /**
- * Tests 2 successive chained read-write transactions and verifies the second transaction isn't
- * initiated until the first one completes its read future.
- */
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testChainedReadWriteTransactions() throws Exception {
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- ActorRef txActorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- expectBatchedModifications(txActorRef1, 1);
-
- Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
- doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
- DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
-
- NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
- writeTx1.ready();
-
- verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
- String tx2MemberName = "mock-member";
- ActorRef shardActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
- ActorRef txActorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE,
- DataStoreVersions.CURRENT_VERSION, tx2MemberName, shardActorRef2);
-
- expectBatchedModifications(txActorRef2, 1);
-
- final NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
- final DOMStoreWriteTransaction writeTx2 = txChainProxy.newReadWriteTransaction();
-
- final AtomicReference<Exception> caughtEx = new AtomicReference<>();
- final CountDownLatch write2Complete = new CountDownLatch(1);
- new Thread(() -> {
- try {
- writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
- } catch (Exception e) {
- caughtEx.set(e);
- } finally {
- write2Complete.countDown();
- }
- }).start();
-
- assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
- if (caughtEx.get() != null) {
- throw caughtEx.get();
- }
-
- try {
- verify(mockActorContext, never()).executeOperationAsync(
- eq(getSystem().actorSelection(shardActorRef2.path())),
- eqCreateTransaction(tx2MemberName, READ_WRITE));
- } catch (AssertionError e) {
- fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
- }
-
- readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
- verify(mockActorContext, timeout(5000)).executeOperationAsync(
- eq(getSystem().actorSelection(shardActorRef2.path())),
- eqCreateTransaction(tx2MemberName, READ_WRITE), any(Timeout.class));
- }
- }
-
- @Test(expected = IllegalStateException.class)
- public void testChainedWriteTransactionsWithPreviousTxNotReady() {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- expectBatchedModifications(actorRef, 1);
-
- try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
- DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
- txChainProxy.newWriteOnlyTransaction();
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-public class TransactionContextWrapperTest {
- @Mock
- private ActorUtils actorUtils;
-
- @Mock
- private TransactionContext transactionContext;
-
- private TransactionContextWrapper transactionContextWrapper;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
- doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
- transactionContextWrapper = new TransactionContextWrapper(MockIdentifiers.transactionIdentifier(
- TransactionContextWrapperTest.class, "mock"), actorUtils, "mock");
- }
-
- @Test
- public void testExecutePriorTransactionOperations() {
- for (int i = 0; i < 100; i++) {
- transactionContextWrapper.maybeExecuteTransactionOperation(mock(TransactionOperation.class));
- }
- assertEquals(901, transactionContextWrapper.getLimiter().availablePermits());
-
- transactionContextWrapper.executePriorTransactionOperations(transactionContext);
-
- assertEquals(1001, transactionContextWrapper.getLimiter().availablePermits());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_ONLY;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.util.Timeout;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableSortedSet;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
-import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
-import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import scala.concurrent.Promise;
-
-@SuppressWarnings({"resource", "checkstyle:IllegalThrows", "checkstyle:AvoidHidingCauseException"})
-public class TransactionProxyTest extends AbstractTransactionProxyTest {
-
- @SuppressWarnings("serial")
- static class TestException extends RuntimeException {
- }
-
- interface Invoker {
- FluentFuture<?> invoke(TransactionProxy proxy);
- }
-
- @Test
- public void testRead() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
- TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
- assertFalse("NormalizedNode isPresent", readOptional.isPresent());
-
- NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
- assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
- assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
- }
-
- @Test(expected = ReadFailedException.class)
- public void testReadWithInvalidReplyMessageType() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- doReturn(Futures.successful(new Object())).when(mockActorContext)
- .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- try {
- transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
- }
-
- @Test(expected = TestException.class)
- public void testReadWithAsyncRemoteOperatonFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext)
- .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
- }
-
- private void testExceptionOnInitialCreateTransaction(final Exception exToThrow, final Invoker invoker)
- throws Throwable {
- ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- if (exToThrow instanceof PrimaryNotFoundException) {
- doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
- } else {
- doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
- .findPrimaryShardAsync(anyString());
- }
-
- doReturn(Futures.failed(exToThrow)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), any(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- propagateReadFailedExceptionCause(invoker.invoke(transactionProxy));
- }
-
- private void testReadWithExceptionOnInitialCreateTransaction(final Exception exToThrow) throws Throwable {
- testExceptionOnInitialCreateTransaction(exToThrow, proxy -> proxy.read(TestModel.TEST_PATH));
- }
-
- @Test(expected = PrimaryNotFoundException.class)
- public void testReadWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
- testReadWithExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"));
- }
-
- @Test(expected = TestException.class)
- public void testReadWhenATimeoutExceptionIsThrown() throws Throwable {
- testReadWithExceptionOnInitialCreateTransaction(new TimeoutException("test",
- new TestException()));
- }
-
- @Test(expected = TestException.class)
- public void testReadWhenAnyOtherExceptionIsThrown() throws Throwable {
- testReadWithExceptionOnInitialCreateTransaction(new TestException());
- }
-
- @Test
- public void testReadWithPriorRecordingOperationSuccessful() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(actorRef, 1);
-
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, expectedNode);
-
- Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
- TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
- assertTrue("NormalizedNode isPresent", readOptional.isPresent());
- assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
-
- InOrder inOrder = Mockito.inOrder(mockActorContext);
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
- }
-
- @Test(expected = IllegalStateException.class)
- public void testReadPreConditionCheck() {
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
- transactionProxy.read(TestModel.TEST_PATH);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void testInvalidCreateTransactionReply() throws Throwable {
- ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext)
- .actorSelection(actorRef.path().toString());
-
- doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
- eq(getSystem().actorSelection(actorRef.path())), eqCreateTransaction(memberName, READ_ONLY),
- any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
- }
-
- @Test
- public void testExists() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- doReturn(dataExistsReply(false)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
- assertEquals("Exists response", Boolean.FALSE, exists);
-
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
- assertEquals("Exists response", Boolean.TRUE, exists);
- }
-
- @Test(expected = PrimaryNotFoundException.class)
- public void testExistsWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
- testExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"),
- proxy -> proxy.exists(TestModel.TEST_PATH));
- }
-
- @Test(expected = ReadFailedException.class)
- public void testExistsWithInvalidReplyMessageType() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- doReturn(Futures.successful(new Object())).when(mockActorContext)
- .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- try {
- transactionProxy.exists(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
- }
-
- @Test(expected = TestException.class)
- public void testExistsWithAsyncRemoteOperatonFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext)
- .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
- }
-
- @Test
- public void testExistsWithPriorRecordingOperationSuccessful() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(actorRef, 1);
-
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
- assertEquals("Exists response", Boolean.TRUE, exists);
-
- InOrder inOrder = Mockito.inOrder(mockActorContext);
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
- }
-
- @Test(expected = IllegalStateException.class)
- public void testExistsPreConditionCheck() {
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
- transactionProxy.exists(TestModel.TEST_PATH);
- }
-
- @Test
- public void testWrite() {
- dataStoreContextBuilder.shardBatchedModificationCount(1);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(actorRef, 1);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testWriteAfterAsyncRead() throws Exception {
- ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem(),
- DefaultShardStrategy.DEFAULT_SHARD);
-
- Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
- doReturn(createTxPromise).when(mockActorContext).executeOperationAsync(
- eq(getSystem().actorSelection(actorRef.path())),
- eqCreateTransaction(memberName, READ_WRITE), any(Timeout.class));
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- expectBatchedModificationsReady(actorRef);
-
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- final TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- final CountDownLatch readComplete = new CountDownLatch(1);
- final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
- com.google.common.util.concurrent.Futures.addCallback(transactionProxy.read(TestModel.TEST_PATH),
- new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
- @Override
- public void onSuccess(final Optional<NormalizedNode<?, ?>> result) {
- try {
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- } catch (Exception e) {
- caughtEx.set(e);
- } finally {
- readComplete.countDown();
- }
- }
-
- @Override
- public void onFailure(final Throwable failure) {
- caughtEx.set(failure);
- readComplete.countDown();
- }
- }, MoreExecutors.directExecutor());
-
- createTxPromise.success(createTransactionReply(actorRef, DataStoreVersions.CURRENT_VERSION));
-
- Uninterruptibles.awaitUninterruptibly(readComplete, 5, TimeUnit.SECONDS);
-
- final Throwable t = caughtEx.get();
- if (t != null) {
- Throwables.propagateIfPossible(t, Exception.class);
- throw new RuntimeException(t);
- }
-
- // This sends the batched modification.
- transactionProxy.ready();
-
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
- }
-
- @Test(expected = IllegalStateException.class)
- public void testWritePreConditionCheck() {
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
- transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- }
-
- @Test(expected = IllegalStateException.class)
- public void testWriteAfterReadyPreConditionCheck() {
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.ready();
-
- transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- }
-
- @Test
- public void testMerge() {
- dataStoreContextBuilder.shardBatchedModificationCount(1);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(actorRef, 1);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite), false);
- }
-
- @Test
- public void testDelete() {
- dataStoreContextBuilder.shardBatchedModificationCount(1);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- expectBatchedModifications(actorRef, 1);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH), false);
- }
-
- @Test
- public void testReadWrite() {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- expectBatchedModifications(actorRef, 1);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), false,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- }
-
- @Test
- public void testReadyWithReadWrite() {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- expectBatchedModificationsReady(actorRef, true);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), true, true,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
- }
-
- @Test
- public void testReadyWithNoModifications() {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- expectBatchedModificationsReady(actorRef, true);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), true, true);
- }
-
- @Test
- public void testReadyWithMultipleShardWrites() {
- ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
- TestModel.JUNK_QNAME.getLocalName());
-
- expectBatchedModificationsReady(actorRef1);
- expectBatchedModificationsReady(actorRef2);
-
- ActorRef actorRef3 = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- doReturn(getSystem().actorSelection(actorRef3.path())).when(mockActorContext)
- .actorSelection(actorRef3.path().toString());
-
- doReturn(Futures.successful(newPrimaryShardInfo(actorRef3, createDataTree()))).when(mockActorContext)
- .findPrimaryShardAsync(eq(CarsModel.BASE_QNAME.getLocalName()));
-
- expectReadyLocalTransaction(actorRef3, false);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
- transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- transactionProxy.write(CarsModel.BASE_PATH, ImmutableNodes.containerNode(CarsModel.BASE_QNAME));
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef1),
- actorSelection(actorRef2), actorSelection(actorRef3));
-
- SortedSet<String> expShardNames =
- ImmutableSortedSet.of(DefaultShardStrategy.DEFAULT_SHARD,
- TestModel.JUNK_QNAME.getLocalName(), CarsModel.BASE_QNAME.getLocalName());
-
- ArgumentCaptor<BatchedModifications> batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef1)), batchedMods.capture(), any(Timeout.class));
- assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
- assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
- batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef2)), batchedMods.capture(), any(Timeout.class));
- assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
- assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
- ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef3)), readyLocalTx.capture(), any(Timeout.class));
- assertTrue("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
- assertEquals("Participating shards", expShardNames, readyLocalTx.getValue().getParticipatingShardNames().get());
- }
-
- @Test
- public void testReadyWithWriteOnlyAndLastBatchPending() {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModificationsReady(actorRef, true);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), true, true,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- }
-
- @Test
- public void testReadyWithWriteOnlyAndLastBatchEmpty() {
- dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModificationsReady(actorRef, true);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), false,
- new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- verifyBatchedModifications(batchedModifications.get(1), true, true);
- }
-
- @Test
- public void testReadyWithReplyFailure() {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectFailedBatchedModifications(actorRef);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, TestException.class);
- }
-
- @Test
- public void testReadyWithDebugContextEnabled() {
- dataStoreContextBuilder.transactionDebugContextEnabled(true);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- expectBatchedModificationsReady(actorRef, true);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.merge(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof DebugThreePhaseCommitCohort);
-
- verifyCohortFutures((DebugThreePhaseCommitCohort)ready, new CommitTransactionReply().toSerializable());
- }
-
- @Test
- public void testReadyWithLocalTransaction() {
- ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, createDataTree()))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- expectReadyLocalTransaction(shardActorRef, true);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
- ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(shardActorRef)), readyLocalTx.capture(), any(Timeout.class));
- assertFalse("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
- }
-
- @Test
- public void testReadyWithLocalTransactionWithFailure() {
- ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- DataTree mockDataTree = createDataTree();
- DataTreeModification mockModification = mockDataTree.takeSnapshot().newModification();
- doThrow(new RuntimeException("mock")).when(mockModification).ready();
-
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, mockDataTree))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- expectReadyLocalTransaction(shardActorRef, true);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures((SingleCommitCohortProxy)ready, RuntimeException.class);
- }
-
- private void testWriteOnlyTxWithFindPrimaryShardFailure(final Exception toThrow) {
- doReturn(Futures.failed(toThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof SingleCommitCohortProxy);
-
- verifyCohortFutures((SingleCommitCohortProxy)ready, toThrow.getClass());
- }
-
- @Test
- public void testWriteOnlyTxWithPrimaryNotFoundException() {
- testWriteOnlyTxWithFindPrimaryShardFailure(new PrimaryNotFoundException("mock"));
- }
-
- @Test
- public void testWriteOnlyTxWithNotInitializedException() {
- testWriteOnlyTxWithFindPrimaryShardFailure(new NotInitializedException("mock"));
- }
-
- @Test
- public void testWriteOnlyTxWithNoShardLeaderException() {
- testWriteOnlyTxWithFindPrimaryShardFailure(new NoShardLeaderException("mock"));
- }
-
- @Test
- public void testReadyWithInvalidReplyMessageType() {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
- ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
- ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
- TestModel.JUNK_QNAME.getLocalName());
-
- doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
- expectBatchedModificationsReady(actorRef2);
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
- transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
- transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef2),
- IllegalArgumentException.class);
- }
-
- @Test
- public void testGetIdentifier() {
- setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- Object id = transactionProxy.getIdentifier();
- assertNotNull("getIdentifier returned null", id);
- assertTrue("Invalid identifier: " + id, id.toString().contains(memberName));
- }
-
- @Test
- public void testClose() {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.close();
-
- verify(mockActorContext).sendOperationAsync(
- eq(actorSelection(actorRef)), isA(CloseTransaction.class));
- }
-
- private interface TransactionProxyOperation {
- void run(TransactionProxy transactionProxy);
- }
-
- private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef) {
- return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION);
- }
-
- private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef, final DataTree dataTree) {
- return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION,
- dataTree);
- }
-
- private void throttleOperation(final TransactionProxyOperation operation) {
- throttleOperation(operation, 1, true);
- }
-
- private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
- final boolean shardFound) {
- throttleOperation(operation, outstandingOpsLimit, shardFound, TimeUnit.MILLISECONDS.toNanos(
- mockActorContext.getDatastoreContext().getOperationTimeoutInMillis()));
- }
-
- private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
- final boolean shardFound, final long expectedCompletionTime) {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- // Note that we setting batchedModificationCount to one less than what we need because in TransactionProxy
- // we now allow one extra permit to be allowed for ready
- doReturn(dataStoreContextBuilder.operationTimeoutInSeconds(2)
- .shardBatchedModificationCount(outstandingOpsLimit - 1).build()).when(mockActorContext)
- .getDatastoreContext();
-
- doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- if (shardFound) {
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
- .findPrimaryShardAsync(eq("cars"));
-
- } else {
- doReturn(Futures.failed(new Exception("not found")))
- .when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- }
-
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
- any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- long start = System.nanoTime();
-
- operation.run(transactionProxy);
-
- long end = System.nanoTime();
-
- Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
- expectedCompletionTime, end - start),
- end - start > expectedCompletionTime && end - start < expectedCompletionTime * 2);
-
- }
-
- private void completeOperation(final TransactionProxyOperation operation) {
- completeOperation(operation, true);
- }
-
- private void completeOperation(final TransactionProxyOperation operation, final boolean shardFound) {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- if (shardFound) {
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- } else {
- doReturn(Futures.failed(new PrimaryNotFoundException("test"))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- }
-
- ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
- String actorPath = txActorRef.path().toString();
- CreateTransactionReply createTransactionReply = new CreateTransactionReply(actorPath, nextTransactionId(),
- DataStoreVersions.CURRENT_VERSION);
-
- doReturn(actorSystem.actorSelection(actorPath)).when(mockActorContext).actorSelection(actorPath);
-
- doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).executeOperationAsync(
- eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
- any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- long start = System.nanoTime();
-
- operation.run(transactionProxy);
-
- long end = System.nanoTime();
-
- long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
- .getOperationTimeoutInMillis());
- Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
- expected, end - start), end - start <= expected);
- }
-
- private void completeOperationLocal(final TransactionProxyOperation operation, final DataTree dataTree) {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, dataTree))).when(mockActorContext)
- .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- long start = System.nanoTime();
-
- operation.run(transactionProxy);
-
- long end = System.nanoTime();
-
- long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
- .getOperationTimeoutInMillis());
- Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s", expected, end - start),
- end - start <= expected);
- }
-
- private static DataTree createDataTree() {
- DataTree dataTree = mock(DataTree.class);
- DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
- DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
- doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
- doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
-
- return dataTree;
- }
-
- private static DataTree createDataTree(final NormalizedNode<?, ?> readResponse) {
- DataTree dataTree = mock(DataTree.class);
- DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
- DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
- doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
- doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
- doReturn(Optional.of(readResponse)).when(dataTreeModification).readNode(any(YangInstanceIdentifier.class));
-
- return dataTree;
- }
-
-
- @Test
- public void testWriteCompletionForLocalShard() {
- completeOperationLocal(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- }, createDataTree());
- }
-
- @Test
- public void testWriteThrottlingWhenShardFound() {
- throttleOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectIncompleteBatchedModifications();
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- });
- }
-
- @Test
- public void testWriteThrottlingWhenShardNotFound() {
- // Confirm that there is no throttling when the Shard is not found
- completeOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(2);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- }, false);
-
- }
-
-
- @Test
- public void testWriteCompletion() {
- completeOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(2);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- });
- }
-
- @Test
- public void testMergeThrottlingWhenShardFound() {
- throttleOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectIncompleteBatchedModifications();
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
- });
- }
-
- @Test
- public void testMergeThrottlingWhenShardNotFound() {
- completeOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(2);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
- }, false);
- }
-
- @Test
- public void testMergeCompletion() {
- completeOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(2);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
- });
-
- }
-
- @Test
- public void testMergeCompletionForLocalShard() {
- completeOperationLocal(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- }, createDataTree());
- }
-
-
- @Test
- public void testDeleteThrottlingWhenShardFound() {
-
- throttleOperation(transactionProxy -> {
- expectIncompleteBatchedModifications();
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- transactionProxy.delete(TestModel.TEST_PATH);
- });
- }
-
-
- @Test
- public void testDeleteThrottlingWhenShardNotFound() {
-
- completeOperation(transactionProxy -> {
- expectBatchedModifications(2);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- transactionProxy.delete(TestModel.TEST_PATH);
- }, false);
- }
-
- @Test
- public void testDeleteCompletionForLocalShard() {
- completeOperationLocal(transactionProxy -> {
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- transactionProxy.delete(TestModel.TEST_PATH);
- }, createDataTree());
-
- }
-
- @Test
- public void testDeleteCompletion() {
- completeOperation(transactionProxy -> {
- expectBatchedModifications(2);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- transactionProxy.delete(TestModel.TEST_PATH);
- });
-
- }
-
- @Test
- public void testReadThrottlingWhenShardFound() {
-
- throttleOperation(transactionProxy -> {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqReadData());
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
- });
- }
-
- @Test
- public void testReadThrottlingWhenShardNotFound() {
-
- completeOperation(transactionProxy -> {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqReadData());
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
- }, false);
- }
-
-
- @Test
- public void testReadCompletion() {
- completeOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(readDataReply(nodeToRead)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqReadData(), any(Timeout.class));
-
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
- });
-
- }
-
- @Test
- public void testReadCompletionForLocalShard() {
- final NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- completeOperationLocal(transactionProxy -> {
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
- }, createDataTree(nodeToRead));
-
- }
-
- @Test
- public void testReadCompletionForLocalShardWhenExceptionOccurs() {
- completeOperationLocal(transactionProxy -> {
- transactionProxy.read(TestModel.TEST_PATH);
-
- transactionProxy.read(TestModel.TEST_PATH);
- }, createDataTree());
-
- }
-
- @Test
- public void testExistsThrottlingWhenShardFound() {
-
- throttleOperation(transactionProxy -> {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDataExists());
-
- transactionProxy.exists(TestModel.TEST_PATH);
-
- transactionProxy.exists(TestModel.TEST_PATH);
- });
- }
-
- @Test
- public void testExistsThrottlingWhenShardNotFound() {
-
- completeOperation(transactionProxy -> {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDataExists());
-
- transactionProxy.exists(TestModel.TEST_PATH);
-
- transactionProxy.exists(TestModel.TEST_PATH);
- }, false);
- }
-
-
- @Test
- public void testExistsCompletion() {
- completeOperation(transactionProxy -> {
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDataExists(), any(Timeout.class));
-
- transactionProxy.exists(TestModel.TEST_PATH);
-
- transactionProxy.exists(TestModel.TEST_PATH);
- });
-
- }
-
- @Test
- public void testExistsCompletionForLocalShard() {
- final NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- completeOperationLocal(transactionProxy -> {
- transactionProxy.exists(TestModel.TEST_PATH);
-
- transactionProxy.exists(TestModel.TEST_PATH);
- }, createDataTree(nodeToRead));
-
- }
-
- @Test
- public void testExistsCompletionForLocalShardWhenExceptionOccurs() {
- completeOperationLocal(transactionProxy -> {
- transactionProxy.exists(TestModel.TEST_PATH);
-
- transactionProxy.exists(TestModel.TEST_PATH);
- }, createDataTree());
-
- }
-
- @Test
- public void testReadyThrottling() {
-
- throttleOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectBatchedModifications(1);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.ready();
- });
- }
-
- @Test
- public void testReadyThrottlingWithTwoTransactionContexts() {
- throttleOperation(transactionProxy -> {
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- NormalizedNode<?, ?> carsNode = ImmutableNodes.containerNode(CarsModel.BASE_QNAME);
-
- expectBatchedModifications(2);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- // Trying to write to Cars will cause another transaction context to get created
- transactionProxy.write(CarsModel.BASE_PATH, carsNode);
-
- // Now ready should block for both transaction contexts
- transactionProxy.ready();
- }, 1, true, TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
- .getOperationTimeoutInMillis()) * 2);
- }
-
- private void testModificationOperationBatching(final TransactionType type) {
- int shardBatchedModificationCount = 3;
- dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), type);
-
- expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
- YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
- YangInstanceIdentifier writePath3 = TestModel.INNER_LIST_PATH;
- NormalizedNode<?, ?> writeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
- YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
- NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- YangInstanceIdentifier mergePath2 = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
- YangInstanceIdentifier mergePath3 = TestModel.INNER_LIST_PATH;
- NormalizedNode<?, ?> mergeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
- YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
- YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, type);
-
- transactionProxy.write(writePath1, writeNode1);
- transactionProxy.write(writePath2, writeNode2);
- transactionProxy.delete(deletePath1);
- transactionProxy.merge(mergePath1, mergeNode1);
- transactionProxy.merge(mergePath2, mergeNode2);
- transactionProxy.write(writePath3, writeNode3);
- transactionProxy.merge(mergePath3, mergeNode3);
- transactionProxy.delete(deletePath2);
-
- // This sends the last batch.
- transactionProxy.ready();
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
- new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
-
- verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
- new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
-
- verifyBatchedModifications(batchedModifications.get(2), true, true,
- new MergeModification(mergePath3, mergeNode3), new DeleteModification(deletePath2));
-
- assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
- }
-
- @Test
- public void testReadWriteModificationOperationBatching() {
- testModificationOperationBatching(READ_WRITE);
- }
-
- @Test
- public void testWriteOnlyModificationOperationBatching() {
- testModificationOperationBatching(WRITE_ONLY);
- }
-
- @Test
- public void testOptimizedWriteOnlyModificationOperationBatching() {
- dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
- testModificationOperationBatching(WRITE_ONLY);
- }
-
- @Test
- public void testModificationOperationBatchingWithInterleavedReads() throws Exception {
-
- int shardBatchedModificationCount = 10;
- dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
- final YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
- final NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
- final YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
- final NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- YangInstanceIdentifier mergePath2 = TestModel.INNER_LIST_PATH;
- NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
- final YangInstanceIdentifier deletePath = TestModel.OUTER_LIST_PATH;
-
- doReturn(readDataReply(writeNode2)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
- doReturn(readDataReply(mergeNode2)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
- transactionProxy.write(writePath1, writeNode1);
- transactionProxy.write(writePath2, writeNode2);
-
- Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(writePath2).get(5, TimeUnit.SECONDS);
-
- assertTrue("NormalizedNode isPresent", readOptional.isPresent());
- assertEquals("Response NormalizedNode", writeNode2, readOptional.get());
-
- transactionProxy.merge(mergePath1, mergeNode1);
- transactionProxy.merge(mergePath2, mergeNode2);
-
- readOptional = transactionProxy.read(mergePath2).get(5, TimeUnit.SECONDS);
-
- transactionProxy.delete(deletePath);
-
- Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
- assertEquals("Exists response", Boolean.TRUE, exists);
-
- assertTrue("NormalizedNode isPresent", readOptional.isPresent());
- assertEquals("Response NormalizedNode", mergeNode2, readOptional.get());
-
- List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
- assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
- verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
- new WriteModification(writePath2, writeNode2));
-
- verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
- new MergeModification(mergePath2, mergeNode2));
-
- verifyBatchedModifications(batchedModifications.get(2), false, new DeleteModification(deletePath));
-
- InOrder inOrder = Mockito.inOrder(mockActorContext);
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
- inOrder.verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
- }
-
- @Test
- public void testReadRoot() throws InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
- EffectiveModelContext schemaContext = SchemaContextHelper.full();
- Configuration configuration = mock(Configuration.class);
- doReturn(configuration).when(mockActorContext).getConfiguration();
- doReturn(schemaContext).when(mockActorContext).getSchemaContext();
- doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
-
- NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
- setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
- setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
-
- doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
-
- doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
-
- TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
- Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
- YangInstanceIdentifier.empty()).get(5, TimeUnit.SECONDS);
-
- assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
- NormalizedNode<?, ?> normalizedNode = readOptional.get();
-
- assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
-
- @SuppressWarnings("unchecked")
- Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
-
- for (NormalizedNode<?,?> node : collection) {
- assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
- }
-
- assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
- NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
-
- assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
-
- assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
- NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
-
- assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
- }
-
-
- private void setUpReadData(final String shardName, final NormalizedNode<?, ?> expectedNode) {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
- doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
- .actorSelection(shardActorRef.path().toString());
-
- doReturn(primaryShardInfoReply(getSystem(), shardActorRef)).when(mockActorContext)
- .findPrimaryShardAsync(eq(shardName));
-
- ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- doReturn(actorSystem.actorSelection(txActorRef.path())).when(mockActorContext)
- .actorSelection(txActorRef.path().toString());
-
- doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION)))
- .when(mockActorContext).executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
- eqCreateTransaction(memberName, TransactionType.READ_ONLY), any(Timeout.class));
-
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef)), eqReadData(YangInstanceIdentifier.empty()), any(Timeout.class));
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore;
import com.codahale.metrics.Timer;
* TransactionRateLimitingCallback computes the new transaction rate limit on the successful completion of a
* transaction.
*/
-public class TransactionRateLimitingCallback implements OperationCallback {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class TransactionRateLimitingCallback implements OperationCallback {
private static Ticker TICKER = Ticker.systemTicker();
private enum State {
private long elapsedTime;
private volatile State state = State.STOPPED;
- TransactionRateLimitingCallback(ActorUtils actorUtils) {
+ TransactionRateLimitingCallback(final ActorUtils actorUtils) {
commitTimer = actorUtils.getOperationTimer(ActorUtils.COMMIT);
}
}
@VisibleForTesting
- static void setTicker(Ticker ticker) {
+ static void setTicker(final Ticker ticker) {
TICKER = ticker;
}
}
*/
package org.opendaylight.controller.cluster.datastore;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doReturn;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
/**
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class TransactionRateLimitingCallbackTest {
-
@Mock
ActorUtils mockContext;
-
@Mock
Timer mockTimer;
-
@Mock
Ticker mockTicker;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
doReturn(mockTimer).when(mockContext).getOperationTimer(ActorUtils.COMMIT);
callback = new TransactionRateLimitingCallback(mockContext);
TransactionRateLimitingCallback.setTicker(mockTicker);
@Test
public void testSuccessWithoutRun() {
- try {
- callback.success();
- fail("Expected IllegalStateException");
- } catch (IllegalStateException e) {
- // expected
- }
+ final var ex = assertThrows(IllegalStateException.class, callback::success);
verify(mockTimer, never()).update(anyLong(), any(TimeUnit.class));
}
import java.time.Duration;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class DataTreeNotificationListenerRegistrationActorTest extends AbstractActorTest {
@Mock
private ListenerRegistration<?> mockListenerReg;
@Before
public void setup() {
- MockitoAnnotations.initMocks(this);
DataTreeNotificationListenerRegistrationActor.killDelay = 100;
kit = new TestKit(getSystem());
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.testkit.javadsl.TestKit;
public class ShardSnapshotActorTest extends AbstractActorTest {
private static final InputOutputStreamFactory STREAM_FACTORY = InputOutputStreamFactory.simple();
- private static final NormalizedNode<?, ?> DATA = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ private static final NormalizedNode DATA = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
private static void testSerializeSnapshot(final String testName, final ShardDataTreeSnapshot snapshot,
final boolean withInstallSnapshot) throws Exception {
final ActorRef snapshotActor = getSystem().actorOf(ShardSnapshotActor.props(STREAM_FACTORY), testName);
kit.watch(snapshotActor);
- final NormalizedNode<?, ?> expectedRoot = snapshot.getRootNode().get();
+ final NormalizedNode expectedRoot = snapshot.getRootNode().orElseThrow();
ByteArrayOutputStream installSnapshotStream = withInstallSnapshot ? new ByteArrayOutputStream() : null;
ShardSnapshotActor.requestSnapshot(snapshotActor, snapshot,
}
assertEquals("Deserialized snapshot type", snapshot.getClass(), deserialized.getClass());
-
- final Optional<NormalizedNode<?, ?>> maybeNode = deserialized.getRootNode();
- assertTrue("isPresent", maybeNode.isPresent());
- assertEquals("Root node", expectedRoot, maybeNode.get());
+ assertEquals("Root node", Optional.of(expectedRoot), deserialized.getRootNode());
}
}
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Sets;
-import java.net.URI;
import java.util.Collection;
import java.util.Set;
import org.junit.Assert;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
public abstract class ConfigurationImplBaseTest {
private static final MemberName MEMBER_1 = MemberName.forName("member-1");
@Test
public void testAddModuleShardConfiguration() throws Exception {
- URI namespace = new URI("urn:opendaylight:test:oven");
+ XMLNamespace namespace = XMLNamespace.of("urn:opendaylight:test:oven");
String moduleName = "oven";
String shardName = "oven-shard";
String shardStrategyName = ModuleShardStrategy.NAME;
ImmutableSortedSet.copyOf(configuration.getMembersFromShardName(shardName)));
assertEquals("getShardNameForModule", shardName, configuration.getShardNameForModule(moduleName));
assertEquals("getModuleNameFromNameSpace", moduleName,
- configuration.getModuleNameFromNameSpace(namespace.toASCIIString()));
+ configuration.getModuleNameFromNameSpace(namespace.toString()));
assertEquals("getAllShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default", shardName),
ImmutableSortedSet.copyOf(configuration.getAllShardNames()));
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Sets;
-import java.net.URI;
import java.util.Collection;
import java.util.Set;
import org.junit.Assert;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
public class ConfigurationImplTest {
private static final MemberName MEMBER_1 = MemberName.forName("member-1");
@Test
public void testAddModuleShardConfiguration() throws Exception {
- URI namespace = new URI("urn:opendaylight:test:oven");
+ XMLNamespace namespace = XMLNamespace.of("urn:opendaylight:test:oven");
String moduleName = "oven";
String shardName = "oven-shard";
String shardStrategyName = ModuleShardStrategy.NAME;
ImmutableSortedSet.copyOf(configuration.getMembersFromShardName(shardName)));
assertEquals("getShardNameForModule", shardName, configuration.getShardNameForModule(moduleName));
assertEquals("getModuleNameFromNameSpace", moduleName,
- configuration.getModuleNameFromNameSpace(namespace.toASCIIString()));
+ configuration.getModuleNameFromNameSpace(namespace.toString()));
assertEquals("getAllShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default", shardName),
ImmutableSortedSet.copyOf(configuration.getAllShardNames()));
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class AbortTransactionReplyTest {
-
@Test
public void testSerialization() {
AbortTransactionReply expected = AbortTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.MockIdentifiers;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class AbortTransactionTest {
-
@Test
public void testSerialization() {
AbortTransaction expected = new AbortTransaction(
import java.io.Serializable;
import java.util.Optional;
import java.util.SortedSet;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
/**
* Unit tests for BatchedModifications.
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class BatchedModificationsTest extends AbstractTest {
-
@Test
public void testSerialization() {
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ ContainerNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+ ContainerNode mergeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+ .build();
YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
assertEquals("getTransactionID", tx2, clone.getTransactionId());
assertTrue("isReady", clone.isReady());
assertTrue("isDoCommitOnReady", clone.isDoCommitOnReady());
- assertTrue("participatingShardNames present", clone.getParticipatingShardNames().isPresent());
- assertEquals("participatingShardNames", shardNames, clone.getParticipatingShardNames().get());
+ assertEquals("participatingShardNames", Optional.of(shardNames), clone.getParticipatingShardNames());
assertEquals("getModifications size", 0, clone.getModifications().size());
// Test not ready.
assertEquals("getTransactionID", tx2, clone.getTransactionId());
assertFalse("isReady", clone.isReady());
assertEquals("getModifications size", 0, clone.getModifications().size());
-
- // Test pre-Flourine
-
- batched = new BatchedModifications(tx2, DataStoreVersions.BORON_VERSION);
- batched.addModification(new WriteModification(writePath, writeData));
- batched.setReady(Optional.of(ImmutableSortedSet.of("one", "two")));
-
- clone = (BatchedModifications) SerializationUtils.clone((Serializable) batched.toSerializable());
-
- assertEquals("getVersion", DataStoreVersions.BORON_VERSION, clone.getVersion());
- assertEquals("getTransactionID", tx2, clone.getTransactionId());
- assertTrue("isReady", clone.isReady());
- assertFalse("participatingShardNames present", clone.getParticipatingShardNames().isPresent());
- assertEquals("getModifications size", 1, clone.getModifications().size());
}
@Test
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CanCommitTransactionReplyTest {
-
@Test
public void testSerialization() {
testSerialization(CanCommitTransactionReply.yes(DataStoreVersions.CURRENT_VERSION),
CanCommitTransactionReply.class);
}
- private static void testSerialization(CanCommitTransactionReply expected, Class<?> expSerialized) {
+ private static void testSerialization(final CanCommitTransactionReply expected, final Class<?> expSerialized) {
Object serialized = expected.toSerializable();
assertEquals("Serialized type", expSerialized, serialized.getClass());
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CanCommitTransactionTest extends AbstractTest {
-
@Test
public void testSerialization() {
CanCommitTransaction expected = new CanCommitTransaction(nextTransactionId(),
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CloseTransactionChainTest extends AbstractTest {
-
@Test
public void testSerialization() {
- CloseTransactionChain expected = new CloseTransactionChain(nextHistoryId(), DataStoreVersions.CURRENT_VERSION);
+ CloseTransactionChain expected = new CloseTransactionChain(newHistoryId(1), DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable();
+ var serialized = (Serializable) expected.toSerializable();
assertEquals("Serialized type", CloseTransactionChain.class, serialized.getClass());
+ final byte[] bytes = SerializationUtils.serialize(serialized);
+ assertEquals(241, bytes.length);
+
CloseTransactionChain actual = CloseTransactionChain.fromSerializable(
- SerializationUtils.clone((Serializable) serialized));
+ SerializationUtils.deserialize(bytes));
assertEquals("getIdentifier", expected.getIdentifier(), actual.getIdentifier());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
}
import static org.junit.Assert.assertEquals;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CloseTransactionTest {
@Test
public void testCloseTransactionSerialization() {
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CommitTransactionReplyTest {
-
@Test
public void testSerialization() {
CommitTransactionReply expected = CommitTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CommitTransactionTest extends AbstractTest {
@Test
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CreateTransactionReplyTest extends AbstractTest {
-
@Test
public void testSerialization() {
CreateTransactionReply expected = new CreateTransactionReply("txPath", nextTransactionId(),
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class CreateTransactionTest extends AbstractTest {
-
@Test
public void testSerialization() {
CreateTransaction expected = new CreateTransaction(nextTransactionId(), 2, DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class DataExistsReplyTest {
-
@Test
public void testSerialization() {
DataExistsReply expected = new DataExistsReply(true, DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class DataExistsTest {
-
@Test
public void testSerialization() {
DataExists expected = new DataExists(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
/**
* Unit tests for ReadDataReply.
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadDataReplyTest {
@Test
public void testSerialization() {
- NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ ContainerNode data = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION);
import static org.junit.Assert.assertTrue;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadDataTest {
-
@Test
public void testSerialization() {
ReadData expected = new ReadData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
/**
* Unit tests for ReadyLocalTransactionSerializer.
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadyLocalTransactionSerializerTest extends AbstractTest {
-
@Test
public void testToAndFromBinary() throws NotSerializableException {
DataTree dataTree = new InMemoryDataTreeFactory().create(
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, batched.getVersion());
assertTrue("isReady", batched.isReady());
assertTrue("isDoCommitOnReady", batched.isDoCommitOnReady());
- assertTrue("participatingShardNames present", batched.getParticipatingShardNames().isPresent());
- assertEquals("participatingShardNames", shardNames, batched.getParticipatingShardNames().get());
+ assertEquals("participatingShardNames", Optional.of(shardNames), batched.getParticipatingShardNames());
List<Modification> batchedMods = batched.getModifications();
assertEquals("getModifications size", 2, batchedMods.size());
import static org.junit.Assert.assertEquals;
import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
*
* @author Thomas Pantelis
*/
+@Deprecated(since = "9.0.0", forRemoval = true)
public class ReadyTransactionReplyTest {
@Test
*/
package org.opendaylight.controller.cluster.datastore.modification;
-import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+@Deprecated(since = "9.0.0", forRemoval = true)
public abstract class AbstractModificationTest {
private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
+ static final @NonNull ContainerNode TEST_CONTAINER = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
protected InMemoryDOMDataStore store;
@BeforeClass
cohort.commit();
}
- protected Optional<NormalizedNode<?, ?>> readData(final YangInstanceIdentifier path) throws Exception {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
- ListenableFuture<Optional<NormalizedNode<?, ?>>> future = transaction.read(path);
- return future.get();
+ protected Optional<NormalizedNode> readData(final YangInstanceIdentifier path) throws Exception {
+ try (var transaction = store.newReadOnlyTransaction()) {
+ return transaction.read(path).get();
+ }
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class DeleteModificationTest extends AbstractModificationTest {
-
@Test
public void testApply() throws Exception {
// Write something into the datastore
DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
- WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ WriteModification writeModification = new WriteModification(TestModel.TEST_PATH, TEST_CONTAINER);
writeModification.apply(writeTransaction);
commitTransaction(writeTransaction);
// Check if it's in the datastore
- Optional<NormalizedNode<?, ?>> data = readData(TestModel.TEST_PATH);
- Assert.assertTrue(data.isPresent());
+ assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
// Delete stuff from the datastore
DOMStoreWriteTransaction deleteTransaction = store.newWriteOnlyTransaction();
deleteModification.apply(deleteTransaction);
commitTransaction(deleteTransaction);
- data = readData(TestModel.TEST_PATH);
- Assert.assertFalse(data.isPresent());
+ assertEquals(Optional.empty(), readData(TestModel.TEST_PATH));
}
@Test
DeleteModification expected = new DeleteModification(path);
- DeleteModification clone = (DeleteModification) SerializationUtils.clone(expected);
+ DeleteModification clone = SerializationUtils.clone(expected);
assertEquals("getPath", expected.getPath(), clone.getPath());
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class MergeModificationTest extends AbstractModificationTest {
-
@Test
public void testApply() throws Exception {
//TODO : Need to write a better test for this
commitTransaction(writeTransaction);
//Check if it's in the datastore
- Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
- Assert.assertTrue(data.isPresent());
-
+ assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
}
@Test
public void testSerialization() {
- YangInstanceIdentifier path = TestModel.TEST_PATH;
- NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-
- MergeModification expected = new MergeModification(path, data);
+ MergeModification expected = new MergeModification(TestModel.TEST_PATH, Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build());
- MergeModification clone = (MergeModification) SerializationUtils.clone(expected);
+ MergeModification clone = SerializationUtils.clone(expected);
assertEquals("getPath", expected.getPath(), clone.getPath());
assertEquals("getData", expected.getData(), clone.getData());
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class MutableCompositeModificationTest extends AbstractModificationTest {
-
@Test
public void testApply() throws Exception {
-
MutableCompositeModification compositeModification = new MutableCompositeModification();
compositeModification.addModification(new WriteModification(TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME)));
compositeModification.apply(transaction);
commitTransaction(transaction);
- Optional<NormalizedNode<?, ?>> data = readData(TestModel.TEST_PATH);
-
- assertNotNull(data.get());
- assertEquals(TestModel.TEST_QNAME, data.get().getNodeType());
+ assertEquals(TestModel.TEST_QNAME, readData(TestModel.TEST_PATH).orElseThrow().name().getNodeType());
}
@Test
public void testSerialization() {
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ ContainerNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
- NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+ ContainerNode mergeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+ .build();
YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
- MutableCompositeModification compositeModification = new MutableCompositeModification();
+ MutableCompositeModification compositeModification =
+ new MutableCompositeModification(DataStoreVersions.POTASSIUM_VERSION);
+ compositeModification.addModification(new WriteModification(writePath, writeData));
+ compositeModification.addModification(new MergeModification(mergePath, mergeData));
+ compositeModification.addModification(new DeleteModification(deletePath));
+
+ final byte[] bytes = SerializationUtils.serialize(compositeModification);
+ assertEquals(360, bytes.length);
+ MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.deserialize(bytes);
+
+ assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, clone.getVersion());
+
+ assertEquals("getModifications size", 3, clone.getModifications().size());
+
+ WriteModification write = (WriteModification)clone.getModifications().get(0);
+ assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, write.getVersion());
+ assertEquals("getPath", writePath, write.getPath());
+ assertEquals("getData", writeData, write.getData());
+
+ MergeModification merge = (MergeModification)clone.getModifications().get(1);
+ assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, merge.getVersion());
+ assertEquals("getPath", mergePath, merge.getPath());
+ assertEquals("getData", mergeData, merge.getData());
+
+ DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+ assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, delete.getVersion());
+ assertEquals("getPath", deletePath, delete.getPath());
+ }
+
+ @Test
+ public void testSerializationModern() {
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ ContainerNode writeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
+
+ YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+ ContainerNode mergeData = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+ .build();
+
+ YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
+ MutableCompositeModification compositeModification =
+ new MutableCompositeModification();
compositeModification.addModification(new WriteModification(writePath, writeData));
compositeModification.addModification(new MergeModification(mergePath, mergeData));
compositeModification.addModification(new DeleteModification(deletePath));
- MutableCompositeModification clone = (MutableCompositeModification)
- SerializationUtils.clone(compositeModification);
+ final byte[] bytes = SerializationUtils.serialize(compositeModification);
+ assertEquals(360, bytes.length);
+ MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.deserialize(bytes);
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+@Deprecated(since = "9.0.0", forRemoval = true)
public class WriteModificationTest extends AbstractModificationTest {
-
@Test
public void testApply() throws Exception {
//Write something into the datastore
DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
- WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ WriteModification writeModification = new WriteModification(TestModel.TEST_PATH, TEST_CONTAINER);
writeModification.apply(writeTransaction);
commitTransaction(writeTransaction);
//Check if it's in the datastore
- Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
- Assert.assertTrue(data.isPresent());
+ assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
}
@Test
public void testSerialization() {
- YangInstanceIdentifier path = TestModel.TEST_PATH;
- NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-
- WriteModification expected = new WriteModification(path, data);
+ WriteModification expected = new WriteModification(TestModel.TEST_PATH, Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build());
- WriteModification clone = (WriteModification) SerializationUtils.clone(expected);
+ WriteModification clone = SerializationUtils.clone(expected);
assertEquals("getPath", expected.getPath(), clone.getPath());
assertEquals("getData", expected.getData(), clone.getData());
}
package org.opendaylight.controller.cluster.datastore.persisted;
public class AbortTransactionPayloadTest extends AbstractIdentifiablePayloadTest<AbortTransactionPayload> {
-
- @Override
- AbortTransactionPayload object() {
- return AbortTransactionPayload.create(nextTransactionId(), 512);
+ public AbortTransactionPayloadTest() {
+ super(AbortTransactionPayload.create(newTransactionId(0), 512), 125);
}
}
*/
package org.opendaylight.controller.cluster.datastore.persisted;
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
import org.apache.commons.lang3.SerializationUtils;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
-public abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+ private final T object;
+ private final int expectedSize;
- abstract T object();
+ AbstractIdentifiablePayloadTest(final T object, final int expectedSize) {
+ this.object = requireNonNull(object);
+ this.expectedSize = expectedSize;
+ }
@Test
public void testSerialization() {
- final T object = object();
- final T cloned = SerializationUtils.clone(object);
- Assert.assertEquals(object.getIdentifier(), cloned.getIdentifier());
+ final byte[] bytes = SerializationUtils.serialize(object);
+ assertEquals(expectedSize, bytes.length);
+ final T cloned = SerializationUtils.deserialize(bytes);
+ assertEquals(object.getIdentifier(), cloned.getIdentifier());
}
}
package org.opendaylight.controller.cluster.datastore.persisted;
public class CloseLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CloseLocalHistoryPayload> {
-
- @Override
- CloseLocalHistoryPayload object() {
- return CloseLocalHistoryPayload.create(nextHistoryId(), 512);
+ public CloseLocalHistoryPayloadTest() {
+ super(CloseLocalHistoryPayload.create(newHistoryId(0), 512), 124);
}
}
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.CandidateTransaction;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
public class CommitTransactionPayloadTest extends AbstractTest {
static final QName LEAF_SET = QName.create(TestModel.TEST_QNAME, "leaf-set");
private static DataTreeCandidateNode findNode(final Collection<DataTreeCandidateNode> nodes,
final PathArgument arg) {
for (DataTreeCandidateNode node : nodes) {
- if (arg.equals(node.getIdentifier())) {
+ if (arg.equals(node.name())) {
return node;
}
}
final Collection<DataTreeCandidateNode> actual) {
// Make sure all expected nodes are there
for (DataTreeCandidateNode exp : expected) {
- final DataTreeCandidateNode act = findNode(actual, exp.getIdentifier());
+ final DataTreeCandidateNode act = findNode(actual, exp.name());
assertNotNull("missing expected child", act);
assertCandidateNodeEquals(exp, act);
}
// Make sure no nodes are present which are not in the expected set
for (DataTreeCandidateNode act : actual) {
- final DataTreeCandidateNode exp = findNode(expected, act.getIdentifier());
+ final DataTreeCandidateNode exp = findNode(expected, act.name());
assertNull("unexpected child", exp);
}
}
- private static void assertCandidateEquals(final DataTreeCandidate expected,
- final DataTreeCandidateWithVersion actual) {
- final DataTreeCandidate candidate = actual.getCandidate();
+ private static void assertCandidateEquals(final DataTreeCandidate expected, final CandidateTransaction actual) {
+ final var candidate = actual.candidate();
assertEquals("root path", expected.getRootPath(), candidate.getRootPath());
assertCandidateNodeEquals(expected.getRootNode(), candidate.getRootNode());
}
private static void assertCandidateNodeEquals(final DataTreeCandidateNode expected,
final DataTreeCandidateNode actual) {
- assertEquals("child type", expected.getModificationType(), actual.getModificationType());
+ assertEquals("child type", expected.modificationType(), actual.modificationType());
- switch (actual.getModificationType()) {
+ switch (actual.modificationType()) {
case DELETE:
case WRITE:
- assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
- assertEquals("child data", expected.getDataAfter(), actual.getDataAfter());
+ assertEquals("child identifier", expected.name(), actual.name());
+ assertEquals("child data", expected.dataAfter(), actual.dataAfter());
break;
case SUBTREE_MODIFIED:
- assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
- assertChildrenEquals(expected.getChildNodes(), actual.getChildNodes());
+ assertEquals("child identifier", expected.name(), actual.name());
+ assertChildrenEquals(expected.childNodes(), actual.childNodes());
break;
case UNMODIFIED:
break;
default:
- fail("Unexpect root type " + actual.getModificationType());
+ fail("Unexpect root type " + actual.modificationType());
break;
}
}
@Before
public void setUp() {
setUpStatic();
- final YangInstanceIdentifier writePath = TestModel.TEST_PATH;
- final NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- candidate = DataTreeCandidates.fromNormalizedNode(writePath, writeData);
+ candidate = DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build());
}
@Test
public void testCandidateSerialization() throws IOException {
final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
assertEquals("payload size", 156, payload.size());
+ assertEquals("serialized size", 242, SerializationUtils.serialize(payload).length);
}
@Test
public void testCandidateSerDes() throws IOException {
final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
@Test
public void testPayloadSerDes() throws IOException {
final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate().getValue());
+ assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate());
}
- @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testLeafSetEntryNodeCandidate() throws Exception {
- YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
+ NodeWithValue<String> entryPathArg = new NodeWithValue<>(LEAF_SET, "one");
YangInstanceIdentifier leafSetEntryPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET)
.node(entryPathArg).build();
- NormalizedNode<?, ?> leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
- .withValue("one").build();
-
- candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, leafSetEntryNode);
+ candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, ImmutableNodes.leafSetEntry(entryPathArg));
CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
- @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testLeafSetNodeCandidate() throws Exception {
- YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
- LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
- .withValue("one").build();
- NormalizedNode<?, ?> leafSetNode = Builders.leafSetBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
- candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+ candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newSystemLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+ .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+ .build());
CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
- @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testOrderedLeafSetNodeCandidate() throws Exception {
- YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
- LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
- .withValue("one").build();
- NormalizedNode<?, ?> leafSetNode = Builders.orderedLeafSetBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
- candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+ candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newUserLeafSetBuilder()
+ .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+ .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+ .build());
CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
@Test
public void testLeafNodeCandidate() throws Exception {
YangInstanceIdentifier leafPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH)
.node(TestModel.DESC_QNAME).build();
- LeafNode<Object> leafNode = Builders.leafBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
- candidate = DataTreeCandidates.fromNormalizedNode(leafPath, leafNode);
+ candidate = DataTreeCandidates.fromNormalizedNode(leafPath,
+ ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test"));
CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
@Test
candidate = dataTree.prepare(modification);
CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
- assertCandidateEquals(candidate, payload.getCandidate().getValue());
+ assertCandidateEquals(candidate, payload.getCandidate());
}
}
package org.opendaylight.controller.cluster.datastore.persisted;
public class CreateLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CreateLocalHistoryPayload> {
-
- @Override
- CreateLocalHistoryPayload object() {
- return CreateLocalHistoryPayload.create(nextHistoryId(), 512);
+ public CreateLocalHistoryPayloadTest() {
+ super(CreateLocalHistoryPayload.create(newHistoryId(0), 512), 124);
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
import com.google.common.primitives.UnsignedLong;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendType;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
public class FrontendShardDataTreeSnapshotMetadataTest {
- @Test(expected = NullPointerException.class)
- public final void testCreateMetadataSnapshotNullInput() {
- new FrontendShardDataTreeSnapshotMetadata(null);
+ @Test
+ public void testCreateMetadataSnapshotNullInput() {
+ assertThrows(NullPointerException.class, () -> new FrontendShardDataTreeSnapshotMetadata(null));
}
@Test
- public final void testCreateMetadataSnapshotEmptyInput() throws Exception {
+ public void testCreateMetadataSnapshotEmptyInput() throws Exception {
final FrontendShardDataTreeSnapshotMetadata emptyOrigSnapshot = createEmptyMetadataSnapshot();
- final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot, 127);
+ final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot, 86);
testMetadataSnapshotEqual(emptyOrigSnapshot, emptyCopySnapshot);
}
@Test
- public final void testSerializeMetadataSnapshotWithOneClient() throws Exception {
+ public void testSerializeMetadataSnapshotWithOneClient() throws Exception {
final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(1);
- final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 162);
+ final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 121);
testMetadataSnapshotEqual(origSnapshot, copySnapshot);
}
@Test
- public final void testSerializeMetadataSnapshotWithMoreClients() throws Exception {
+ public void testSerializeMetadataSnapshotWithMoreClients() throws Exception {
final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(5);
- final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 314);
+ final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 273);
testMetadataSnapshotEqual(origSnapshot, copySnapshot);
}
final List<FrontendClientMetadata> origClientList = origSnapshot.getClients();
final List<FrontendClientMetadata> copyClientList = copySnapshot.getClients();
- assertTrue(origClientList.size() == copyClientList.size());
+ assertEquals(origClientList.size(), copyClientList.size());
final Map<ClientIdentifier, FrontendClientMetadata> origIdent = new HashMap<>();
final Map<ClientIdentifier, FrontendClientMetadata> copyIdent = new HashMap<>();
- origClientList.forEach(client -> origIdent.put(client.getIdentifier(), client));
- origClientList.forEach(client -> copyIdent.put(client.getIdentifier(), client));
+ origClientList.forEach(client -> origIdent.put(client.clientId(), client));
+ origClientList.forEach(client -> copyIdent.put(client.clientId(), client));
assertTrue(origIdent.keySet().containsAll(copyIdent.keySet()));
assertTrue(copyIdent.keySet().containsAll(origIdent.keySet()));
origIdent.values().forEach(client -> {
- final FrontendClientMetadata copyClient = copyIdent.get(client.getIdentifier());
- testObject(client.getIdentifier(), copyClient.getIdentifier());
- assertTrue(client.getPurgedHistories().equals(copyClient.getPurgedHistories()));
- assertTrue(client.getCurrentHistories().equals(copyClient.getCurrentHistories()));
+ final var copyClient = copyIdent.get(client.clientId());
+ testObject(client.clientId(), copyClient.clientId());
+ assertEquals(client.getPurgedHistories(), copyClient.getPurgedHistories());
+ assertEquals(client.getCurrentHistories(), copyClient.getCurrentHistories());
});
}
private static FrontendShardDataTreeSnapshotMetadata createEmptyMetadataSnapshot() {
- return new FrontendShardDataTreeSnapshotMetadata(Collections.<FrontendClientMetadata>emptyList());
+ return new FrontendShardDataTreeSnapshotMetadata(List.of());
}
private static FrontendShardDataTreeSnapshotMetadata createMetadataSnapshot(final int size) {
final FrontendIdentifier frontendIdentifier = FrontendIdentifier.create(MemberName.forName(indexName),
FrontendType.forName(index));
final ClientIdentifier clientIdentifier = ClientIdentifier.create(frontendIdentifier, num);
+ final ImmutableUnsignedLongSet purgedHistories = MutableUnsignedLongSet.of(0).immutableCopy();
- final RangeSet<UnsignedLong> purgedHistories = TreeRangeSet.create();
- purgedHistories.add(Range.closed(UnsignedLong.ZERO, UnsignedLong.ONE));
-
- final Collection<FrontendHistoryMetadata> currentHistories = Collections.singleton(
- new FrontendHistoryMetadata(num, num, true, ImmutableMap.of(UnsignedLong.ZERO, Boolean.TRUE),
- purgedHistories));
-
- return new FrontendClientMetadata(clientIdentifier, purgedHistories, currentHistories);
+ return new FrontendClientMetadata(clientIdentifier, purgedHistories, List.of(
+ new FrontendHistoryMetadata(num, num, true,
+ UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, Boolean.TRUE)), purgedHistories)));
}
private static <T> void testObject(final T object, final T equalObject) {
package org.opendaylight.controller.cluster.datastore.persisted;
public class PurgeLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<PurgeLocalHistoryPayload> {
-
- @Override
- PurgeLocalHistoryPayload object() {
- return PurgeLocalHistoryPayload.create(nextHistoryId(), 512);
+ public PurgeLocalHistoryPayloadTest() {
+ super(PurgeLocalHistoryPayload.create(newHistoryId(0), 512), 124);
}
}
package org.opendaylight.controller.cluster.datastore.persisted;
public class PurgeTransactionPayloadTest extends AbstractIdentifiablePayloadTest<PurgeTransactionPayload> {
-
- @Override
- PurgeTransactionPayload object() {
- return PurgeTransactionPayload.create(nextTransactionId(), 512);
+ public PurgeTransactionPayloadTest() {
+ super(PurgeTransactionPayload.create(newTransactionId(0), 512), 125);
}
}
package org.opendaylight.controller.cluster.datastore.persisted;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import com.google.common.collect.ImmutableMap;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.util.Optional;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
/**
* Unit tests for ShardDataTreeSnapshot.
* @author Thomas Pantelis
*/
public class ShardDataTreeSnapshotTest {
-
@Test
public void testShardDataTreeSnapshotWithNoMetadata() throws Exception {
- NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
.withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode);
}
final byte[] bytes = bos.toByteArray();
- assertEquals(236, bytes.length);
+ assertEquals(202, bytes.length);
ShardDataTreeSnapshot deserialized;
try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
}
- Optional<NormalizedNode<?, ?>> actualNode = deserialized.getRootNode();
- assertTrue("rootNode present", actualNode.isPresent());
- assertEquals("rootNode", expectedNode, actualNode.get());
+ assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
assertEquals("Metadata size", 0, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata().size());
}
@Test
public void testShardDataTreeSnapshotWithMetadata() throws Exception {
- NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
.withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> expMetadata =
- ImmutableMap.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
+ Map.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode, expMetadata);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
}
final byte[] bytes = bos.toByteArray();
- assertEquals(384, bytes.length);
+ assertEquals(350, bytes.length);
ShardDataTreeSnapshot deserialized;
try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
}
- Optional<NormalizedNode<?, ?>> actualNode = deserialized.getRootNode();
- assertTrue("rootNode present", actualNode.isPresent());
- assertEquals("rootNode", expectedNode, actualNode.get());
+ assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
assertEquals("Metadata", expMetadata, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata());
}
static class TestShardDataTreeSnapshotMetadata
extends ShardDataTreeSnapshotMetadata<TestShardDataTreeSnapshotMetadata> {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private final String data;
@Override
public boolean equals(final Object obj) {
- return obj instanceof TestShardDataTreeSnapshotMetadata
- && data.equals(((TestShardDataTreeSnapshotMetadata)obj).data);
+ return obj instanceof TestShardDataTreeSnapshotMetadata other && data.equals(other.data);
}
private static class Proxy implements Externalizable {
+ @java.io.Serial
+ private static final long serialVersionUID = 7534948936595056176L;
+
private String data;
@SuppressWarnings("checkstyle:RedundantModifier")
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
-import java.util.Collections;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
/**
@Test
public void testSerialization() {
ShardManagerSnapshot expected =
- new ShardManagerSnapshot(Arrays.asList("shard1", "shard2"), Collections.emptyMap());
- ShardManagerSnapshot cloned = (ShardManagerSnapshot) SerializationUtils.clone(expected);
+ new ShardManagerSnapshot(Arrays.asList("shard1", "shard2"));
+ ShardManagerSnapshot cloned = SerializationUtils.clone(expected);
assertEquals("getShardList", expected.getShardList(), cloned.getShardList());
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.Optional;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
/**
* Unit tests for ShardSnapshotState.
* @author Thomas Pantelis
*/
public class ShardSnapshotStateTest {
-
@Test
public void testSerialization() {
- NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ ContainerNode expectedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+ .build();
ShardSnapshotState expected = new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expectedNode));
- ShardSnapshotState cloned = (ShardSnapshotState) SerializationUtils.clone(expected);
+ ShardSnapshotState cloned = SerializationUtils.clone(expected);
assertNotNull("getSnapshot is null", cloned.getSnapshot());
assertEquals("getSnapshot type", MetadataShardDataTreeSnapshot.class, cloned.getSnapshot().getClass());
- assertEquals("getRootNode", expectedNode,
- ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode().get());
+ assertEquals("getRootNode", Optional.of(expectedNode),
+ ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+
+public class SkipTransactionsPayloadTest extends AbstractIdentifiablePayloadTest<SkipTransactionsPayload> {
+ public SkipTransactionsPayloadTest() {
+ super(SkipTransactionsPayload.create(newHistoryId(0), MutableUnsignedLongSet.of(42).immutableCopy(), 512), 131);
+ }
+}
TestKit kit = new TestKit(getSystem());
List<String> shardList = Arrays.asList("shard1", "shard2", "shard3");
- ShardManagerSnapshot shardManagerSnapshot = new ShardManagerSnapshot(shardList, Collections.emptyMap());
+ ShardManagerSnapshot shardManagerSnapshot = new ShardManagerSnapshot(shardList);
ActorRef replyActor = getSystem().actorOf(ShardManagerGetSnapshotReplyActor.props(
shardList, "config", shardManagerSnapshot, kit.getRef(),
"shard-manager", FiniteDuration.create(100, TimeUnit.SECONDS)), "testSuccess");
*/
package org.opendaylight.controller.cluster.datastore.shardmanager;
+import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.AddressFromURIString;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import java.net.URI;
import java.time.Duration;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
+import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractShardManagerTest;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
+import org.opendaylight.controller.cluster.datastore.AbstractClusterRefActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapperImpl;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.TestActorFactory;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
-public class ShardManagerTest extends AbstractShardManagerTest {
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class ShardManagerTest extends AbstractClusterRefActorTest {
private static final Logger LOG = LoggerFactory.getLogger(ShardManagerTest.class);
+ private static final MemberName MEMBER_1 = MemberName.forName("member-1");
private static final MemberName MEMBER_2 = MemberName.forName("member-2");
private static final MemberName MEMBER_3 = MemberName.forName("member-3");
+ private static int ID_COUNTER = 1;
+ private static ActorRef mockShardActor;
+ private static ShardIdentifier mockShardName;
+ private static SettableFuture<Empty> ready;
private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
+ private final String shardMrgIDSuffix = "config" + ID_COUNTER++;
+ private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
+ private final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder()
+ .dataStoreName(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
+ .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(6);
+
private final String shardMgrID = ShardManagerIdentifier.builder().type(shardMrgIDSuffix).build().toString();
@BeforeClass
TEST_SCHEMA_CONTEXT = null;
}
+ @Before
+ public void setUp() {
+ ready = SettableFuture.create();
+
+ InMemoryJournal.clear();
+ InMemorySnapshotStore.clear();
+
+ if (mockShardActor == null) {
+ mockShardName = ShardIdentifier.create(Shard.DEFAULT_NAME, MEMBER_1, "config");
+ mockShardActor = getSystem().actorOf(MessageCollectorActor.props(), mockShardName.toString());
+ }
+
+ MessageCollectorActor.clearMessages(mockShardActor);
+ }
+
+ @After
+ public void tearDown() {
+ InMemoryJournal.clear();
+ InMemorySnapshotStore.clear();
+
+ mockShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ await().atMost(Duration.ofSeconds(10)).until(mockShardActor::isTerminated);
+ mockShardActor = null;
+
+ actorFactory.close();
+ }
+
+ private TestShardManager.Builder newTestShardMgrBuilder() {
+ return TestShardManager.builder(datastoreContextBuilder)
+ .distributedDataStore(mock(ClientBackedDataStore.class));
+ }
+
+ private TestShardManager.Builder newTestShardMgrBuilder(final Configuration config) {
+ return newTestShardMgrBuilder().configuration(config);
+ }
+
+ private Props newShardMgrProps() {
+ return newShardMgrProps(new MockConfiguration());
+ }
+
+ private Props newShardMgrProps(final Configuration config) {
+ return newTestShardMgrBuilder(config).readinessFuture(ready).props();
+ }
+
private ActorSystem newActorSystem(final String config) {
return newActorSystem("cluster-test", config);
}
return system.actorOf(MessageCollectorActor.props(), name);
}
- private Props newShardMgrProps() {
- return newShardMgrProps(new MockConfiguration());
- }
-
private static DatastoreContextFactory newDatastoreContextFactory(final DatastoreContext datastoreContext) {
DatastoreContextFactory mockFactory = mock(DatastoreContextFactory.class);
doReturn(datastoreContext).when(mockFactory).getBaseDatastoreContext();
}
private TestShardManager.Builder newTestShardMgrBuilderWithMockShardActor(final ActorRef shardActor) {
- return TestShardManager.builder(datastoreContextBuilder).shardActor(shardActor)
- .distributedDataStore(mock(DistributedDataStore.class));
+ return TestShardManager.builder(datastoreContextBuilder)
+ .shardActor(shardActor)
+ .distributedDataStore(mock(ClientBackedDataStore.class));
}
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
DataTree mockDataTree = mock(DataTree.class);
shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mockDataTree,
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
MockClusterWrapper.sendMemberUp(shardManager, "member-2", kit.getRef().path().toString());
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), kit.getRef());
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
shardManager.tell(
kit.expectNoMessage(Duration.ofMillis(150));
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
kit.expectNoMessage(Duration.ofMillis(150));
kit.expectMsgClass(Duration.ofSeconds(2), NotInitializedException.class);
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
kit.expectNoMessage(Duration.ofMillis(200));
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
RaftState.Candidate.name()), mockShardActor);
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
RaftState.IsolatedLeader.name()), mockShardActor);
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), kit.getRef());
shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager2.tell(new ActorInitialized(), mockShardActor2);
+ shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix;
short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
final TestKit kit = new TestKit(system1);
shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager1.tell(new ActorInitialized(), mockShardActor1);
- shardManager2.tell(new ActorInitialized(), mockShardActor2);
+ shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+ shardManager2.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
kit.getRef());
shardManager1.underlyingActor().waitForUnreachableMember();
-
- PeerDown peerDown = MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class);
- assertEquals("getMemberName", MEMBER_2, peerDown.getMemberName());
MessageCollectorActor.clearMessages(mockShardActor1);
shardManager1.tell(MockClusterWrapper.createMemberRemoved("member-2", "akka://cluster-test@127.0.0.1:2558"),
kit.getRef());
- MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class);
-
shardManager1.tell(new FindPrimary("default", true), kit.getRef());
kit.expectMsgClass(Duration.ofSeconds(5), NoShardLeaderException.class);
shardManager1.underlyingActor().waitForReachableMember();
- PeerUp peerUp = MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class);
- assertEquals("getMemberName", MEMBER_2, peerUp.getMemberName());
- MessageCollectorActor.clearMessages(mockShardActor1);
-
shardManager1.tell(new FindPrimary("default", true), kit.getRef());
RemotePrimaryShardFound found1 = kit.expectMsgClass(Duration.ofSeconds(5), RemotePrimaryShardFound.class);
shardManager1.tell(MockClusterWrapper.createMemberUp("member-2", "akka://cluster-test@127.0.0.1:2558"),
kit.getRef());
- MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class);
-
// Test FindPrimary wait succeeds after reachable member event.
shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2",
final TestKit kit = new TestKit(system1);
shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager1.tell(new ActorInitialized(), mockShardActor1);
- shardManager2.tell(new ActorInitialized(), mockShardActor2);
+ shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+ shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
final TestKit kit256 = new TestKit(system256);
shardManager256.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
- shardManager256.tell(new ActorInitialized(), mockShardActor256);
- shardManager2.tell(new ActorInitialized(), mockShardActor2);
+ shardManager256.tell(new ActorInitialized(mockShardActor256), ActorRef.noSender());
+ shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
String memberId256 = "member-256-shard-default-" + shardMrgIDSuffix;
String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), kit.getRef());
Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
new Timeout(5, TimeUnit.SECONDS));
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
Object resp = Await.result(future, kit.duration("5 seconds"));
assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new SwitchShardBehavior(mockShardName, RaftState.Leader, 1000), kit.getRef());
.persistent(false).build();
Shard.Builder shardBuilder = Shard.builder();
- ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+ ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
"foo", null, members("member-1", "member-5", "member-6"));
shardManager.tell(new CreateShard(config, shardBuilder, datastoreContext), kit.getRef());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), ActorRef.noSender());
Shard.Builder shardBuilder = Shard.builder();
- ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+ ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
"foo", null, members("member-5", "member-6"));
shardManager.tell(new CreateShard(config, shardBuilder, null), kit.getRef());
Shard.Builder shardBuilder = Shard.builder();
- ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+ ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
"foo", null, members("member-1"));
shardManager.tell(new CreateShard(config, shardBuilder, null), kit.getRef());
.put("astronauts", Collections.<String>emptyList()).build());
ShardManagerSnapshot snapshot =
- new ShardManagerSnapshot(Arrays.asList("shard1", "shard2", "astronauts"), Collections.emptyMap());
+ new ShardManagerSnapshot(Arrays.asList("shard1", "shard2", "astronauts"));
DatastoreSnapshot restoreFromSnapshot = new DatastoreSnapshot(shardMrgIDSuffix, snapshot,
Collections.<ShardSnapshot>emptyList());
TestActorRef<TestShardManager> shardManager = actorFactory.createTestActor(newTestShardMgrBuilder(mockConfig)
newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+ leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
leaderShardManager.tell(
// persisted.
String[] restoredShards = { "default", "people" };
ShardManagerSnapshot snapshot =
- new ShardManagerSnapshot(Arrays.asList(restoredShards), Collections.emptyMap());
+ new ShardManagerSnapshot(Arrays.asList(restoredShards));
InMemorySnapshotStore.addSnapshot(shardManagerID, snapshot);
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.MILLISECONDS);
.createTestActor(newPropsShardMgrWithMockShardActor(), shardMgrID);
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
String leaderId = "leader-member-shard-default-" + shardMrgIDSuffix;
AddServerReply addServerReply = new AddServerReply(ServerChangeStatus.ALREADY_EXISTS, null);
ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
DataStoreVersions.CURRENT_VERSION), kit.getRef());
shardManager.tell(
ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), respondActor);
+ shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
DataStoreVersions.CURRENT_VERSION), kit.getRef());
shardManager.tell(
newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
- newReplicaShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+ leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
+ newReplicaShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
leaderShardManager.tell(
shardManager.underlyingActor().waitForRecoveryComplete();
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), shard);
+ shardManager.tell(new ActorInitialized(shard), ActorRef.noSender());
waitForShardInitialized(shardManager, "people", kit);
waitForShardInitialized(shardManager, "default", kit);
.put("people", Arrays.asList("member-1", "member-2")).build());
String[] restoredShards = {"default", "astronauts"};
ShardManagerSnapshot snapshot =
- new ShardManagerSnapshot(Arrays.asList(restoredShards), Collections.emptyMap());
+ new ShardManagerSnapshot(Arrays.asList(restoredShards));
InMemorySnapshotStore.addSnapshot("shard-manager-" + shardMrgIDSuffix, snapshot);
// create shardManager to come up with restored data
.addShardActor("shard1", shard1).addShardActor("shard2", shard2).props());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), shard1);
- shardManager.tell(new ActorInitialized(), shard2);
+ shardManager.tell(new ActorInitialized(shard1), ActorRef.noSender());
+ shardManager.tell(new ActorInitialized(shard2), ActorRef.noSender());
FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
Future<Boolean> stopFuture = Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE);
ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), respondActor);
+ shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
DataStoreVersions.CURRENT_VERSION), kit.getRef());
shardManager.tell(
ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), respondActor);
+ shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
shardManager.tell(new RoleChangeNotification(memberId, null, RaftState.Follower.name()), respondActor);
shardManager.tell(
final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
- shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
final Consumer<String> mockCallback = mock(Consumer.class);
shardManager.tell(new RegisterForShardAvailabilityChanges(mockCallback), kit.getRef());
}
Builder shardActor(final ActorRef newShardActor) {
- this.shardActor = newShardActor;
+ shardActor = newShardActor;
return this;
}
String peerId = ShardIdentifier.create("default", MEMBER_2, type).toString();
- String address = "akka.tcp://opendaylight-cluster-data@127.0.0.1:2550/user/shardmanager-" + type
+ String address = "akka://opendaylight-cluster-data@127.0.0.1:2550/user/shardmanager-" + type
+ "/" + MEMBER_2.getName() + "-shard-default-" + type;
resolver.setResolved(peerId, address);
import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class ShardStrategyFactoryTest {
@Before
public void setUp() {
- factory = new ShardStrategyFactory(new ConfigurationImpl("module-shards.conf", "modules.conf"),
- LogicalDatastoreType.CONFIGURATION);
+ factory = new ShardStrategyFactory(new ConfigurationImpl("module-shards.conf", "modules.conf"));
}
@Test
import org.opendaylight.controller.cluster.raft.utils.EchoActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
import scala.concurrent.duration.FiniteDuration;
public class ActorUtilsTest extends AbstractActorTest {
-
static final Logger LOG = LoggerFactory.getLogger(ActorUtilsTest.class);
- private static class TestMessage {
+ private static final class TestMessage {
+
}
private static final class MockShardManager extends UntypedAbstractActor {
}
@Override public void onReceive(final Object message) {
- if (message instanceof FindPrimary) {
- FindPrimary fp = (FindPrimary)message;
+ if (message instanceof FindPrimary fp) {
Object resp = findPrimaryResponses.get(fp.getShardName());
if (resp == null) {
LOG.error("No expected FindPrimary response found for shard name {}", fp.getShardName());
final ActorRef actorRef;
MockShardManagerCreator() {
- this.found = false;
- this.actorRef = null;
+ found = false;
+ actorRef = null;
}
MockShardManagerCreator(final boolean found, final ActorRef actorRef) {
ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
mock(ClusterWrapper.class), mock(Configuration.class));
- Optional<ActorRef> out = actorUtils.findLocalShard("default");
-
- assertEquals(shardActorRef, out.get());
+ assertEquals(Optional.of(shardActorRef), actorUtils.findLocalShard("default"));
testKit.expectNoMessage();
return null;
assertNotNull(actual);
assertTrue("LocalShardDataTree present", actual.getLocalShardDataTree().isPresent());
- assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().get());
+ assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().orElseThrow());
assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
assertEquals("getPrimaryShardVersion", DataStoreVersions.CURRENT_VERSION, actual.getPrimaryShardVersion());
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.cluster.UniqueAddress;
+import akka.util.Version;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import scala.collection.immutable.Set.Set1;
public static MemberRemoved createMemberRemoved(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), new Set1<>(memberName), Version.Zero());
return new MemberRemoved(member, MemberStatus.up());
}
public static MemberUp createMemberUp(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new MemberUp(member);
}
public static UnreachableMember createUnreachableMember(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new UnreachableMember(member);
}
public static ReachableMember createReachableMember(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new ReachableMember(member);
}
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
import org.opendaylight.controller.cluster.datastore.config.ModuleConfig;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
public class MockConfiguration extends ConfigurationImpl {
public MockConfiguration() {
return retMap;
});
}
-
- @Override
- public ShardStrategy getStrategyForPrefix(final DOMDataTreeIdentifier prefix) {
- return null;
- }
}
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.DistinctNodeContainer;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
public void reset(final int newExpChangeEventCount) {
changeLatch = new CountDownLatch(newExpChangeEventCount);
- this.expChangeEventCount = newExpChangeEventCount;
+ expChangeEventCount = newExpChangeEventCount;
synchronized (changeList) {
changeList.clear();
}
}
@Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
if (changeLatch.getCount() > 0) {
synchronized (changeList) {
changeList.addAll(changes);
for (int i = 0; i < expPaths.length; i++) {
final DataTreeCandidate candidate = changeList.get(i);
- final Optional<NormalizedNode<?, ?>> maybeDataAfter = candidate.getRootNode().getDataAfter();
- if (!maybeDataAfter.isPresent()) {
+ final NormalizedNode dataAfter = candidate.getRootNode().dataAfter();
+ if (dataAfter == null) {
fail(String.format("Change %d does not contain data after. Actual: %s", i + 1,
- candidate.getRootNode()));
+ candidate.getRootNode()));
}
- final NormalizedNode<?, ?> dataAfter = maybeDataAfter.get();
final Optional<YangInstanceIdentifier> relativePath = expPaths[i].relativeTo(candidate.getRootPath());
if (!relativePath.isPresent()) {
assertEquals(String.format("Change %d does not contain %s. Actual: %s", i + 1, expPaths[i],
- dataAfter), expPaths[i].getLastPathArgument(), dataAfter.getIdentifier());
+ dataAfter), expPaths[i].getLastPathArgument(), dataAfter.name());
} else {
- NormalizedNode<?, ?> nextChild = dataAfter;
- for (PathArgument pathArg: relativePath.get().getPathArguments()) {
+ NormalizedNode nextChild = dataAfter;
+ for (PathArgument pathArg: relativePath.orElseThrow().getPathArguments()) {
boolean found = false;
- if (nextChild instanceof NormalizedNodeContainer) {
- Optional<NormalizedNode<?, ?>> maybeChild = ((NormalizedNodeContainer)nextChild)
- .getChild(pathArg);
+ if (nextChild instanceof DistinctNodeContainer) {
+ Optional<NormalizedNode> maybeChild = ((DistinctNodeContainer)nextChild)
+ .findChildByArg(pathArg);
if (maybeChild.isPresent()) {
found = true;
- nextChild = maybeChild.get();
+ nextChild = maybeChild.orElseThrow();
}
}
import static org.junit.Assert.assertTrue;
import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.FluentFuture;
import java.util.Collection;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
public class NormalizedNodeAggregatorTest {
@Test
- public void testAggregate() throws InterruptedException, ExecutionException,
- DataValidationFailedException {
+ public void testAggregate() throws InterruptedException, ExecutionException, DataValidationFailedException {
EffectiveModelContext schemaContext = SchemaContextHelper.full();
- NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
- Optional<NormalizedNode<?, ?>> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.empty(),
+ NormalizedNode expectedNode1 = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+ .build();
+ NormalizedNode expectedNode2 = ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+ .build();
+
+ Optional<NormalizedNode> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(),
ImmutableList.of(
- Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode1, schemaContext)),
- Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode2, schemaContext))),
+ Optional.<NormalizedNode>of(getRootNode(expectedNode1, schemaContext)),
+ Optional.<NormalizedNode>of(getRootNode(expectedNode2, schemaContext))),
schemaContext, LogicalDatastoreType.CONFIGURATION);
- NormalizedNode<?,?> normalizedNode = optional.get();
+ NormalizedNode normalizedNode = optional.orElseThrow();
- assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+ assertTrue("Expect value to be a Collection", normalizedNode.body() instanceof Collection);
@SuppressWarnings("unchecked")
- Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+ Collection<NormalizedNode> collection = (Collection<NormalizedNode>) normalizedNode.body();
- for (NormalizedNode<?,?> node : collection) {
+ for (NormalizedNode node : collection) {
assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
}
}
- public static NormalizedNode<?, ?> getRootNode(final NormalizedNode<?, ?> moduleNode,
+ public static NormalizedNode getRootNode(final NormalizedNode moduleNode,
final EffectiveModelContext schemaContext) throws ExecutionException, InterruptedException {
try (InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", Executors.newSingleThreadExecutor())) {
store.onModelContextUpdated(schemaContext);
DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
- writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.getNodeType()), moduleNode);
+ writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.name().getNodeType()), moduleNode);
DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
- FluentFuture<Optional<NormalizedNode<?, ?>>> read = readTransaction.read(YangInstanceIdentifier.empty());
-
- Optional<NormalizedNode<?, ?>> nodeOptional = read.get();
-
- return nodeOptional.get();
+ return readTransaction.read(YangInstanceIdentifier.of()).get().orElseThrow();
}
}
- public static NormalizedNode<?,?> findChildWithQName(final Collection<NormalizedNode<?, ?>> collection,
+ public static NormalizedNode findChildWithQName(final Collection<NormalizedNode> collection,
final QName qname) {
- for (NormalizedNode<?, ?> node : collection) {
- if (node.getNodeType().equals(qname)) {
+ for (NormalizedNode node : collection) {
+ if (node.name().getNodeType().equals(qname)) {
return node;
}
}
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
@Test
public void testMerge() {
- NormalizedNode<?, ?> normalizedNode = CarsModel.create();
+ NormalizedNode normalizedNode = CarsModel.create();
YangInstanceIdentifier path = CarsModel.BASE_PATH;
pruningDataTreeModification.merge(path, normalizedNode);
@Test
public void testMergeWithInvalidNamespace() throws DataValidationFailedException {
- NormalizedNode<?, ?> normalizedNode = PeopleModel.emptyContainer();
+ NormalizedNode normalizedNode = PeopleModel.emptyContainer();
YangInstanceIdentifier path = PeopleModel.BASE_PATH;
pruningDataTreeModification.merge(path, normalizedNode);
verify(mockModification, times(1)).merge(path, normalizedNode);
DataTreeCandidate candidate = getCandidate();
- assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+ assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
}
@Test
public void testMergeWithInvalidChildNodeNames() throws DataValidationFailedException {
- ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
- ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
- DataContainerChild<?, ?> outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
- ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
- .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug")).build();
+ DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
+ ContainerNode normalizedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(outerNode)
+ .withChild(Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+ .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+ .build())
+ .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+ .build();
YangInstanceIdentifier path = TestModel.TEST_PATH;
dataTree.commit(getCandidate());
- ContainerNode prunedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode).build();
+ ContainerNode prunedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(outerNode)
+ .build();
- Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(path);
- assertTrue("After pruning present", actual.isPresent());
- assertEquals("After pruning", prunedNode, actual.get());
+ assertEquals("After pruning", Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
}
@Test
public void testMergeWithValidNamespaceAndInvalidNodeName() throws DataValidationFailedException {
- NormalizedNode<?, ?> normalizedNode = ImmutableNodes.containerNode(INVALID_TEST_QNAME);
+ NormalizedNode normalizedNode = ImmutableNodes.containerNode(INVALID_TEST_QNAME);
YangInstanceIdentifier path = INVALID_TEST_PATH;
pruningDataTreeModification.merge(path, normalizedNode);
verify(mockModification, times(1)).merge(path, normalizedNode);
DataTreeCandidate candidate = getCandidate();
- assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+ assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
}
@Test
public void testWrite() {
- NormalizedNode<?, ?> normalizedNode = CarsModel.create();
+ NormalizedNode normalizedNode = CarsModel.create();
YangInstanceIdentifier path = CarsModel.BASE_PATH;
pruningDataTreeModification.write(path, normalizedNode);
localDataTree.validate(mod);
localDataTree.commit(localDataTree.prepare(mod));
- NormalizedNode<?, ?> normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
- pruningDataTreeModification.write(YangInstanceIdentifier.empty(), normalizedNode);
+ NormalizedNode normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
+ pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
dataTree.commit(getCandidate());
- Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty());
- assertTrue("Root present", actual.isPresent());
- assertEquals("Root node", normalizedNode, actual.get());
+ assertEquals(Optional.of(normalizedNode), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
}
@Test
final Shard mockShard = Mockito.mock(Shard.class);
ShardDataTree shardDataTree = new ShardDataTree(mockShard, SCHEMA_CONTEXT, TreeType.CONFIGURATION);
- NormalizedNode<?, ?> root = shardDataTree.readNode(YangInstanceIdentifier.empty()).get();
+ NormalizedNode root = shardDataTree.readNode(YangInstanceIdentifier.of()).orElseThrow();
- NormalizedNode<?, ?> normalizedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(root.getNodeType())).withChild(
- ImmutableNodes.containerNode(AUG_CONTAINER)).build();
- pruningDataTreeModification.write(YangInstanceIdentifier.empty(), normalizedNode);
+ NormalizedNode normalizedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(root.name().getNodeType()))
+ .withChild(ImmutableNodes.containerNode(AUG_CONTAINER))
+ .build();
+ pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
dataTree.commit(getCandidate());
- Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty());
- assertEquals("Root present", true, actual.isPresent());
- assertEquals("Root node", root, actual.get());
+ assertEquals(Optional.of(root), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
}
@Test
public void testWriteWithInvalidNamespace() throws DataValidationFailedException {
- NormalizedNode<?, ?> normalizedNode = PeopleModel.emptyContainer();
+ NormalizedNode normalizedNode = PeopleModel.emptyContainer();
YangInstanceIdentifier path = PeopleModel.BASE_PATH;
pruningDataTreeModification.write(path, normalizedNode);
verify(mockModification, times(1)).write(path, normalizedNode);
DataTreeCandidate candidate = getCandidate();
- assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+ assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
}
@Test
public void testWriteWithInvalidChildNodeNames() throws DataValidationFailedException {
- ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
- ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
- DataContainerChild<?, ?> outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
- ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
- .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
- .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+ DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
+ ContainerNode normalizedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(outerNode)
+ .withChild(Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+ .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+ .build())
+ .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+ .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+ .build();
YangInstanceIdentifier path = TestModel.TEST_PATH;
dataTree.commit(getCandidate());
- ContainerNode prunedNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
- .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+ ContainerNode prunedNode = Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(outerNode)
+ .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+ .build();
- Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(path);
- assertTrue("After pruning present", actual.isPresent());
- assertEquals("After pruning", prunedNode, actual.get());
+ assertEquals(Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
}
@Test
package org.opendaylight.controller.cluster.datastore.utils;
import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.greaterThan;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
+import com.google.common.base.Stopwatch;
+import java.time.Duration;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.time.StopWatch;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+// FIXME: use Strict runner
+@RunWith(MockitoJUnitRunner.Silent.class)
public class TransactionRateLimiterTest {
-
@Mock
public ActorUtils actorUtils;
-
@Mock
public DatastoreContext datastoreContext;
-
@Mock
public Timer commitTimer;
-
@Mock
private Timer.Context commitTimerContext;
-
@Mock
private Snapshot commitSnapshot;
@Before
public void setUp() {
- MockitoAnnotations.initMocks(this);
doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
doReturn(30).when(datastoreContext).getShardTransactionCommitTimeoutInSeconds();
doReturn(100L).when(datastoreContext).getTransactionCreationInitialRateLimit();
}
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(292));
-
assertEquals(147, rateLimiter.getPollOnCount());
}
-
@Test
public void testAcquirePercentileValueZero() {
-
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
doReturn(TimeUnit.MILLISECONDS.toNanos(0) * 1D).when(commitSnapshot).getValue(0.1);
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(192));
-
assertEquals(97, rateLimiter.getPollOnCount());
}
@Test
public void testAcquireOnePercentileValueVeryHigh() {
-
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
doReturn(TimeUnit.MILLISECONDS.toNanos(10000) * 1D).when(commitSnapshot).getValue(1.0);
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(282));
-
assertEquals(142, rateLimiter.getPollOnCount());
}
}
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
// The initial rate limit will be retained here because the calculated rate limit was too small
assertThat(rateLimiter.getTxCreationLimit(), approximately(100));
-
assertEquals(1, rateLimiter.getPollOnCount());
}
@Test
public void testAcquireWithRealPercentileValues() {
-
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
doReturn(TimeUnit.MILLISECONDS.toNanos(200) * 1D).when(commitSnapshot).getValue(1.0);
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
assertEquals(51, rateLimiter.getPollOnCount());
}
DatastoreContext.getGlobalDatastoreNames().add("operational");
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(292));
-
assertEquals(147, rateLimiter.getPollOnCount());
}
@Test
public void testRateLimiting() {
-
for (int i = 1; i < 11; i++) {
doReturn(TimeUnit.SECONDS.toNanos(1) * 1D).when(commitSnapshot).getValue(i * 0.1);
}
- TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
- StopWatch watch = new StopWatch();
-
- watch.start();
+ final TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
+ final Stopwatch watch = Stopwatch.createStarted();
rateLimiter.acquire();
rateLimiter.acquire();
watch.stop();
- assertTrue("did not take as much time as expected rate limit : " + rateLimiter.getTxCreationLimit(),
- watch.getTime() > 1000);
+ assertThat("did not take as much time as expected rate limit : " + rateLimiter.getTxCreationLimit(),
+ watch.elapsed(), greaterThan(Duration.ofSeconds(1)));
}
@Test
public void testRateLimitNotCalculatedUntilPollCountReached() {
-
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
doReturn(TimeUnit.MILLISECONDS.toNanos(200) * 1D).when(commitSnapshot).getValue(1.0);
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
assertEquals(51, rateLimiter.getPollOnCount());
for (int i = 0; i < 49; i++) {
@Test
public void testAcquireNegativeAcquireAndPollOnCount() {
-
for (int i = 1; i < 11; i++) {
// Keep on increasing the amount of time it takes to complete transaction for each tenth of a
// percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
rateLimiter.setAcquireCount(Long.MAX_VALUE - 1);
rateLimiter.setPollOnCount(Long.MAX_VALUE);
-
rateLimiter.acquire();
assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
assertEquals(-9223372036854775759L, rateLimiter.getPollOnCount());
for (int i = 0; i < 50; i++) {
}
verify(commitTimer, times(2)).getSnapshot();
-
}
public Matcher<Double> approximately(final double val) {
}
};
}
-
-
}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.base.VerifyException;
+import com.google.common.io.ByteStreams;
+import com.google.common.primitives.UnsignedLong;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap.Regular;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class UnsignedLongBitmapTest {
+ @Test
+ public void testEmpty() throws IOException {
+ final var empty = UnsignedLongBitmap.of();
+ assertTrue(empty.isEmpty());
+ assertEquals(empty, empty);
+ assertSame(empty, UnsignedLongBitmap.copyOf(Map.of()));
+ assertEquals(Map.of(), empty.mutableCopy());
+ assertEquals("{}", empty.toString());
+ assertEquals(0, empty.hashCode());
+
+ final var ex = assertThrows(IOException.class, () -> empty.writeEntriesTo(mock(DataOutput.class), 1));
+ assertEquals("Mismatched size: expected 0, got 1", ex.getMessage());
+
+ // Should not do anything
+ empty.writeEntriesTo(mock(DataOutput.class), 0);
+
+ assertSame(empty, assertWriteToReadFrom(empty));
+ }
+
+ @Test
+ public void testSingleton() {
+ final var one = UnsignedLongBitmap.of(0, false);
+ assertFalse(one.isEmpty());
+ assertEquals(1, one.size());
+ assertEquals(one, one);
+ assertEquals(one, UnsignedLongBitmap.of(0, false));
+ assertEquals(one, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false)));
+ assertEquals(Map.of(UnsignedLong.ZERO, false), one.mutableCopy());
+ assertEquals("{0=false}", one.toString());
+ assertEquals(1237, one.hashCode());
+
+ final var ex = assertThrows(IOException.class, () -> one.writeEntriesTo(mock(DataOutput.class), 0));
+ assertEquals("Mismatched size: expected 1, got 0", ex.getMessage());
+
+ assertEquals(one, UnsignedLongBitmap.of(0, false));
+ assertNotEquals(one, UnsignedLongBitmap.of(0, true));
+ assertNotEquals(one, UnsignedLongBitmap.of(1, false));
+ assertNotEquals(UnsignedLongBitmap.of(), one);
+ assertNotEquals(one, UnsignedLongBitmap.of());
+
+ assertWriteToReadFrom(one);
+ }
+
+ @Test
+ public void testRegular() {
+ final var one = UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false, UnsignedLong.ONE, true));
+ assertFalse(one.isEmpty());
+ assertEquals(2, one.size());
+ assertEquals(one, one);
+ assertEquals(one, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ONE, true, UnsignedLong.ZERO, false)));
+ assertEquals(Map.of(UnsignedLong.ZERO, false, UnsignedLong.ONE, true), one.mutableCopy());
+
+ assertNotEquals(one,
+ UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false, UnsignedLong.valueOf(2), true)));
+ assertEquals("{0=false, 1=true}", one.toString());
+ assertEquals(40345, one.hashCode());
+
+ final var ex = assertThrows(IOException.class, () -> one.writeEntriesTo(mock(DataOutput.class), 1));
+ assertEquals("Mismatched size: expected 2, got 1", ex.getMessage());
+
+ final var two = UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, true, UnsignedLong.ONE, false));
+ assertFalse(two.isEmpty());
+ assertEquals(2, two.size());
+ assertEquals(two, two);
+ assertEquals(two, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, true, UnsignedLong.ONE, false)));
+ assertEquals("{0=true, 1=false}", two.toString());
+ assertEquals(40549, two.hashCode());
+
+ assertNotEquals(one, two);
+ assertNotEquals(two, one);
+
+ assertWriteToReadFrom(one);
+ assertWriteToReadFrom(two);
+ }
+
+ private static UnsignedLongBitmap assertWriteToReadFrom(final UnsignedLongBitmap orig) {
+ final var dos = ByteStreams.newDataOutput();
+ try {
+ orig.writeEntriesTo(dos);
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+
+ final UnsignedLongBitmap copy;
+ try {
+ final var dis = ByteStreams.newDataInput(dos.toByteArray());
+ copy = UnsignedLongBitmap.readFrom(dis, orig.size());
+ assertThrows(IllegalStateException.class, () -> dis.readByte());
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+
+ assertEquals(orig, copy);
+ return copy;
+ }
+
+ @Test
+ public void testKeyOrder() throws IOException {
+ assertInvalidKey(0);
+ assertInvalidKey(1);
+ }
+
+ private static void assertInvalidKey(final long second) throws IOException {
+ final var out = ByteStreams.newDataOutput();
+ WritableObjects.writeLong(out, 1);
+ out.writeBoolean(false);
+ WritableObjects.writeLong(out, second);
+ out.writeBoolean(true);
+
+ final var ex = assertThrows(IOException.class,
+ () -> UnsignedLongBitmap.readFrom(ByteStreams.newDataInput(out.toByteArray()), 2));
+ assertEquals("Key " + second + " may not be used after key 1", ex.getMessage());
+ }
+
+ @Test
+ public void testInvalidArrays() {
+ assertThrows(VerifyException.class, () -> new Regular(new long[0], new boolean[] { false, false }));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class UnsignedLongSetTest {
+ @Test
+ public void testOperations() {
+ final var set = MutableUnsignedLongSet.of();
+ assertEquals("MutableUnsignedLongSet{size=0}", set.toString());
+ assertFalse(set.contains(0));
+
+ set.add(0);
+ assertTrue(set.contains(0));
+ assertRanges("[[0..0]]", set);
+
+ set.add(1);
+ assertTrue(set.contains(1));
+ assertRanges("[[0..1]]", set);
+ set.add(1);
+ assertRanges("[[0..1]]", set);
+
+ set.add(4);
+ assertRanges("[[0..1], [4..4]]", set);
+
+ set.add(3);
+ assertRanges("[[0..1], [3..4]]", set);
+
+ set.add(2);
+ assertRanges("[[0..4]]", set);
+
+ assertTrue(set.contains(2));
+ assertTrue(set.contains(3));
+ assertTrue(set.contains(4));
+
+ set.add(8);
+ assertRanges("[[0..4], [8..8]]", set);
+ set.add(6);
+ assertRanges("[[0..4], [6..6], [8..8]]", set);
+ set.add(7);
+ assertRanges("[[0..4], [6..8]]", set);
+ set.add(5);
+ assertRanges("[[0..8]]", set);
+
+ set.add(11);
+ assertRanges("[[0..8], [11..11]]", set);
+ set.add(9);
+ assertRanges("[[0..9], [11..11]]", set);
+ }
+
+ @Test
+ public void testSerialization() throws IOException {
+
+ final var set = MutableUnsignedLongSet.of(0, 1, 4, 3).immutableCopy();
+
+ final var bos = new ByteArrayOutputStream();
+ try (var out = new DataOutputStream(bos)) {
+ set.writeTo(out);
+ }
+
+ final var bytes = bos.toByteArray();
+ assertArrayEquals(new byte[] { 0, 0, 0, 2, 16, 2, 17, 3, 5 }, bytes);
+
+ final ImmutableUnsignedLongSet read;
+ try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
+ read = ImmutableUnsignedLongSet.readFrom(in);
+ assertEquals(0, in.available());
+ }
+
+ assertEquals(set, read);
+ }
+
+ @Test
+ public void testToRangeSet() {
+ final var set = MutableUnsignedLongSet.of(0, 1, 4, 3);
+ assertEquals("[[0..2), [3..5)]", set.toRangeSet().toString());
+ }
+
+ @Test
+ public void testEmptyCopy() {
+ final var orig = MutableUnsignedLongSet.of();
+ assertSame(ImmutableUnsignedLongSet.of(), orig.immutableCopy());
+ final var copy = orig.mutableCopy();
+ assertEquals(orig, copy);
+ assertNotSame(orig, copy);
+ }
+
+ @Test
+ public void testMutableCopy() {
+ final var orig = MutableUnsignedLongSet.of();
+ orig.add(-1);
+ assertEquals("MutableUnsignedLongSet{span=[18446744073709551615..18446744073709551615], size=1}",
+ orig.toString());
+
+ final var copy = orig.mutableCopy();
+ assertEquals(orig, copy);
+ assertNotSame(orig, copy);
+
+ orig.add(-2);
+ assertNotEquals(orig, copy);
+ assertEquals("MutableUnsignedLongSet{span=[18446744073709551614..18446744073709551615], size=1}",
+ orig.toString());
+ }
+
+ @Test
+ public void testWriteRangesTo() throws IOException {
+ ImmutableUnsignedLongSet.of().writeRangesTo(mock(DataOutput.class), 0);
+ }
+
+ @Test
+ public void testWriteRangesToViolation() {
+ final var ex = assertThrows(IOException.class,
+ () -> ImmutableUnsignedLongSet.of().writeRangesTo(mock(DataOutput.class), 1));
+ assertEquals("Mismatched size: expected 0, got 1", ex.getMessage());
+ }
+
+ @Test
+ public void testAddRange() {
+ var set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(1, 2));
+ assertRanges("[[1..2], [5..6], [9..10], [13..14]]", set);
+ set.addAll(MutableUnsignedLongSet.of(3, 4));
+ assertRanges("[[1..6], [9..10], [13..14]]", set);
+ set.addAll(MutableUnsignedLongSet.of(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+ assertRanges("[[1..15]]", set);
+
+ set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(2, 3, 4, 5));
+ assertRanges("[[1..6], [9..10], [13..14]]", set);
+
+ set.addAll(MutableUnsignedLongSet.of(6, 7));
+ assertRanges("[[1..7], [9..10], [13..14]]", set);
+
+ set.addAll(MutableUnsignedLongSet.of(8));
+ assertRanges("[[1..10], [13..14]]", set);
+
+ set = MutableUnsignedLongSet.of();
+ set.addAll(MutableUnsignedLongSet.of(1, 2));
+ assertRanges("[[1..2]]", set);
+
+ set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(4, 5));
+ assertRanges("[[1..2], [4..6], [9..10], [13..14]]", set);
+
+ set.addAll(MutableUnsignedLongSet.of(12, 13, 14, 15));
+ assertRanges("[[1..2], [4..6], [9..10], [12..15]]", set);
+
+ set.addAll(MutableUnsignedLongSet.of(8, 9, 10, 11));
+ assertRanges("[[1..2], [4..6], [8..15]]", set);
+
+ set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16));
+ assertRanges("[[0..16]]", set);
+
+ set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3));
+ assertRanges("[[0..3], [5..6], [9..10], [13..14]]", set);
+
+ set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8));
+ assertRanges("[[0..10], [13..14]]", set);
+
+ set = sparseSet();
+ set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
+ assertRanges("[[0..10], [13..14]]", set);
+ }
+
+ private static MutableUnsignedLongSet sparseSet() {
+ final var ret = MutableUnsignedLongSet.of(1, 2, 5, 6, 9, 10, 13, 14);
+ assertRanges("[[1..2], [5..6], [9..10], [13..14]]", ret);
+ return ret;
+ }
+
+ private static void assertRanges(final String expected, final UnsignedLongSet set) {
+ assertEquals(expected, set.ranges().toString());
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-
-import akka.actor.ActorRef;
-import akka.dispatch.Futures;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListenerRegistration;
-import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-public class CDSShardAccessImplTest extends AbstractActorTest {
-
- private static final DOMDataTreeIdentifier TEST_ID =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
- private CDSShardAccessImpl shardAccess;
- private ActorUtils context;
-
- @Before
- public void setUp() {
- context = mock(ActorUtils.class);
- final DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
- doReturn(Optional.of(getSystem().deadLetters())).when(context).findLocalShard(any());
- doReturn(datastoreContext).when(context).getDatastoreContext();
- doReturn(getSystem()).when(context).getActorSystem();
- shardAccess = new CDSShardAccessImpl(TEST_ID, context);
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testRegisterLeaderLocationListener() {
- final LeaderLocationListener listener1 = mock(LeaderLocationListener.class);
-
- // first registration should be OK
- shardAccess.registerLeaderLocationListener(listener1);
-
- // second registration should fail with IllegalArgumentEx
- try {
- shardAccess.registerLeaderLocationListener(listener1);
- fail("Should throw exception");
- } catch (final Exception e) {
- assertTrue(e instanceof IllegalArgumentException);
- }
-
- // null listener registration should fail with NPE
- try {
- shardAccess.registerLeaderLocationListener(null);
- fail("Should throw exception");
- } catch (final Exception e) {
- assertTrue(e instanceof NullPointerException);
- }
-
- // registering listener on closed shard access should fail with IllegalStateEx
- final LeaderLocationListener listener2 = mock(LeaderLocationListener.class);
- shardAccess.close();
- try {
- shardAccess.registerLeaderLocationListener(listener2);
- fail("Should throw exception");
- } catch (final Exception ex) {
- assertTrue(ex instanceof IllegalStateException);
- }
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testOnLeaderLocationChanged() {
- final LeaderLocationListener listener1 = mock(LeaderLocationListener.class);
- doThrow(new RuntimeException("Failed")).when(listener1).onLeaderLocationChanged(any());
- final LeaderLocationListener listener2 = mock(LeaderLocationListener.class);
- doNothing().when(listener2).onLeaderLocationChanged(any());
- final LeaderLocationListener listener3 = mock(LeaderLocationListener.class);
- doNothing().when(listener3).onLeaderLocationChanged(any());
-
- final LeaderLocationListenerRegistration<?> reg1 = shardAccess.registerLeaderLocationListener(listener1);
- final LeaderLocationListenerRegistration<?> reg2 = shardAccess.registerLeaderLocationListener(listener2);
- final LeaderLocationListenerRegistration<?> reg3 = shardAccess.registerLeaderLocationListener(listener3);
-
- // Error in listener1 should not affect dispatching change to other listeners
- shardAccess.onLeaderLocationChanged(LeaderLocation.LOCAL);
- verify(listener1).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
- verify(listener2).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
- verify(listener3).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-
- // Closed listeners shouldn't see new leader location changes
- reg1.close();
- reg2.close();
- shardAccess.onLeaderLocationChanged(LeaderLocation.REMOTE);
- verify(listener3).onLeaderLocationChanged(eq(LeaderLocation.REMOTE));
- verifyNoMoreInteractions(listener1);
- verifyNoMoreInteractions(listener2);
-
- // Closed shard access should not dispatch any new events
- shardAccess.close();
- shardAccess.onLeaderLocationChanged(LeaderLocation.UNKNOWN);
- verifyNoMoreInteractions(listener1);
- verifyNoMoreInteractions(listener2);
- verifyNoMoreInteractions(listener3);
-
- reg3.close();
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testGetShardIdentifier() {
- assertEquals(shardAccess.getShardIdentifier(), TEST_ID);
-
- // closed shard access should throw illegal state
- shardAccess.close();
- try {
- shardAccess.getShardIdentifier();
- fail("Exception expected");
- } catch (final Exception e) {
- assertTrue(e instanceof IllegalStateException);
- }
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testGetLeaderLocation() {
- // new shard access does not know anything about leader location
- assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.UNKNOWN);
-
- // we start getting leader location changes notifications
- shardAccess.onLeaderLocationChanged(LeaderLocation.LOCAL);
- assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.LOCAL);
-
- shardAccess.onLeaderLocationChanged(LeaderLocation.REMOTE);
- shardAccess.onLeaderLocationChanged(LeaderLocation.UNKNOWN);
- assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.UNKNOWN);
-
- // closed shard access throws illegal state
- shardAccess.close();
- try {
- shardAccess.getLeaderLocation();
- fail("Should have failed with IllegalStateEx");
- } catch (Exception e) {
- assertTrue(e instanceof IllegalStateException);
- }
- }
-
- @Test
- @SuppressWarnings("checkstyle:IllegalCatch")
- public void testMakeLeaderLocal() throws Exception {
- final FiniteDuration timeout = new FiniteDuration(5, TimeUnit.SECONDS);
- final ActorRef localShardRef = mock(ActorRef.class);
- final Future<ActorRef> localShardRefFuture = Futures.successful(localShardRef);
- doReturn(localShardRefFuture).when(context).findLocalShardAsync(any());
-
- // MakeLeaderLocal will reply with success
- doReturn(Futures.successful(null)).when(context).executeOperationAsync((ActorRef) any(), any(), any());
- doReturn(getSystem().dispatcher()).when(context).getClientDispatcher();
- assertEquals(waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout), null);
-
- // MakeLeaderLocal will reply with failure
- doReturn(Futures.failed(new LeadershipTransferFailedException("Failure")))
- .when(context).executeOperationAsync((ActorRef) any(), any(), any());
-
- try {
- waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout);
- fail("makeLeaderLocal operation should not be successful");
- } catch (final Exception e) {
- assertTrue(e instanceof LeadershipTransferFailedException);
- }
-
- // we don't even find local shard
- doReturn(Futures.failed(new LocalShardNotFoundException("Local shard not found")))
- .when(context).findLocalShardAsync(any());
-
- try {
- waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout);
- fail("makeLeaderLocal operation should not be successful");
- } catch (final Exception e) {
- assertTrue(e instanceof LeadershipTransferFailedException);
- assertTrue(e.getCause() instanceof LocalShardNotFoundException);
- }
-
- // closed shard access should throw IllegalStateEx
- shardAccess.close();
- try {
- shardAccess.makeLeaderLocal();
- fail("Should have thrown IllegalStateEx. ShardAccess is closed");
- } catch (final Exception e) {
- assertTrue(e instanceof IllegalStateException);
- }
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.hamcrest.CoreMatchers.hasItems;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.List;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-
-public class DistributedShardFrontendTest {
-
- private static final DOMDataTreeIdentifier ROOT =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
- private static final ListenableFuture<Object> SUCCESS_FUTURE = Futures.immediateFuture(null);
-
- private ShardedDOMDataTree shardedDOMDataTree;
-
- private DataStoreClient client;
- private ClientLocalHistory clientHistory;
- private ClientTransaction clientTransaction;
- private DOMDataTreeWriteCursor cursor;
-
- private static final YangInstanceIdentifier OUTER_LIST_YID = TestModel.OUTER_LIST_PATH.node(
- NodeIdentifierWithPredicates.of(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
- private static final DOMDataTreeIdentifier OUTER_LIST_ID =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, OUTER_LIST_YID);
-
- @Captor
- private ArgumentCaptor<PathArgument> pathArgumentCaptor;
- @Captor
- private ArgumentCaptor<NormalizedNode<?, ?>> nodeCaptor;
-
- private DOMStoreThreePhaseCommitCohort commitCohort;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
- shardedDOMDataTree = new ShardedDOMDataTree();
- client = mock(DataStoreClient.class);
- cursor = mock(DOMDataTreeWriteCursor.class);
- clientTransaction = mock(ClientTransaction.class);
- clientHistory = mock(ClientLocalHistory.class);
- commitCohort = mock(DOMStoreThreePhaseCommitCohort.class);
-
- doReturn(SUCCESS_FUTURE).when(commitCohort).canCommit();
- doReturn(SUCCESS_FUTURE).when(commitCohort).preCommit();
- doReturn(SUCCESS_FUTURE).when(commitCohort).commit();
- doReturn(SUCCESS_FUTURE).when(commitCohort).abort();
-
- doReturn(clientTransaction).when(client).createTransaction();
- doReturn(clientTransaction).when(clientHistory).createTransaction();
- doNothing().when(clientHistory).close();
-
- doNothing().when(client).close();
- doReturn(clientHistory).when(client).createLocalHistory();
-
- doReturn(cursor).when(clientTransaction).openCursor();
- doNothing().when(cursor).close();
- doNothing().when(cursor).write(any(), any());
- doNothing().when(cursor).merge(any(), any());
- doNothing().when(cursor).delete(any());
-
- doReturn(commitCohort).when(clientTransaction).ready();
- }
-
- @Test
- public void testClientTransaction() throws Exception {
- final DistributedDataStore distributedDataStore = mock(DistributedDataStore.class);
- final ActorUtils context = mock(ActorUtils.class);
- doReturn(context).when(distributedDataStore).getActorUtils();
- doReturn(SchemaContextHelper.full()).when(context).getSchemaContext();
-
- final DistributedShardFrontend rootShard = new DistributedShardFrontend(distributedDataStore, client, ROOT);
-
- try (DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT))) {
- shardedDOMDataTree.registerDataTreeShard(ROOT, rootShard, producer);
- }
-
- final DataStoreClient outerListClient = mock(DataStoreClient.class);
- final ClientTransaction outerListClientTransaction = mock(ClientTransaction.class);
- final ClientLocalHistory outerListClientHistory = mock(ClientLocalHistory.class);
- final DOMDataTreeWriteCursor outerListCursor = mock(DOMDataTreeWriteCursor.class);
-
- doNothing().when(outerListCursor).close();
- doNothing().when(outerListCursor).write(any(), any());
- doNothing().when(outerListCursor).merge(any(), any());
- doNothing().when(outerListCursor).delete(any());
-
- doReturn(outerListCursor).when(outerListClientTransaction).openCursor();
- doReturn(outerListClientTransaction).when(outerListClient).createTransaction();
- doReturn(outerListClientHistory).when(outerListClient).createLocalHistory();
- doReturn(outerListClientTransaction).when(outerListClientHistory).createTransaction();
-
- doReturn(commitCohort).when(outerListClientTransaction).ready();
-
- doNothing().when(outerListClientHistory).close();
- doNothing().when(outerListClient).close();
-
- final DistributedShardFrontend outerListShard = new DistributedShardFrontend(
- distributedDataStore, outerListClient, OUTER_LIST_ID);
- try (DOMDataTreeProducer producer =
- shardedDOMDataTree.createProducer(Collections.singletonList(OUTER_LIST_ID))) {
- shardedDOMDataTree.registerDataTreeShard(OUTER_LIST_ID, outerListShard, producer);
- }
-
- final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT));
- final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
- final DOMDataTreeWriteCursor txCursor = tx.createCursor(ROOT);
-
- assertNotNull(txCursor);
- txCursor.write(TestModel.TEST_PATH.getLastPathArgument(), createCrossShardContainer());
-
- //check the lower shard got the correct modification
- verify(outerListCursor, times(2)).write(pathArgumentCaptor.capture(), nodeCaptor.capture());
-
- final List<PathArgument> capturedArgs = pathArgumentCaptor.getAllValues();
- assertEquals(2, capturedArgs.size());
- assertThat(capturedArgs,
- hasItems(new NodeIdentifier(TestModel.ID_QNAME), new NodeIdentifier(TestModel.INNER_LIST_QNAME)));
-
- final List<NormalizedNode<?, ?>> capturedValues = nodeCaptor.getAllValues();
- assertEquals(2, capturedValues.size());
- assertThat(capturedValues,
- hasItems(ImmutableNodes.leafNode(TestModel.ID_QNAME, 1), createInnerMapNode(1)));
-
- txCursor.close();
- tx.commit().get();
-
- verify(commitCohort, times(2)).canCommit();
- verify(commitCohort, times(2)).preCommit();
- verify(commitCohort, times(2)).commit();
- }
-
- private static MapNode createInnerMapNode(final int id) {
- final MapEntryNode listEntry = ImmutableNodes
- .mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "name-" + id)
- .withChild(ImmutableNodes.leafNode(TestModel.NAME_QNAME, "name-" + id))
- .withChild(ImmutableNodes.leafNode(TestModel.VALUE_QNAME, "value-" + id))
- .build();
-
- return ImmutableNodes.mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(listEntry).build();
- }
-
- private static ContainerNode createCrossShardContainer() {
-
- final MapEntryNode outerListEntry1 =
- ImmutableNodes.mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
- .withChild(createInnerMapNode(1))
- .build();
- final MapEntryNode outerListEntry2 =
- ImmutableNodes.mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2)
- .withChild(createInnerMapNode(2))
- .build();
-
- final MapNode outerList = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
- .withChild(outerListEntry1)
- .withChild(outerListEntry2)
- .build();
-
- final ContainerNode testContainer = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(outerList)
- .build();
-
- return testContainer;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Address;
-import akka.actor.AddressFromURIString;
-import akka.cluster.Cluster;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.Lists;
-import com.typesafe.config.ConfigFactory;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
-
- private static final Address MEMBER_1_ADDRESS =
- AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
-
- private static final DOMDataTreeIdentifier TEST_ID =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
- private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
-
- private ActorSystem leaderSystem;
- private ActorSystem followerSystem;
-
-
- private final Builder leaderDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
-
- private final DatastoreContext.Builder followerDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
-
- private DistributedDataStore leaderConfigDatastore;
- private DistributedDataStore leaderOperDatastore;
-
- private DistributedDataStore followerConfigDatastore;
- private DistributedDataStore followerOperDatastore;
-
-
- private IntegrationTestKit followerTestKit;
- private IntegrationTestKit leaderTestKit;
- private DistributedShardedDOMDataTree leaderShardFactory;
-
- private DistributedShardedDOMDataTree followerShardFactory;
- private ActorSystemProvider leaderSystemProvider;
- private ActorSystemProvider followerSystemProvider;
-
- @Before
- public void setUp() {
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
-
- leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
- Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
-
- followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
- Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
-
- leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
- doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();
-
- followerSystemProvider = Mockito.mock(ActorSystemProvider.class);
- doReturn(followerSystem).when(followerSystemProvider).getActorSystem();
-
- }
-
- @After
- public void tearDown() {
- if (leaderConfigDatastore != null) {
- leaderConfigDatastore.close();
- }
- if (leaderOperDatastore != null) {
- leaderOperDatastore.close();
- }
-
- if (followerConfigDatastore != null) {
- followerConfigDatastore.close();
- }
- if (followerOperDatastore != null) {
- followerOperDatastore.close();
- }
-
- TestKit.shutdownActorSystem(leaderSystem, true);
- TestKit.shutdownActorSystem(followerSystem, true);
-
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
- }
-
- private void initEmptyDatastores() throws Exception {
- initEmptyDatastores(MODULE_SHARDS_CONFIG);
- }
-
- private void initEmptyDatastores(final String moduleShardsConfig) throws Exception {
- leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
-
- leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
- "config", moduleShardsConfig, true,
- SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
- leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
- "operational", moduleShardsConfig, true,
- SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
- leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
- leaderOperDatastore,
- leaderConfigDatastore);
-
- followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);
-
- followerConfigDatastore = followerTestKit.setupDistributedDataStore(
- "config", moduleShardsConfig, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
- followerOperDatastore = followerTestKit.setupDistributedDataStore(
- "operational", moduleShardsConfig, true,
- SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
- followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
- followerOperDatastore,
- followerConfigDatastore);
-
- followerTestKit.waitForMembersUp("member-1");
-
- LOG.info("Initializing leader DistributedShardedDOMDataTree");
- leaderShardFactory.init();
-
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(YangInstanceIdentifier.empty()));
-
- leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(YangInstanceIdentifier.empty()));
-
- LOG.info("Initializing follower DistributedShardedDOMDataTree");
- followerShardFactory.init();
- }
-
- @Test
- public void testProducerRegistrations() throws Exception {
- LOG.info("testProducerRegistrations starting");
- initEmptyDatastores();
-
- leaderTestKit.waitForMembersUp("member-2");
-
- // TODO refactor shard creation and verification to own method
- final DistributedShardRegistration shardRegistration =
- waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
- final ActorRef leaderShardManager = leaderConfigDatastore.getActorUtils().getShardManager();
-
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
-
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
-
- final Set<String> peers = new HashSet<>();
- IntegrationTestKit.verifyShardState(leaderConfigDatastore,
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
- peers.addAll(onDemandShardState.getPeerAddresses().values()));
- assertEquals(peers.size(), 1);
-
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
- try {
- followerShardFactory.createProducer(Collections.singleton(TEST_ID));
- fail("Producer should be already registered on the other node");
- } catch (final IllegalArgumentException e) {
- assertTrue(e.getMessage().contains("is attached to producer"));
- }
-
- producer.close();
-
- final DOMDataTreeProducer followerProducer =
- followerShardFactory.createProducer(Collections.singleton(TEST_ID));
- try {
- leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
- fail("Producer should be already registered on the other node");
- } catch (final IllegalArgumentException e) {
- assertTrue(e.getMessage().contains("is attached to producer"));
- }
-
- followerProducer.close();
- // try to create a shard on an already registered prefix on follower
- try {
- waitOnAsyncTask(followerShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
- fail("This prefix already should have a shard registration that was forwarded from the other node");
- } catch (final DOMDataTreeShardingConflictException e) {
- assertTrue(e.getMessage().contains("is already occupied by another shard"));
- }
-
- shardRegistration.close().toCompletableFuture().get();
-
- LOG.info("testProducerRegistrations ending");
- }
-
- @Test
- public void testWriteIntoMultipleShards() throws Exception {
- LOG.info("testWriteIntoMultipleShards starting");
- initEmptyDatastores();
-
- leaderTestKit.waitForMembersUp("member-2");
-
- LOG.debug("registering first shard");
- final DistributedShardRegistration shardRegistration =
- waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
- findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
- final Set<String> peers = new HashSet<>();
- IntegrationTestKit.verifyShardState(leaderConfigDatastore,
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
- peers.addAll(onDemandShardState.getPeerAddresses().values()));
- assertEquals(peers.size(), 1);
-
- LOG.debug("Got after waiting for nonleader");
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-
- final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
- final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
- Assert.assertNotNull(cursor);
- final YangInstanceIdentifier nameId =
- YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
- cursor.write(nameId.getLastPathArgument(),
- ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
- new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build());
-
- cursor.close();
- LOG.warn("Got to pre submit");
-
- tx.commit().get();
-
- shardRegistration.close().toCompletableFuture().get();
-
- LOG.info("testWriteIntoMultipleShards ending");
- }
-
- @Test
- public void testMultipleShardRegistrations() throws Exception {
- LOG.info("testMultipleShardRegistrations starting");
- initEmptyDatastores();
-
- final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- final DistributedShardRegistration reg2 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_CONTAINER_PATH),
- Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- final DistributedShardRegistration reg3 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.INNER_LIST_PATH),
- Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- final DistributedShardRegistration reg4 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.JUNK_PATH),
- Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
- // check leader has local shards
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
-
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
-
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
-
- // check follower has local shards
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
-
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
-
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
-
- LOG.debug("Closing registrations");
-
- reg1.close().toCompletableFuture().get();
- reg2.close().toCompletableFuture().get();
- reg3.close().toCompletableFuture().get();
- reg4.close().toCompletableFuture().get();
-
- waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-
- waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-
- waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
- LOG.debug("All leader shards gone");
-
- waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-
- waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-
- waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
- LOG.debug("All follower shards gone");
- LOG.info("testMultipleShardRegistrations ending");
- }
-
- @Test
- public void testMultipleRegistrationsAtOnePrefix() throws Exception {
- LOG.info("testMultipleRegistrationsAtOnePrefix starting");
- initEmptyDatastores();
-
- for (int i = 0; i < 5; i++) {
- LOG.info("Round {}", i);
- final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
- assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-
- final Set<String> peers = new HashSet<>();
- IntegrationTestKit.verifyShardState(leaderConfigDatastore,
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
- peers.addAll(onDemandShardState.getPeerAddresses().values()));
- assertEquals(peers.size(), 1);
-
- waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
- }
-
- LOG.info("testMultipleRegistrationsAtOnePrefix ending");
- }
-
- @Test
- public void testInitialBootstrappingWithNoModuleShards() throws Exception {
- LOG.info("testInitialBootstrappingWithNoModuleShards starting");
- initEmptyDatastores("module-shards-default-member-1.conf");
-
- // We just verify the DistributedShardedDOMDataTree initialized without error.
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.anyCollection;
-import static org.mockito.ArgumentMatchers.anyMap;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Address;
-import akka.actor.AddressFromURIString;
-import akka.actor.Props;
-import akka.cluster.Cluster;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.typesafe.config.ConfigFactory;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
-import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardedDOMDataTreeTest extends AbstractTest {
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
-
- private static final Address MEMBER_1_ADDRESS =
- AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
-
- private static final DOMDataTreeIdentifier TEST_ID =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
- private static final DOMDataTreeIdentifier INNER_LIST_ID =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
- YangInstanceIdentifier.create(getOuterListIdFor(0).getPathArguments())
- .node(TestModel.INNER_LIST_QNAME));
- private static final Set<MemberName> SINGLE_MEMBER = Collections.singleton(AbstractTest.MEMBER_NAME);
-
- private static final String MODULE_SHARDS_CONFIG = "module-shards-default-member-1.conf";
-
- private ActorSystem leaderSystem;
-
- private final Builder leaderDatastoreContextBuilder =
- DatastoreContext.newBuilder()
- .shardHeartbeatIntervalInMillis(100)
- .shardElectionTimeoutFactor(2)
- .logicalStoreType(LogicalDatastoreType.CONFIGURATION);
-
- private DistributedDataStore leaderDistributedDataStore;
- private DistributedDataStore operDistributedDatastore;
- private IntegrationTestKit leaderTestKit;
-
- private DistributedShardedDOMDataTree leaderShardFactory;
-
- @Captor
- private ArgumentCaptor<Collection<DataTreeCandidate>> captorForChanges;
- @Captor
- private ArgumentCaptor<Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>>> captorForSubtrees;
-
- private ActorSystemProvider leaderSystemProvider;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
-
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
-
- leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
- Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
-
- leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
- doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();
- }
-
- @After
- public void tearDown() {
- if (leaderDistributedDataStore != null) {
- leaderDistributedDataStore.close();
- }
-
- if (operDistributedDatastore != null) {
- operDistributedDatastore.close();
- }
-
- TestKit.shutdownActorSystem(leaderSystem);
-
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
- }
-
- private void initEmptyDatastores() throws Exception {
- leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
-
- leaderDistributedDataStore = leaderTestKit.setupDistributedDataStore(
- "config", MODULE_SHARDS_CONFIG, "empty-modules.conf", true,
- SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
- operDistributedDatastore = leaderTestKit.setupDistributedDataStore(
- "operational", MODULE_SHARDS_CONFIG, "empty-modules.conf",true,
- SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
- leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
- operDistributedDatastore,
- leaderDistributedDataStore);
-
- leaderShardFactory.init();
- }
-
-
- @Test
- public void testWritesIntoDefaultShard() throws Exception {
- initEmptyDatastores();
-
- final DOMDataTreeIdentifier configRoot =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
-
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(configRoot));
-
- final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
- final DOMDataTreeWriteCursor cursor =
- tx.createCursor(new DOMDataTreeIdentifier(
- LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty()));
- Assert.assertNotNull(cursor);
-
- final ContainerNode test =
- ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).build();
-
- cursor.write(test.getIdentifier(), test);
- cursor.close();
-
- tx.commit().get();
- }
-
- @Test
- public void testSingleNodeWritesAndRead() throws Exception {
- initEmptyDatastores();
-
- final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
- leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-
- final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
- final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
- Assert.assertNotNull(cursor);
- final YangInstanceIdentifier nameId =
- YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
- final LeafNode<String> valueToCheck = ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
- new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build();
- LOG.debug("Writing data {} at {}, cursor {}", nameId.getLastPathArgument(), valueToCheck, cursor);
- cursor.write(nameId.getLastPathArgument(),
- valueToCheck);
-
- cursor.close();
- LOG.debug("Got to pre submit");
-
- tx.commit().get();
-
- final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
- doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
- leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
- true, Collections.emptyList());
-
- verify(mockedDataTreeListener, timeout(1000).times(1)).onDataTreeChanged(captorForChanges.capture(),
- captorForSubtrees.capture());
- final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();
-
- final Optional<NormalizedNode<?, ?>> dataAfter =
- capturedValue.get(0).iterator().next().getRootNode().getDataAfter();
-
- final NormalizedNode<?,?> expected = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).withChild(valueToCheck).build();
- assertEquals(expected, dataAfter.get());
-
- verifyNoMoreInteractions(mockedDataTreeListener);
-
- final String shardName = ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier());
- LOG.debug("Creating distributed datastore client for shard {}", shardName);
-
- final ActorUtils actorUtils = leaderDistributedDataStore.getActorUtils();
- final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor.props(actorUtils.getCurrentMemberName(), "Shard-" + shardName, actorUtils,
- shardName);
-
- final ActorRef clientActor = leaderSystem.actorOf(distributedDataStoreClientProps);
- final DataStoreClient distributedDataStoreClient = SimpleDataStoreClientActor
- .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
-
- final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
- final ClientTransaction tx2 = localHistory.createTransaction();
- final FluentFuture<Optional<NormalizedNode<?, ?>>> read = tx2.read(YangInstanceIdentifier.empty());
-
- final Optional<NormalizedNode<?, ?>> optional = read.get();
- tx2.abort();
- localHistory.close();
-
- shardRegistration.close().toCompletableFuture().get();
-
- }
-
- @Test
- public void testMultipleWritesIntoSingleMapEntry() throws Exception {
- initEmptyDatastores();
-
- final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
- leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
- LOG.warn("Got after waiting for nonleader");
- final ActorRef leaderShardManager = leaderDistributedDataStore.getActorUtils().getShardManager();
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- final YangInstanceIdentifier oid1 = getOuterListIdFor(0);
- final DOMDataTreeIdentifier outerListPath = new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, oid1);
-
- final DistributedShardRegistration outerListShardReg = waitOnAsyncTask(
- leaderShardFactory.createDistributedShard(outerListPath, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(outerListPath.getRootIdentifier()));
-
- final DOMDataTreeProducer shardProducer = leaderShardFactory.createProducer(
- Collections.singletonList(outerListPath));
-
- final DOMDataTreeCursorAwareTransaction tx = shardProducer.createTransaction(false);
- final DOMDataTreeWriteCursor cursor =
- tx.createCursor(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, oid1));
- assertNotNull(cursor);
-
- MapNode innerList = ImmutableMapNodeBuilder
- .create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
- .build();
-
- cursor.write(new NodeIdentifier(TestModel.INNER_LIST_QNAME), innerList);
- cursor.close();
- tx.commit().get();
-
- final ArrayList<ListenableFuture<?>> futures = new ArrayList<>();
- for (int i = 0; i < 1000; i++) {
- final Collection<MapEntryNode> innerListMapEntries = createInnerListMapEntries(1000, "run-" + i);
- for (final MapEntryNode innerListMapEntry : innerListMapEntries) {
- final DOMDataTreeCursorAwareTransaction tx1 = shardProducer.createTransaction(false);
- final DOMDataTreeWriteCursor cursor1 = tx1.createCursor(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
- oid1.node(new NodeIdentifier(TestModel.INNER_LIST_QNAME))));
- cursor1.write(innerListMapEntry.getIdentifier(), innerListMapEntry);
- cursor1.close();
- futures.add(tx1.commit());
- }
- }
-
- futures.get(futures.size() - 1).get();
-
- final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
- doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
- leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(INNER_LIST_ID),
- true, Collections.emptyList());
-
- verify(mockedDataTreeListener, timeout(1000).times(1)).onDataTreeChanged(captorForChanges.capture(),
- captorForSubtrees.capture());
- verifyNoMoreInteractions(mockedDataTreeListener);
- final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();
-
- final NormalizedNode<?,?> expected =
- ImmutableMapNodeBuilder
- .create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
- // only the values from the last run should be present
- .withValue(createInnerListMapEntries(1000, "run-999"))
- .build();
-
- assertEquals("List values dont match the expected values from the last run",
- expected, capturedValue.get(0).iterator().next().getRootNode().getDataAfter().get());
-
- }
-
- // top level shard at TEST element, with subshards on each outer-list map entry
- @Test
- @Ignore
- public void testMultipleShardLevels() throws Exception {
- initEmptyDatastores();
-
- final DistributedShardRegistration testShardReg = waitOnAsyncTask(
- leaderShardFactory.createDistributedShard(TEST_ID, SINGLE_MEMBER),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- final ArrayList<DistributedShardRegistration> registrations = new ArrayList<>();
- final int listSize = 5;
- for (int i = 0; i < listSize; i++) {
- final YangInstanceIdentifier entryYID = getOuterListIdFor(i);
- final CompletionStage<DistributedShardRegistration> future = leaderShardFactory.createDistributedShard(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, entryYID), SINGLE_MEMBER);
-
- registrations.add(waitOnAsyncTask(future, DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION));
- }
-
- final DOMDataTreeIdentifier rootId =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singletonList(
- rootId));
-
- DOMDataTreeCursorAwareTransaction transaction = producer.createTransaction(false);
-
- DOMDataTreeWriteCursor cursor = transaction.createCursor(rootId);
- assertNotNull(cursor);
-
- final MapNode outerList =
- ImmutableMapNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
-
- final ContainerNode testNode =
- ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(outerList)
- .build();
-
- cursor.write(testNode.getIdentifier(), testNode);
-
- cursor.close();
- transaction.commit().get();
-
- final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
- doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
- final MapNode wholeList = ImmutableMapNodeBuilder.create(outerList)
- .withValue(createOuterEntries(listSize, "testing-values")).build();
-
- transaction = producer.createTransaction(false);
- cursor = transaction.createCursor(TEST_ID);
- assertNotNull(cursor);
-
- cursor.write(wholeList.getIdentifier(), wholeList);
- cursor.close();
-
- transaction.commit().get();
-
- leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
- true, Collections.emptyList());
-
- verify(mockedDataTreeListener, timeout(35000).atLeast(2)).onDataTreeChanged(captorForChanges.capture(),
- captorForSubtrees.capture());
- verifyNoMoreInteractions(mockedDataTreeListener);
- final List<Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>>> allSubtrees = captorForSubtrees.getAllValues();
-
- final Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> lastSubtree = allSubtrees.get(allSubtrees.size() - 1);
-
- final NormalizedNode<?, ?> actual = lastSubtree.get(TEST_ID);
- assertNotNull(actual);
-
- final NormalizedNode<?, ?> expected =
- ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
- .withChild(ImmutableMapNodeBuilder.create(outerList)
- .withValue(createOuterEntries(listSize, "testing-values")).build())
- .build();
-
-
- for (final DistributedShardRegistration registration : registrations) {
- waitOnAsyncTask(registration.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
- }
-
- waitOnAsyncTask(testShardReg.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- assertEquals(expected, actual);
- }
-
- @Test
- public void testMultipleRegistrationsAtOnePrefix() throws Exception {
- initEmptyDatastores();
-
- for (int i = 0; i < 10; i++) {
- LOG.debug("Round {}", i);
- final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- assertNotNull(findLocalShard(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
- waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- waitUntilShardIsDown(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
- }
- }
-
- @Test
- public void testCDSDataTreeProducer() throws Exception {
- initEmptyDatastores();
-
- final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
- TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
- DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
- leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
- assertNotNull(findLocalShard(leaderDistributedDataStore.getActorUtils(),
- ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-
- final DOMDataTreeIdentifier configRoot =
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
- final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(configRoot));
-
- assertTrue(producer instanceof CDSDataTreeProducer);
-
- final CDSDataTreeProducer cdsProducer = (CDSDataTreeProducer) producer;
- CDSShardAccess shardAccess = cdsProducer.getShardAccess(TEST_ID);
- assertEquals(shardAccess.getShardIdentifier(), TEST_ID);
-
- shardAccess = cdsProducer.getShardAccess(INNER_LIST_ID);
- assertEquals(TEST_ID, shardAccess.getShardIdentifier());
-
- shardAccess = cdsProducer.getShardAccess(configRoot);
- assertEquals(configRoot, shardAccess.getShardIdentifier());
-
- waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
- }
-
- private static Collection<MapEntryNode> createOuterEntries(final int amount, final String valuePrefix) {
- final Collection<MapEntryNode> ret = new ArrayList<>();
- for (int i = 0; i < amount; i++) {
- ret.add(ImmutableNodes.mapEntryBuilder()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(TestModel.OUTER_LIST_QNAME,
- QName.create(TestModel.OUTER_LIST_QNAME, "id"), i))
- .withChild(ImmutableNodes
- .leafNode(QName.create(TestModel.OUTER_LIST_QNAME, "id"), i))
- .withChild(createWholeInnerList(amount, "outer id: " + i + " " + valuePrefix))
- .build());
- }
-
- return ret;
- }
-
- private static MapNode createWholeInnerList(final int amount, final String valuePrefix) {
- return ImmutableMapNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
- .withValue(createInnerListMapEntries(amount, valuePrefix)).build();
- }
-
- private static Collection<MapEntryNode> createInnerListMapEntries(final int amount, final String valuePrefix) {
- final Collection<MapEntryNode> ret = new ArrayList<>();
- for (int i = 0; i < amount; i++) {
- ret.add(ImmutableNodes.mapEntryBuilder()
- .withNodeIdentifier(NodeIdentifierWithPredicates.of(TestModel.INNER_LIST_QNAME,
- QName.create(TestModel.INNER_LIST_QNAME, "name"), Integer.toString(i)))
- .withChild(ImmutableNodes
- .leafNode(QName.create(TestModel.INNER_LIST_QNAME, "value"), valuePrefix + "-" + i))
- .build());
- }
-
- return ret;
- }
-
- private static YangInstanceIdentifier getOuterListIdFor(final int id) {
- return TestModel.OUTER_LIST_PATH.node(NodeIdentifierWithPredicates.of(
- TestModel.OUTER_LIST_QNAME, QName.create(TestModel.OUTER_LIST_QNAME, "id"), id));
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.javadsl.TestKit;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
-
-public class RoleChangeListenerActorTest extends AbstractActorTest {
-
- @Test
- public void testRegisterRoleChangeListenerOnStart() {
- final TestKit testKit = new TestKit(getSystem());
- final LeaderLocationListener listener = mock(LeaderLocationListener.class);
- final Props props = RoleChangeListenerActor.props(testKit.getRef(), listener);
-
- getSystem().actorOf(props, "testRegisterRoleChangeListenerOnStart");
- testKit.expectMsgClass(RegisterRoleChangeListener.class);
- }
-
- @Test
- public void testOnDataTreeChanged() {
- final LeaderLocationListener listener = mock(LeaderLocationListener.class);
- doNothing().when(listener).onLeaderLocationChanged(any());
- final Props props = RoleChangeListenerActor.props(getSystem().deadLetters(), listener);
-
- final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedChanged");
-
- subject.tell(new LeaderStateChanged("member-1", null, (short) 0), noSender());
- verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.UNKNOWN));
-
- subject.tell(new LeaderStateChanged("member-1", "member-1", (short) 0), noSender());
- verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-
- subject.tell(new LeaderStateChanged("member-1", "member-2", (short) 0), noSender());
- verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.REMOTE));
- }
-}
\ No newline at end of file
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
public final class CarsModel {
public static final QName BASE_QNAME = QName.create(
public static final YangInstanceIdentifier CAR_LIST_PATH = BASE_PATH.node(CAR_QNAME);
private CarsModel() {
-
+ // Hidden on purpose
}
- public static NormalizedNode<?, ?> create() {
-
- // Create a list builder
- CollectionNodeBuilder<MapEntryNode, MapNode> cars =
- ImmutableMapNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(
- CAR_QNAME));
-
- // Create an entry for the car altima
- MapEntryNode altima =
- ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
- .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
- .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf(1000)))
- .build();
-
- // Create an entry for the car accord
- MapEntryNode honda =
- ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
- .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
- .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf("2000")))
- .build();
-
- cars.withChild(altima);
- cars.withChild(honda);
-
- return ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
- .withChild(cars.build())
+ public static ContainerNode create() {
+ return Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+ .withChild(Builders.mapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(CAR_QNAME))
+ // Create an entry for the car altima
+ .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
+ .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
+ .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf(1000)))
+ .build())
+ // Create an entry for the car accord
+ .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
+ .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
+ .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf("2000")))
+ .build())
+ .build())
.build();
-
}
- public static NormalizedNode<?, ?> createEmptyCarsList() {
+ public static NormalizedNode createEmptyCarsList() {
return newCarsNode(newCarsMapNode());
}
public static ContainerNode newCarsNode(final MapNode carsList) {
- return ImmutableContainerNodeBuilder.create().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(
- BASE_QNAME)).withChild(carsList).build();
+ return Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+ .withChild(carsList)
+ .build();
}
public static MapNode newCarsMapNode(final MapEntryNode... carEntries) {
- CollectionNodeBuilder<MapEntryNode, MapNode> builder = ImmutableMapNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CAR_QNAME));
+ var builder = Builders.mapBuilder().withNodeIdentifier(new NodeIdentifier(CAR_QNAME));
for (MapEntryNode e : carEntries) {
builder.withChild(e);
}
return builder.build();
}
- public static NormalizedNode<?, ?> emptyContainer() {
- return ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
- .build();
+ public static ContainerNode emptyContainer() {
+ return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
}
- public static NormalizedNode<?, ?> newCarMapNode() {
+ public static SystemMapNode newCarMapNode() {
return ImmutableNodes.mapNodeBuilder(CAR_QNAME).build();
}
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
-import java.util.HashSet;
-import java.util.Set;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
private static final String SECOND_GRAND_CHILD_NAME = "second grand child";
private static final MapEntryNode BAR_NODE = mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, TWO_ID)
- .withChild(mapNodeBuilder(INNER_LIST_QNAME)
- .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
- .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
- .build())
- .build();
+ .withChild(mapNodeBuilder(INNER_LIST_QNAME)
+ .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
+ .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
+ .build())
+ .build();
private CompositeModel() {
-
+ // Hidden on purpose
}
public static SchemaContext createTestContext() {
}
public static ContainerNode createTestContainer() {
- final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create()
- .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "nike"))
- .withValue("nike").build();
- final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create()
- .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "puma"))
- .withValue("puma").build();
- final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create()
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(leafNode(DESC_QNAME, DESC))
+ .withChild(leafNode(AUG_QNAME, "First Test"))
+ .withChild(ImmutableNodes.<String>newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "shoe")))
- .withChild(nike).withChild(puma).build();
-
- final LeafSetEntryNode<Object> five = ImmutableLeafSetEntryNodeBuilder.create()
- .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "number"), 5))
- .withValue(5).build();
- final LeafSetEntryNode<Object> fifteen = ImmutableLeafSetEntryNodeBuilder.create()
- .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "number"), 15))
- .withValue(15).build();
- final LeafSetNode<Object> numbers = ImmutableLeafSetNodeBuilder.create()
+ .withChildValue("nike")
+ .withChildValue("puma")
+ .build())
+ .withChild(ImmutableNodes.<Integer>newSystemLeafSetBuilder()
.withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
- .withChild(five).withChild(fifteen).build();
-
- Set<QName> childAugmentations = new HashSet<>();
- childAugmentations.add(AUG_QNAME);
- final AugmentationIdentifier augmentationIdentifier = new AugmentationIdentifier(childAugmentations);
- final AugmentationNode augmentationNode = Builders.augmentationBuilder()
- .withNodeIdentifier(augmentationIdentifier).withChild(ImmutableNodes.leafNode(AUG_QNAME, "First Test"))
- .build();
- return ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
- .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC)).withChild(augmentationNode).withChild(shoes)
- .withChild(numbers).withChild(mapNodeBuilder(OUTER_LIST_QNAME)
- .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID)).withChild(BAR_NODE).build())
- .build();
+ .withChildValue(5)
+ .withChildValue(15)
+ .build())
+ .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+ .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+ .withChild(BAR_NODE)
+ .build())
+ .build();
}
public static ContainerNode createFamily() {
- final DataContainerNodeBuilder<NodeIdentifier, ContainerNode> familyContainerBuilder =
- ImmutableContainerNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME));
-
- final CollectionNodeBuilder<MapEntryNode, MapNode> childrenBuilder = mapNodeBuilder(CHILDREN_QNAME);
-
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
- FIRST_GRAND_CHILD_ID);
- final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
- SECOND_GRAND_CHILD_ID);
-
- firstGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
-
- secondGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME));
-
- firstChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
- .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
- secondChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
- .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
- .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
- childrenBuilder.withChild(firstChildBuilder.build());
- childrenBuilder.withChild(secondChildBuilder.build());
+ final var firstGrandChild =
+ mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+ .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
+ .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME))
+ .build();
- return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+ .withChild(mapNodeBuilder(CHILDREN_QNAME)
+ .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+ .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+ .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+ .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+ .build())
+ .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+ .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+ .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+ .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+ .build())
+ .build())
+ .build();
}
}
*/
package org.opendaylight.controller.md.cluster.datastore.model;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
+
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
public final class PeopleModel {
public static final QName BASE_QNAME = QName.create(
public static final YangInstanceIdentifier PERSON_LIST_PATH = BASE_PATH.node(PERSON_QNAME);
private PeopleModel() {
-
+ // Hidden on purpose
}
- public static NormalizedNode<?, ?> create() {
-
- // Create a list builder
- CollectionNodeBuilder<MapEntryNode, MapNode> cars =
- ImmutableMapNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(
- PERSON_QNAME));
-
- // Create an entry for the person jack
- MapEntryNode jack =
- ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
- .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
- .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
- .build();
-
- // Create an entry for the person jill
- MapEntryNode jill =
- ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
- .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
- .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
- .build();
-
- cars.withChild(jack);
- cars.withChild(jill);
-
- return ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
- .withChild(cars.build())
+ public static ContainerNode create() {
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+ .withChild(ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(PERSON_QNAME))
+ // Create an entry for the person jack
+ .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
+ .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
+ .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
+ .build())
+ // Create an entry for the person jill
+ .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
+ .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
+ .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
+ .build())
+ .build())
.build();
-
}
- public static NormalizedNode<?, ?> emptyContainer() {
- return ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
- .build();
+ public static ContainerNode emptyContainer() {
+ return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
}
- public static NormalizedNode<?, ?> newPersonMapNode() {
- return ImmutableNodes.mapNodeBuilder(PERSON_QNAME).build();
+ public static SystemMapNode newPersonMapNode() {
+ return ImmutableNodes.newSystemMapBuilder().withNodeIdentifier(new NodeIdentifier(PERSON_QNAME)).build();
}
public static MapEntryNode newPersonEntry(final String name) {
- return ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
- .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name)).build();
+ return mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
+ .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name))
+ .build();
}
public static YangInstanceIdentifier newPersonPath(final String name) {
return YangInstanceIdentifier.builder(PERSON_LIST_PATH)
- .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name).build();
+ .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name)
+ .build();
}
}
public static EffectiveModelContext select(final String... schemaFiles) {
return YangParserTestUtils.parseYangResources(SchemaContextHelper.class, schemaFiles);
}
-
- public static EffectiveModelContext distributedShardedDOMDataTreeSchemaContext() {
- // we need prefix-shard-configuration and odl-datastore-test models
- // for DistributedShardedDOMDataTree tests
- return YangParserTestUtils.parseYangResources(SchemaContextHelper.class, ODL_DATASTORE_TEST_YANG,
- "/META-INF/yang/prefix-shard-configuration@2017-01-10.yang");
- }
}
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
return YangParserTestUtils.parseYangResource(DATASTORE_TEST_YANG);
}
- public static DataContainerChild<?, ?> outerMapNode() {
+ public static DataContainerChild outerMapNode() {
return ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME).build();
}
- public static DataContainerChild<?, ?> outerNode(final int... ids) {
- CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+ public static DataContainerChild outerNode(final int... ids) {
+ var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
for (int id: ids) {
outer.addChild(ImmutableNodes.mapEntry(OUTER_LIST_QNAME, ID_QNAME, id));
}
return outer.build();
}
- public static DataContainerChild<?, ?> outerNode(final MapEntryNode... entries) {
- CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+ public static DataContainerChild outerNode(final MapEntryNode... entries) {
+ var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
for (MapEntryNode e: entries) {
outer.addChild(e);
}
return outer.build();
}
- public static DataContainerChild<?, ?> innerNode(final String... names) {
- CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
+ public static DataContainerChild innerNode(final String... names) {
+ var outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
for (String name: names) {
outer.addChild(ImmutableNodes.mapEntry(INNER_LIST_QNAME, NAME_QNAME, name));
}
return outer.build();
}
- public static MapEntryNode outerNodeEntry(final int id, final DataContainerChild<?, ?> inner) {
+ public static MapEntryNode outerNodeEntry(final int id, final DataContainerChild inner) {
return ImmutableNodes.mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, id).addChild(inner).build();
}
- public static NormalizedNode<?, ?> testNodeWithOuter(final int... ids) {
+ public static ContainerNode testNodeWithOuter(final int... ids) {
return testNodeWithOuter(outerNode(ids));
}
- public static NormalizedNode<?, ?> testNodeWithOuter(final DataContainerChild<?, ?> outer) {
- return ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outer).build();
+ public static ContainerNode testNodeWithOuter(final DataContainerChild outer) {
+ return Builders.containerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+ .withChild(outer)
+ .build();
}
public static NodeIdentifierWithPredicates outerEntryKey(final int id) {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
actor {
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2565
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2565
+ transport = tcp
}
netty.tcp {
}
cluster {
- auto-down-unreachable-after = 100s
retry-unsuccessful-join-after = 100ms
roles = [
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2558
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2558
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
actor {
provider = "akka.cluster.ClusterActorRefProvider"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2559
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2559
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2557
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2557
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2560
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2560
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2561
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2561
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
+
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2562
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2562
+ transport = tcp
}
netty.tcp {
--- /dev/null
+{"Entries":[{"Entry":[{"Node":[{"Path":"/"},{"ModificationType":"UNMODIFIED"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=1}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=2}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=3}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=4}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=5}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=6}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=7}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=8}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=9}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=10}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=11}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=12}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=13}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=14}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=15}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=16}]"}]}]}]}
\ No newline at end of file
--- /dev/null
+{"odl-datastore-test:test":{}}
\ No newline at end of file
}
}
}
+ persistence.non-persistent.journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
loglevel = "INFO"
]
}
}
-}
\ No newline at end of file
+}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
- <relativePath>../parent</relativePath>
- </parent>
-
- <artifactId>sal-distributed-eos</artifactId>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-eos-dom-api</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-distributed-datastore</artifactId>
- </dependency>
-
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-akka-raft-example</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-akka-raft</artifactId>
- <type>test-jar</type>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-distributed-datastore</artifactId>
- <version>${project.version}</version>
- <type>test-jar</type>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-test-util</artifactId>
- </dependency>
- </dependencies>
-
- <scm>
- <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
- </scm>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_QNAME;
-
-import java.util.Optional;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-
-public abstract class AbstractEntityOwnerChangeListener implements DOMDataTreeChangeListener {
- private static final YangInstanceIdentifier EOS_PATH = YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH)
- .node(EntityType.QNAME).node(EntityType.QNAME).node(ENTITY_QNAME).node(ENTITY_QNAME)
- .node(ENTITY_OWNER_QNAME).build();
-
- void init(final ShardDataTree shardDataTree) {
- shardDataTree.registerTreeChangeListener(EOS_PATH, this, Optional.empty(), noop -> { /* NOOP */ });
- }
-
- protected static String extractOwner(final LeafNode<?> ownerLeaf) {
- return ownerLeaf.getValue().toString();
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NAME_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_QNAME;
-
-import akka.actor.ActorRef;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateRemoved;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens for candidate entries added/removed and notifies the EntityOwnershipShard appropriately.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-class CandidateListChangeListener implements DOMDataTreeChangeListener {
- private static final Logger LOG = LoggerFactory.getLogger(CandidateListChangeListener.class);
-
- private final String logId;
- private final ActorRef shard;
- private final Map<YangInstanceIdentifier, Collection<String>> currentCandidates = new HashMap<>();
-
- CandidateListChangeListener(final ActorRef shard, final String logId) {
- this.shard = requireNonNull(shard, "shard should not be null");
- this.logId = logId;
- }
-
- void init(final ShardDataTree shardDataTree) {
- shardDataTree.registerTreeChangeListener(YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH)
- .node(EntityType.QNAME).node(EntityType.QNAME).node(ENTITY_QNAME).node(ENTITY_QNAME)
- .node(Candidate.QNAME).node(Candidate.QNAME).build(), this, Optional.empty(), noop -> { /* NOOP */ });
- }
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
- for (DataTreeCandidate change: changes) {
- DataTreeCandidateNode changeRoot = change.getRootNode();
- ModificationType type = changeRoot.getModificationType();
-
- LOG.debug("{}: Candidate node changed: {}, {}", logId, type, change.getRootPath());
-
- NodeIdentifierWithPredicates candidateKey =
- (NodeIdentifierWithPredicates) change.getRootPath().getLastPathArgument();
- String candidate = candidateKey.getValue(CANDIDATE_NAME_QNAME).toString();
-
- YangInstanceIdentifier entityId = extractEntityPath(change.getRootPath());
-
- if (type == ModificationType.WRITE || type == ModificationType.APPEARED) {
- LOG.debug("{}: Candidate {} was added for entity {}", logId, candidate, entityId);
-
- Collection<String> newCandidates = addToCurrentCandidates(entityId, candidate);
- shard.tell(new CandidateAdded(entityId, candidate, new ArrayList<>(newCandidates)), shard);
- } else if (type == ModificationType.DELETE || type == ModificationType.DISAPPEARED) {
- LOG.debug("{}: Candidate {} was removed for entity {}", logId, candidate, entityId);
-
- Collection<String> newCandidates = removeFromCurrentCandidates(entityId, candidate);
- shard.tell(new CandidateRemoved(entityId, candidate, new ArrayList<>(newCandidates)), shard);
- }
- }
- }
-
- private Collection<String> addToCurrentCandidates(final YangInstanceIdentifier entityId,
- final String newCandidate) {
- Collection<String> candidates = currentCandidates.computeIfAbsent(entityId, k -> new LinkedHashSet<>());
- candidates.add(newCandidate);
- return candidates;
- }
-
- private Collection<String> removeFromCurrentCandidates(final YangInstanceIdentifier entityId,
- final String candidateToRemove) {
- Collection<String> candidates = currentCandidates.get(entityId);
- if (candidates != null) {
- candidates.remove(candidateToRemove);
- return candidates;
- }
-
- // Shouldn't happen
- return Collections.emptyList();
- }
-
- private static YangInstanceIdentifier extractEntityPath(final YangInstanceIdentifier candidatePath) {
- List<PathArgument> newPathArgs = new ArrayList<>();
- for (PathArgument pathArg: candidatePath.getPathArguments()) {
- newPathArgs.add(pathArg);
- if (pathArg instanceof NodeIdentifierWithPredicates
- && ENTITY_ID_QNAME.equals(((NodeIdentifierWithPredicates) pathArg).keySet().iterator().next())) {
- break;
- }
- }
-
- return YangInstanceIdentifier.create(newPathArgs);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-/**
- * Implementation of EntityOwnershipCandidateRegistration.
- *
- * @author Thomas Pantelis
- */
-class DistributedEntityOwnershipCandidateRegistration extends AbstractObjectRegistration<DOMEntity>
- implements DOMEntityOwnershipCandidateRegistration {
- private final DistributedEntityOwnershipService service;
-
- DistributedEntityOwnershipCandidateRegistration(final DOMEntity entity,
- final DistributedEntityOwnershipService service) {
- super(entity);
- this.service = service;
- }
-
- @Override
- protected void removeRegistration() {
- service.unregisterCandidate(getInstance());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects.ToStringHelper;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-/**
- * Implementation of EntityOwnershipListenerRegistration.
- *
- * @author Thomas Pantelis
- */
-class DistributedEntityOwnershipListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener>
- implements DOMEntityOwnershipListenerRegistration {
- private final DistributedEntityOwnershipService service;
- private final String entityType;
-
- DistributedEntityOwnershipListenerRegistration(final DOMEntityOwnershipListener listener, final String entityType,
- final DistributedEntityOwnershipService service) {
- super(listener);
- this.entityType = requireNonNull(entityType, "entityType cannot be null");
- this.service = requireNonNull(service, "DOMEntityOwnershipListener cannot be null");
- }
-
- @Override
- protected void removeRegistration() {
- service.unregisterListener(getEntityType(), getInstance());
- }
-
- @Override
- public String getEntityType() {
- return entityType;
- }
-
- @Override
- protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("entityType", entityType);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import akka.actor.ActorRef;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
-
-/**
- * The distributed implementation of the EntityOwnershipService.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipService implements DOMEntityOwnershipService, AutoCloseable {
- @VisibleForTesting
- static final String ENTITY_OWNERSHIP_SHARD_NAME = "entity-ownership";
-
- private static final Logger LOG = LoggerFactory.getLogger(DistributedEntityOwnershipService.class);
- private static final Timeout MESSAGE_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
-
- private final ConcurrentMap<DOMEntity, DOMEntity> registeredEntities = new ConcurrentHashMap<>();
- private final ActorUtils context;
-
- private volatile ActorRef localEntityOwnershipShard;
- private volatile DataTree localEntityOwnershipShardDataTree;
-
- DistributedEntityOwnershipService(final ActorUtils context) {
- this.context = requireNonNull(context);
- }
-
- public static DistributedEntityOwnershipService start(final ActorUtils context,
- final EntityOwnerSelectionStrategyConfig strategyConfig) {
- ActorRef shardManagerActor = context.getShardManager();
-
- Configuration configuration = context.getConfiguration();
- Collection<MemberName> entityOwnersMemberNames = configuration.getUniqueMemberNamesForAllShards();
- CreateShard createShard = new CreateShard(new ModuleShardConfiguration(EntityOwners.QNAME.getNamespace(),
- "entity-owners", ENTITY_OWNERSHIP_SHARD_NAME, ModuleShardStrategy.NAME, entityOwnersMemberNames),
- newShardBuilder(context, strategyConfig), null);
-
- Future<Object> createFuture = context.executeOperationAsync(shardManagerActor, createShard, MESSAGE_TIMEOUT);
- createFuture.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object response) {
- if (failure != null) {
- LOG.error("Failed to create {} shard", ENTITY_OWNERSHIP_SHARD_NAME, failure);
- } else {
- LOG.info("Successfully created {} shard", ENTITY_OWNERSHIP_SHARD_NAME);
- }
- }
- }, context.getClientDispatcher());
-
- return new DistributedEntityOwnershipService(context);
- }
-
- private void executeEntityOwnershipShardOperation(final ActorRef shardActor, final Object message) {
- Future<Object> future = context.executeOperationAsync(shardActor, message, MESSAGE_TIMEOUT);
- future.onComplete(new OnComplete<>() {
- @Override
- public void onComplete(final Throwable failure, final Object response) {
- if (failure != null) {
- // FIXME: CONTROLLER-1904: reduce the severity to info once we have a retry mechanism
- LOG.error("Error sending message {} to {}", message, shardActor, failure);
- } else {
- LOG.debug("{} message to {} succeeded", message, shardActor);
- }
- }
- }, context.getClientDispatcher());
- }
-
- @VisibleForTesting
- void executeLocalEntityOwnershipShardOperation(final Object message) {
- if (localEntityOwnershipShard == null) {
- Future<ActorRef> future = context.findLocalShardAsync(ENTITY_OWNERSHIP_SHARD_NAME);
- future.onComplete(new OnComplete<ActorRef>() {
- @Override
- public void onComplete(final Throwable failure, final ActorRef shardActor) {
- if (failure != null) {
- // FIXME: CONTROLLER-1904: reduce the severity to info once we have a retry mechanism
- LOG.error("Failed to find local {} shard", ENTITY_OWNERSHIP_SHARD_NAME, failure);
- } else {
- localEntityOwnershipShard = shardActor;
- executeEntityOwnershipShardOperation(localEntityOwnershipShard, message);
- }
- }
- }, context.getClientDispatcher());
-
- } else {
- executeEntityOwnershipShardOperation(localEntityOwnershipShard, message);
- }
- }
-
- @Override
- public DOMEntityOwnershipCandidateRegistration registerCandidate(final DOMEntity entity)
- throws CandidateAlreadyRegisteredException {
- requireNonNull(entity, "entity cannot be null");
-
- if (registeredEntities.putIfAbsent(entity, entity) != null) {
- throw new CandidateAlreadyRegisteredException(entity);
- }
-
- RegisterCandidateLocal registerCandidate = new RegisterCandidateLocal(entity);
-
- LOG.debug("Registering candidate with message: {}", registerCandidate);
-
- executeLocalEntityOwnershipShardOperation(registerCandidate);
- return new DistributedEntityOwnershipCandidateRegistration(entity, this);
- }
-
- void unregisterCandidate(final DOMEntity entity) {
- LOG.debug("Unregistering candidate for {}", entity);
-
- executeLocalEntityOwnershipShardOperation(new UnregisterCandidateLocal(entity));
- registeredEntities.remove(entity);
- }
-
- @Override
- public DOMEntityOwnershipListenerRegistration registerListener(final String entityType,
- final DOMEntityOwnershipListener listener) {
- RegisterListenerLocal registerListener = new RegisterListenerLocal(listener, entityType);
-
- LOG.debug("Registering listener with message: {}", registerListener);
-
- executeLocalEntityOwnershipShardOperation(registerListener);
- return new DistributedEntityOwnershipListenerRegistration(listener, entityType, this);
- }
-
- @Override
- public Optional<EntityOwnershipState> getOwnershipState(final DOMEntity forEntity) {
- requireNonNull(forEntity, "forEntity cannot be null");
-
- DataTree dataTree = getLocalEntityOwnershipShardDataTree();
- if (dataTree == null) {
- return Optional.empty();
- }
-
- Optional<NormalizedNode<?, ?>> entityNode = dataTree.takeSnapshot().readNode(
- entityPath(forEntity.getType(), forEntity.getIdentifier()));
- if (!entityNode.isPresent()) {
- return Optional.empty();
- }
-
- // Check if there are any candidates, if there are none we do not really have ownership state
- final MapEntryNode entity = (MapEntryNode) entityNode.get();
- final Optional<DataContainerChild<? extends PathArgument, ?>> optionalCandidates =
- entity.getChild(CANDIDATE_NODE_ID);
- final boolean hasCandidates = optionalCandidates.isPresent()
- && ((MapNode) optionalCandidates.get()).getValue().size() > 0;
- if (!hasCandidates) {
- return Optional.empty();
- }
-
- MemberName localMemberName = context.getCurrentMemberName();
- Optional<DataContainerChild<? extends PathArgument, ?>> ownerLeaf = entity.getChild(ENTITY_OWNER_NODE_ID);
- String owner = ownerLeaf.isPresent() ? ownerLeaf.get().getValue().toString() : null;
- boolean hasOwner = !Strings.isNullOrEmpty(owner);
- boolean isOwner = hasOwner && localMemberName.getName().equals(owner);
-
- return Optional.of(EntityOwnershipState.from(isOwner, hasOwner));
- }
-
- @Override
- public boolean isCandidateRegistered(final DOMEntity entity) {
- return registeredEntities.get(entity) != null;
- }
-
- @VisibleForTesting
- @SuppressWarnings("checkstyle:IllegalCatch")
- @SuppressFBWarnings(value = "REC_CATCH_EXCEPTION", justification = "Akka's Await.result() API contract")
- DataTree getLocalEntityOwnershipShardDataTree() {
- final DataTree local = localEntityOwnershipShardDataTree;
- if (local != null) {
- return local;
- }
-
- if (localEntityOwnershipShard == null) {
- try {
- localEntityOwnershipShard = Await.result(context.findLocalShardAsync(
- ENTITY_OWNERSHIP_SHARD_NAME), Duration.Inf());
- } catch (TimeoutException | InterruptedException e) {
- LOG.error("Failed to find local {} shard", ENTITY_OWNERSHIP_SHARD_NAME, e);
- return null;
- }
- }
-
- try {
- return localEntityOwnershipShardDataTree = (DataTree) Await.result(Patterns.ask(localEntityOwnershipShard,
- GetShardDataTree.INSTANCE, MESSAGE_TIMEOUT), Duration.Inf());
- } catch (TimeoutException | InterruptedException e) {
- LOG.error("Failed to find local {} shard", ENTITY_OWNERSHIP_SHARD_NAME, e);
- return null;
- }
- }
-
- void unregisterListener(final String entityType, final DOMEntityOwnershipListener listener) {
- LOG.debug("Unregistering listener {} for entity type {}", listener, entityType);
-
- executeLocalEntityOwnershipShardOperation(new UnregisterListenerLocal(listener, entityType));
- }
-
- @Override
- public void close() {
- }
-
- private static EntityOwnershipShard.Builder newShardBuilder(final ActorUtils context,
- final EntityOwnerSelectionStrategyConfig strategyConfig) {
- return EntityOwnershipShard.newBuilder().localMemberName(context.getCurrentMemberName())
- .ownerSelectionStrategyConfig(strategyConfig);
- }
-
- @VisibleForTesting
- ActorRef getLocalEntityOwnershipShard() {
- return localEntityOwnershipShard;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static com.google.common.base.Verify.verifyNotNull;
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.createEntity;
-
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.Objects;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens for entity owner changes and notifies the EntityOwnershipListenerSupport appropriately.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnerChangeListener extends AbstractEntityOwnerChangeListener {
- private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerChangeListener.class);
-
- private final String localMemberName;
- private final EntityOwnershipChangePublisher publisher;
-
- EntityOwnerChangeListener(final MemberName localMemberName, final EntityOwnershipChangePublisher publisher) {
- this.localMemberName = verifyNotNull(localMemberName.getName());
- this.publisher = requireNonNull(publisher);
- }
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
- for (DataTreeCandidate change: changes) {
- DataTreeCandidateNode changeRoot = change.getRootNode();
- LeafNode<?> ownerLeaf = (LeafNode<?>) changeRoot.getDataAfter().get();
-
- LOG.debug("{}: Entity node changed: {}, {}", logId(), changeRoot.getModificationType(),
- change.getRootPath());
-
- String newOwner = extractOwner(ownerLeaf);
-
- String origOwner = null;
- Optional<NormalizedNode<?, ?>> dataBefore = changeRoot.getDataBefore();
- if (dataBefore.isPresent()) {
- origOwner = extractOwner((LeafNode<?>) changeRoot.getDataBefore().get());
- }
-
- LOG.debug("{}: New owner: {}, Original owner: {}", logId(), newOwner, origOwner);
-
- if (!Objects.equals(origOwner, newOwner)) {
- boolean isOwner = localMemberName.equals(newOwner);
- boolean wasOwner = localMemberName.equals(origOwner);
- boolean hasOwner = !Strings.isNullOrEmpty(newOwner);
-
- DOMEntity entity = createEntity(change.getRootPath());
-
- LOG.debug(
- "{}: Calling notifyEntityOwnershipListeners: entity: {}, wasOwner: {}, isOwner: {}, hasOwner: {}",
- logId(), entity, wasOwner, isOwner, hasOwner);
-
- publisher.notifyEntityOwnershipListeners(entity, wasOwner, isOwner, hasOwner);
- }
- }
- }
-
- private String logId() {
- return publisher.getLogId();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import java.util.Map.Entry;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.Entity;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableOrderedMapNodeBuilder;
-
-/**
- * Utility methods for entity-owners yang model.
- *
- * @author Thomas Pantelis
- */
-public final class EntityOwnersModel {
- static final QName ENTITY_QNAME = Entity.QNAME;
- static final QName CANDIDATE_NAME_QNAME = QName.create(Candidate.QNAME, "name").intern();
- static final QName ENTITY_ID_QNAME = QName.create(ENTITY_QNAME, "id").intern();
- static final QName ENTITY_OWNER_QNAME = QName.create(ENTITY_QNAME, "owner").intern();
- static final QName ENTITY_TYPE_QNAME = QName.create(EntityType.QNAME, "type").intern();
-
- static final NodeIdentifier ENTITY_OWNERS_NODE_ID = NodeIdentifier.create(EntityOwners.QNAME);
- static final NodeIdentifier ENTITY_OWNER_NODE_ID = NodeIdentifier.create(ENTITY_OWNER_QNAME);
- static final NodeIdentifier ENTITY_NODE_ID = NodeIdentifier.create(ENTITY_QNAME);
- static final NodeIdentifier ENTITY_ID_NODE_ID = NodeIdentifier.create(ENTITY_ID_QNAME);
- static final NodeIdentifier ENTITY_TYPE_NODE_ID = NodeIdentifier.create(ENTITY_TYPE_QNAME);
- static final NodeIdentifier CANDIDATE_NODE_ID = NodeIdentifier.create(Candidate.QNAME);
- static final NodeIdentifier CANDIDATE_NAME_NODE_ID = NodeIdentifier.create(CANDIDATE_NAME_QNAME);
- static final YangInstanceIdentifier ENTITY_OWNERS_PATH = YangInstanceIdentifier.create(ENTITY_OWNERS_NODE_ID);
- static final YangInstanceIdentifier ENTITY_TYPES_PATH = ENTITY_OWNERS_PATH.node(EntityType.QNAME).toOptimized();
-
- private EntityOwnersModel() {
- }
-
- static YangInstanceIdentifier entityPath(final String entityType, final YangInstanceIdentifier entityId) {
- return YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH).node(EntityType.QNAME)
- .nodeWithKey(EntityType.QNAME, ENTITY_TYPE_QNAME, entityType).node(ENTITY_QNAME)
- .nodeWithKey(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).build();
-
- }
-
- static YangInstanceIdentifier candidatePath(final String entityType, final YangInstanceIdentifier entityId,
- final String candidateName) {
- return YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH).node(EntityType.QNAME)
- .nodeWithKey(EntityType.QNAME, ENTITY_TYPE_QNAME, entityType).node(ENTITY_QNAME)
- .nodeWithKey(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).node(Candidate.QNAME)
- .nodeWithKey(Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName).build();
- }
-
- static YangInstanceIdentifier candidatePath(final YangInstanceIdentifier entityPath, final String candidateName) {
- return YangInstanceIdentifier.builder(entityPath).node(Candidate.QNAME).nodeWithKey(
- Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName).build();
- }
-
- static NodeIdentifierWithPredicates candidateNodeKey(final String candidateName) {
- return NodeIdentifierWithPredicates.of(Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName);
- }
-
- static NormalizedNode<?, ?> entityOwnersWithCandidate(final String entityType,
- final YangInstanceIdentifier entityId, final String candidateName) {
- return entityOwnersWithEntityTypeEntry(entityTypeEntryWithEntityEntry(entityType,
- entityEntryWithCandidateEntry(entityId, candidateName)));
- }
-
- static ContainerNode entityOwnersWithEntityTypeEntry(final MapEntryNode entityTypeNode) {
- return ImmutableContainerNodeBuilder.create().withNodeIdentifier(
- ENTITY_OWNERS_NODE_ID).addChild(ImmutableNodes.mapNodeBuilder(EntityType.QNAME)
- .addChild(entityTypeNode).build()).build();
- }
-
- static MapEntryNode entityTypeEntryWithEntityEntry(final String entityType, final MapEntryNode entityNode) {
- return ImmutableNodes.mapEntryBuilder(EntityType.QNAME,
- ENTITY_TYPE_QNAME, entityType).addChild(ImmutableNodes.mapNodeBuilder(
- ENTITY_QNAME).addChild(entityNode).build()).build();
- }
-
- static MapEntryNode entityEntryWithCandidateEntry(final YangInstanceIdentifier entityId,
- final String candidateName) {
- return ImmutableNodes.mapEntryBuilder(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).addChild(
- candidateEntry(candidateName)).build();
- }
-
- static MapNode candidateEntry(final String candidateName) {
- return ImmutableOrderedMapNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(Candidate.QNAME))
- .addChild(candidateMapEntry(candidateName)).build();
- }
-
- static MapEntryNode candidateMapEntry(final String candidateName) {
- return ImmutableNodes.mapEntry(Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName);
- }
-
- static MapEntryNode entityEntryWithOwner(final YangInstanceIdentifier entityId, final String owner) {
- return ImmutableNodes.mapEntryBuilder(ENTITY_QNAME, ENTITY_ID_QNAME, entityId)
- .addChild(ImmutableNodes.leafNode(ENTITY_OWNER_QNAME, owner != null ? owner : ""))
- .build();
- }
-
- public static String entityTypeFromEntityPath(final YangInstanceIdentifier entityPath) {
- YangInstanceIdentifier parent = entityPath;
- while (!parent.isEmpty()) {
- if (EntityType.QNAME.equals(parent.getLastPathArgument().getNodeType())) {
- YangInstanceIdentifier.NodeIdentifierWithPredicates entityTypeLastPathArgument =
- (YangInstanceIdentifier.NodeIdentifierWithPredicates) parent.getLastPathArgument();
- return (String) entityTypeLastPathArgument.getValue(ENTITY_TYPE_QNAME);
- }
- parent = parent.getParent();
- }
- return null;
- }
-
- static DOMEntity createEntity(final YangInstanceIdentifier entityPath) {
- String entityType = null;
- YangInstanceIdentifier entityId = null;
- for (PathArgument pathArg: entityPath.getPathArguments()) {
- if (pathArg instanceof NodeIdentifierWithPredicates) {
- NodeIdentifierWithPredicates nodeKey = (NodeIdentifierWithPredicates) pathArg;
- Entry<QName, Object> key = nodeKey.entrySet().iterator().next();
- if (ENTITY_TYPE_QNAME.equals(key.getKey())) {
- entityType = key.getValue().toString();
- } else if (ENTITY_ID_QNAME.equals(key.getKey())) {
- entityId = (YangInstanceIdentifier) key.getValue();
- }
- }
- }
-
- return new DOMEntity(entityType, entityId);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Abstract base for notifying EntityOwnershipListeners.
- *
- * @author Thomas Pantelis
- */
-abstract class EntityOwnershipChangePublisher {
- abstract void notifyEntityOwnershipListeners(DOMEntity entity, boolean wasOwner, boolean isOwner, boolean hasOwner);
-
- abstract String getLogId();
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.Props;
-import akka.japi.Creator;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * An actor which is responsible for notifying an EntityOwnershipListener of changes.
- *
- * @author Thomas Pantelis
- */
-final class EntityOwnershipListenerActor extends AbstractUntypedActor {
- private final DOMEntityOwnershipListener listener;
-
- private EntityOwnershipListenerActor(final DOMEntityOwnershipListener listener) {
- this.listener = listener;
- }
-
- @Override
- protected void handleReceive(final Object message) {
- if (message instanceof DOMEntityOwnershipChange) {
- onEntityOwnershipChanged((DOMEntityOwnershipChange)message);
- } else {
- unknownMessage(message);
- }
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private void onEntityOwnershipChanged(final DOMEntityOwnershipChange change) {
- LOG.debug("Notifying EntityOwnershipListener {}: {}", listener, change);
-
- try {
- listener.ownershipChanged(change);
- } catch (Exception e) {
- LOG.error("Error notifying listener {}", listener, e);
- }
- }
-
- static Props props(final DOMEntityOwnershipListener listener) {
- return Props.create(EntityOwnershipListenerActor.class, new EntityOwnershipListenerCreator(listener));
- }
-
- private static final class EntityOwnershipListenerCreator implements Creator<EntityOwnershipListenerActor> {
- private static final long serialVersionUID = 1L;
-
- @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
- + "create remote instances of this actor and thus don't need it to be Serializable.")
- private final DOMEntityOwnershipListener listener;
-
- EntityOwnershipListenerCreator(final DOMEntityOwnershipListener listener) {
- this.listener = requireNonNull(listener);
- }
-
- @Override
- public EntityOwnershipListenerActor create() {
- return new EntityOwnershipListenerActor(listener);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import akka.actor.ActorContext;
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Multimap;
-import java.util.Collection;
-import java.util.IdentityHashMap;
-import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.checkerframework.checker.lock.qual.Holding;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Manages EntityOwnershipListener registrations and notifications for the EntityOwnershipShard. This class is
- * thread-safe.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnershipListenerSupport extends EntityOwnershipChangePublisher {
- private static final Logger LOG = LoggerFactory.getLogger(EntityOwnershipListenerSupport.class);
-
- private final String logId;
- private final ActorContext actorContext;
- private final ReadWriteLock listenerLock = new ReentrantReadWriteLock();
-
- @GuardedBy("listenerLock")
- private final Map<DOMEntityOwnershipListener, ListenerActorRefEntry> listenerActorMap = new IdentityHashMap<>();
-
- @GuardedBy("listenerLock")
- private final Multimap<String, DOMEntityOwnershipListener> entityTypeListenerMap = HashMultimap.create();
-
- private volatile boolean inJeopardy = false;
-
- EntityOwnershipListenerSupport(final ActorContext actorContext, final String logId) {
- this.actorContext = actorContext;
- this.logId = logId;
- }
-
- @Override
- String getLogId() {
- return logId;
- }
-
- /**
- * Set the in-jeopardy flag and indicate its previous state.
- *
- * @param inJeopardy new value of the in-jeopardy flag
- * @return Previous value of the flag.
- */
- @SuppressWarnings("checkstyle:hiddenField")
- boolean setInJeopardy(final boolean inJeopardy) {
- final boolean wasInJeopardy = this.inJeopardy;
- this.inJeopardy = inJeopardy;
- return wasInJeopardy;
- }
-
- void addEntityOwnershipListener(final String entityType, final DOMEntityOwnershipListener listener) {
- LOG.debug("{}: Adding EntityOwnershipListener {} for entity type {}", logId, listener, entityType);
-
- listenerLock.writeLock().lock();
- try {
- if (entityTypeListenerMap.put(entityType, listener)) {
- ListenerActorRefEntry listenerEntry = listenerActorMap.get(listener);
- if (listenerEntry == null) {
- listenerActorMap.put(listener, new ListenerActorRefEntry(listener));
- } else {
- listenerEntry.referenceCount++;
- }
- }
- } finally {
- listenerLock.writeLock().unlock();
- }
- }
-
- void removeEntityOwnershipListener(final String entityType, final DOMEntityOwnershipListener listener) {
- LOG.debug("{}: Removing EntityOwnershipListener {} for entity type {}", logId, listener, entityType);
-
- listenerLock.writeLock().lock();
- try {
- if (entityTypeListenerMap.remove(entityType, listener)) {
- ListenerActorRefEntry listenerEntry = listenerActorMap.get(listener);
-
- LOG.debug("{}: Found {}", logId, listenerEntry);
-
- listenerEntry.referenceCount--;
- if (listenerEntry.referenceCount <= 0) {
- listenerActorMap.remove(listener);
-
- if (listenerEntry.actorRef != null) {
- LOG.debug("Killing EntityOwnershipListenerActor {}", listenerEntry.actorRef);
- listenerEntry.actorRef.tell(PoisonPill.getInstance(), ActorRef.noSender());
- }
- }
- }
- } finally {
- listenerLock.writeLock().unlock();
- }
- }
-
- @Override
- void notifyEntityOwnershipListeners(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
- final boolean hasOwner) {
- listenerLock.readLock().lock();
- try {
- Collection<DOMEntityOwnershipListener> listeners = entityTypeListenerMap.get(entity.getType());
- if (!listeners.isEmpty()) {
- notifyListeners(entity, wasOwner, isOwner, hasOwner,
- listeners.stream().map(listenerActorMap::get).collect(Collectors.toList()));
- }
- } finally {
- listenerLock.readLock().unlock();
- }
- }
-
- void notifyEntityOwnershipListener(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
- final boolean hasOwner, final DOMEntityOwnershipListener listener) {
- listenerLock.readLock().lock();
- try {
- notifyListeners(entity, wasOwner, isOwner, hasOwner, ImmutableList.of(listenerActorMap.get(listener)));
- } finally {
- listenerLock.readLock().unlock();
- }
- }
-
- @Holding("listenerLock")
- private void notifyListeners(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
- final boolean hasOwner, final Collection<ListenerActorRefEntry> listenerEntries) {
- DOMEntityOwnershipChange changed = new DOMEntityOwnershipChange(entity,
- EntityOwnershipChangeState.from(wasOwner, isOwner, hasOwner), inJeopardy);
- for (ListenerActorRefEntry entry: listenerEntries) {
- ActorRef listenerActor = entry.actorFor();
-
- LOG.debug("{}: Notifying EntityOwnershipListenerActor {} with {}", logId, listenerActor, changed);
-
- listenerActor.tell(changed, ActorRef.noSender());
- }
- }
-
- private class ListenerActorRefEntry {
- final DOMEntityOwnershipListener listener;
-
- @GuardedBy("listenerLock")
- ActorRef actorRef;
-
- @GuardedBy("listenerLock")
- int referenceCount = 1;
-
- ListenerActorRefEntry(final DOMEntityOwnershipListener listener) {
- this.listener = listener;
- }
-
- ActorRef actorFor() {
- if (actorRef == null) {
- actorRef = actorContext.actorOf(EntityOwnershipListenerActor.props(listener));
-
- LOG.debug("{}: Created EntityOwnershipListenerActor {} for listener {}", logId, actorRef, listener);
- }
-
- return actorRef;
- }
-
- @Override
- public String toString() {
- return "ListenerActorRefEntry [actorRef=" + actorRef + ", referenceCount=" + referenceCount + "]";
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NAME_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_ID_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_TYPES_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_TYPE_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_TYPE_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.candidateNodeKey;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.Cancellable;
-import akka.cluster.Cluster;
-import akka.cluster.ClusterEvent.CurrentClusterState;
-import akka.cluster.Member;
-import akka.cluster.MemberStatus;
-import akka.pattern.Patterns;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
-import org.opendaylight.controller.cluster.datastore.messages.SuccessReply;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateRemoved;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.RemoveAllCandidates;
-import org.opendaylight.controller.cluster.entityownership.messages.SelectOwner;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategy;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.VotingState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Special Shard for EntityOwnership.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnershipShard extends Shard {
- private final MemberName localMemberName;
- private final EntityOwnershipShardCommitCoordinator commitCoordinator;
- private final EntityOwnershipListenerSupport listenerSupport;
- private final Set<MemberName> downPeerMemberNames = new HashSet<>();
- private final EntityOwnerSelectionStrategyConfig strategyConfig;
- private final Map<YangInstanceIdentifier, Cancellable> entityToScheduledOwnershipTask = new HashMap<>();
- private final EntityOwnershipStatistics entityOwnershipStatistics;
- private boolean removeAllInitialCandidates = true;
-
- protected EntityOwnershipShard(final Builder builder) {
- super(builder);
- this.localMemberName = builder.localMemberName;
- this.commitCoordinator = new EntityOwnershipShardCommitCoordinator(builder.localMemberName, LOG);
- this.listenerSupport = new EntityOwnershipListenerSupport(getContext(), persistenceId());
- this.strategyConfig = builder.ownerSelectionStrategyConfig;
- this.entityOwnershipStatistics = new EntityOwnershipStatistics();
- this.entityOwnershipStatistics.init(getDataStore());
- }
-
- private static DatastoreContext noPersistenceDatastoreContext(final DatastoreContext datastoreContext) {
- return DatastoreContext.newBuilderFrom(datastoreContext).persistent(false).build();
- }
-
- @Override
- protected void onDatastoreContext(final DatastoreContext context) {
- super.onDatastoreContext(noPersistenceDatastoreContext(context));
- }
-
- @Override
- protected void onRecoveryComplete() {
- super.onRecoveryComplete();
-
- new CandidateListChangeListener(getSelf(), persistenceId()).init(getDataStore());
- new EntityOwnerChangeListener(localMemberName, listenerSupport).init(getDataStore());
- }
-
- @Override
- public void handleNonRaftCommand(final Object message) {
- if (message instanceof RegisterCandidateLocal) {
- onRegisterCandidateLocal((RegisterCandidateLocal) message);
- } else if (message instanceof UnregisterCandidateLocal) {
- onUnregisterCandidateLocal((UnregisterCandidateLocal) message);
- } else if (message instanceof CandidateAdded) {
- onCandidateAdded((CandidateAdded) message);
- } else if (message instanceof CandidateRemoved) {
- onCandidateRemoved((CandidateRemoved) message);
- } else if (message instanceof PeerDown) {
- onPeerDown((PeerDown) message);
- } else if (message instanceof PeerUp) {
- onPeerUp((PeerUp) message);
- } else if (message instanceof RegisterListenerLocal) {
- onRegisterListenerLocal((RegisterListenerLocal) message);
- } else if (message instanceof UnregisterListenerLocal) {
- onUnregisterListenerLocal((UnregisterListenerLocal) message);
- } else if (message instanceof SelectOwner) {
- onSelectOwner((SelectOwner) message);
- } else if (message instanceof RemoveAllCandidates) {
- onRemoveAllCandidates((RemoveAllCandidates) message);
- } else if (!commitCoordinator.handleMessage(message, this)) {
- super.handleNonRaftCommand(message);
- }
- }
-
- private void onRemoveAllCandidates(final RemoveAllCandidates message) {
- LOG.debug("{}: onRemoveAllCandidates: {}", persistenceId(), message);
-
- removeCandidateFromEntities(message.getMemberName());
- }
-
- private void onSelectOwner(final SelectOwner selectOwner) {
- LOG.debug("{}: onSelectOwner: {}", persistenceId(), selectOwner);
-
- String currentOwner = getCurrentOwner(selectOwner.getEntityPath());
- if (Strings.isNullOrEmpty(currentOwner)) {
- writeNewOwner(selectOwner.getEntityPath(), newOwner(currentOwner, selectOwner.getAllCandidates(),
- selectOwner.getOwnerSelectionStrategy()));
-
- Cancellable cancellable = entityToScheduledOwnershipTask.get(selectOwner.getEntityPath());
- if (cancellable != null) {
- if (!cancellable.isCancelled()) {
- cancellable.cancel();
- }
- entityToScheduledOwnershipTask.remove(selectOwner.getEntityPath());
- }
- }
- }
-
- private void onRegisterCandidateLocal(final RegisterCandidateLocal registerCandidate) {
- LOG.debug("{}: onRegisterCandidateLocal: {}", persistenceId(), registerCandidate);
-
- NormalizedNode<?, ?> entityOwners = entityOwnersWithCandidate(registerCandidate.getEntity().getType(),
- registerCandidate.getEntity().getIdentifier(), localMemberName.getName());
- commitCoordinator.commitModification(new MergeModification(ENTITY_OWNERS_PATH, entityOwners), this);
-
- getSender().tell(SuccessReply.INSTANCE, getSelf());
- }
-
- private void onUnregisterCandidateLocal(final UnregisterCandidateLocal unregisterCandidate) {
- LOG.debug("{}: onUnregisterCandidateLocal: {}", persistenceId(), unregisterCandidate);
-
- DOMEntity entity = unregisterCandidate.getEntity();
- YangInstanceIdentifier candidatePath = candidatePath(entity.getType(), entity.getIdentifier(),
- localMemberName.getName());
- commitCoordinator.commitModification(new DeleteModification(candidatePath), this);
-
- getSender().tell(SuccessReply.INSTANCE, getSelf());
- }
-
- private void onRegisterListenerLocal(final RegisterListenerLocal registerListener) {
- LOG.debug("{}: onRegisterListenerLocal: {}", persistenceId(), registerListener);
-
- listenerSupport.addEntityOwnershipListener(registerListener.getEntityType(), registerListener.getListener());
-
- getSender().tell(SuccessReply.INSTANCE, getSelf());
-
- searchForEntities((entityTypeNode, entityNode) -> {
- Optional<DataContainerChild<?, ?>> possibleType = entityTypeNode.getChild(ENTITY_TYPE_NODE_ID);
- String entityType = possibleType.isPresent() ? possibleType.get().getValue().toString() : null;
- if (registerListener.getEntityType().equals(entityType)) {
- final boolean hasOwner;
- final boolean isOwner;
-
- Optional<DataContainerChild<?, ?>> possibleOwner = entityNode.getChild(ENTITY_OWNER_NODE_ID);
- if (possibleOwner.isPresent()) {
- isOwner = localMemberName.getName().equals(possibleOwner.get().getValue().toString());
- hasOwner = true;
- } else {
- isOwner = false;
- hasOwner = false;
- }
-
- DOMEntity entity = new DOMEntity(entityType,
- (YangInstanceIdentifier) entityNode.getChild(ENTITY_ID_NODE_ID).get().getValue());
-
- listenerSupport.notifyEntityOwnershipListener(entity, false, isOwner, hasOwner,
- registerListener.getListener());
- }
- });
- }
-
- private void onUnregisterListenerLocal(final UnregisterListenerLocal unregisterListener) {
- LOG.debug("{}: onUnregisterListenerLocal: {}", persistenceId(), unregisterListener);
-
- listenerSupport.removeEntityOwnershipListener(unregisterListener.getEntityType(),
- unregisterListener.getListener());
-
- getSender().tell(SuccessReply.INSTANCE, getSelf());
- }
-
- void tryCommitModifications(final BatchedModifications modifications) {
- if (isLeader()) {
- LOG.debug("{}: Committing BatchedModifications {} locally", persistenceId(),
- modifications.getTransactionId());
-
- // Note that it's possible the commit won't get consensus and will timeout and not be applied
- // to the state. However we don't need to retry it in that case b/c it will be committed to
- // the journal first and, once a majority of followers come back on line and it is replicated,
- // it will be applied at that point.
- handleBatchedModificationsLocal(modifications, self());
- } else {
- final ActorSelection leader = getLeader();
- if (leader != null) {
- possiblyRemoveAllInitialCandidates(leader);
-
- LOG.debug("{}: Sending BatchedModifications {} to leader {}", persistenceId(),
- modifications.getTransactionId(), leader);
-
- Future<Object> future = Patterns.ask(leader, modifications, TimeUnit.SECONDS.toMillis(
- getDatastoreContext().getShardTransactionCommitTimeoutInSeconds()));
-
- Patterns.pipe(future, getContext().dispatcher()).pipeTo(getSelf(), ActorRef.noSender());
- }
- }
- }
-
- void possiblyRemoveAllInitialCandidates(final ActorSelection leader) {
- // The following handles removing all candidates on startup when re-joining with a remote leader. When a
- // follower is detected as down, the leader will re-assign new owners to entities that were owned by the
- // down member but doesn't remove the down member as a candidate, as the down node may actually be isolated
- // and still running. Therefore on startup we send an initial message to the remote leader to remove any
- // potential stale candidates we had previously registered, as it's possible a candidate may not be
- // registered by a client in the new incarnation. We have to send the RemoveAllCandidates message prior to any
- // pending registrations.
- if (removeAllInitialCandidates && leader != null) {
- removeAllInitialCandidates = false;
- if (!isLeader()) {
- LOG.debug("{} - got new leader {} on startup - sending RemoveAllCandidates", persistenceId(), leader);
-
- leader.tell(new RemoveAllCandidates(localMemberName), ActorRef.noSender());
- }
- }
- }
-
- boolean hasLeader() {
- return getLeader() != null && (!isLeader() || isLeaderActive());
- }
-
- /**
- * Determine if we are in jeopardy based on observed RAFT state.
- */
- private static boolean inJeopardy(final RaftState state) {
- switch (state) {
- case Candidate:
- case Follower:
- case Leader:
- case PreLeader:
- return false;
- case IsolatedLeader:
- return true;
- default:
- throw new IllegalStateException("Unsupported RAFT state " + state);
- }
- }
-
- private void notifyAllListeners() {
- searchForEntities((entityTypeNode, entityNode) -> {
- Optional<DataContainerChild<?, ?>> possibleType = entityTypeNode.getChild(ENTITY_TYPE_NODE_ID);
- if (possibleType.isPresent()) {
- final boolean hasOwner;
- final boolean isOwner;
-
- Optional<DataContainerChild<?, ?>> possibleOwner = entityNode.getChild(ENTITY_OWNER_NODE_ID);
- if (possibleOwner.isPresent()) {
- isOwner = localMemberName.getName().equals(possibleOwner.get().getValue().toString());
- hasOwner = true;
- } else {
- isOwner = false;
- hasOwner = false;
- }
-
- DOMEntity entity = new DOMEntity(possibleType.get().getValue().toString(),
- (YangInstanceIdentifier) entityNode.getChild(ENTITY_ID_NODE_ID).get().getValue());
-
- listenerSupport.notifyEntityOwnershipListeners(entity, isOwner, isOwner, hasOwner);
- }
- });
- }
-
- @Override
- protected void onStateChanged() {
- boolean isLeader = isLeader();
- LOG.debug("{}: onStateChanged: isLeader: {}, hasLeader: {}", persistenceId(), isLeader, hasLeader());
-
- // Examine current RAFT state to see if we are in jeopardy, potentially notifying all listeners
- final boolean inJeopardy = inJeopardy(getRaftState());
- final boolean wasInJeopardy = listenerSupport.setInJeopardy(inJeopardy);
- if (inJeopardy != wasInJeopardy) {
- LOG.debug("{}: {} jeopardy state, notifying all listeners", persistenceId(),
- inJeopardy ? "entered" : "left");
- notifyAllListeners();
- }
-
- commitCoordinator.onStateChanged(this, isLeader);
-
- super.onStateChanged();
- }
-
- @Override
- protected void onLeaderChanged(final String oldLeader, final String newLeader) {
- boolean isLeader = isLeader();
- LOG.debug("{}: onLeaderChanged: oldLeader: {}, newLeader: {}, isLeader: {}", persistenceId(), oldLeader,
- newLeader, isLeader);
-
- if (isLeader) {
-
- // Re-initialize the downPeerMemberNames from the current akka Cluster state. The previous leader, if any,
- // is most likely down however it's possible we haven't received the PeerDown message yet.
- initializeDownPeerMemberNamesFromClusterState();
-
- // Clear all existing strategies so that they get re-created when we call createStrategy again
- // This allows the strategies to be re-initialized with existing statistics maintained by
- // EntityOwnershipStatistics
- strategyConfig.clearStrategies();
-
- // Re-assign owners for all members that are known to be down. In a cluster which has greater than
- // 3 nodes it is possible for some node beside the leader being down when the leadership transitions
- // it makes sense to use this event to re-assign owners for those downed nodes.
- Set<String> ownedBy = new HashSet<>(downPeerMemberNames.size() + 1);
- for (MemberName downPeerName : downPeerMemberNames) {
- ownedBy.add(downPeerName.getName());
- }
-
- // Also try to assign owners for entities that have no current owner. See explanation in onPeerUp.
- ownedBy.add("");
- selectNewOwnerForEntitiesOwnedBy(ownedBy);
- } else {
- // The leader changed - notify the coordinator to check if pending modifications need to be sent.
- // While onStateChanged also does this, this method handles the case where the shard hears from a
- // leader and stays in the follower state. In that case no behavior state change occurs.
- commitCoordinator.onStateChanged(this, isLeader);
- }
-
- super.onLeaderChanged(oldLeader, newLeader);
- }
-
- @Override
- protected void onVotingStateChangeComplete() {
- // Re-evaluate ownership for all entities - if a member changed from voting to non-voting it should lose
- // ownership and vice versa it now is a candidate to become owner.
- final List<Modification> modifications = new ArrayList<>();
- searchForEntities((entityTypeNode, entityNode) -> {
- YangInstanceIdentifier entityPath = YangInstanceIdentifier.builder(ENTITY_TYPES_PATH)
- .node(entityTypeNode.getIdentifier()).node(ENTITY_NODE_ID).node(entityNode.getIdentifier())
- .node(ENTITY_OWNER_NODE_ID).build();
-
- Optional<String> possibleOwner =
- entityNode.getChild(ENTITY_OWNER_NODE_ID).map(node -> node.getValue().toString());
- String newOwner = newOwner(possibleOwner.orElse(null), getCandidateNames(entityNode),
- getEntityOwnerElectionStrategy(entityPath));
-
- if (!newOwner.equals(possibleOwner.orElse(""))) {
- modifications.add(new WriteModification(entityPath,
- ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)));
- }
- });
-
- commitCoordinator.commitModifications(modifications, this);
- }
-
- private void initializeDownPeerMemberNamesFromClusterState() {
- Optional<Cluster> cluster = getRaftActorContext().getCluster();
- if (!cluster.isPresent()) {
- return;
- }
-
- CurrentClusterState state = cluster.get().state();
- Set<Member> unreachable = state.getUnreachable();
-
- LOG.debug(
- "{}: initializeDownPeerMemberNamesFromClusterState - current downPeerMemberNames: {}, unreachable: {}",
- persistenceId(), downPeerMemberNames, unreachable);
-
- downPeerMemberNames.clear();
- for (Member m: unreachable) {
- downPeerMemberNames.add(MemberName.forName(m.getRoles().iterator().next()));
- }
-
- for (Member m: state.getMembers()) {
- if (m.status() != MemberStatus.up() && m.status() != MemberStatus.weaklyUp()) {
- LOG.debug("{}: Adding down member with status {}", persistenceId(), m.status());
- downPeerMemberNames.add(MemberName.forName(m.getRoles().iterator().next()));
- }
- }
-
- LOG.debug("{}: new downPeerMemberNames: {}", persistenceId(), downPeerMemberNames);
- }
-
- private void onCandidateRemoved(final CandidateRemoved message) {
- LOG.debug("{}: onCandidateRemoved: {}", persistenceId(), message);
-
- if (isLeader()) {
- String currentOwner = getCurrentOwner(message.getEntityPath());
- writeNewOwner(message.getEntityPath(),
- newOwner(currentOwner, message.getRemainingCandidates(),
- getEntityOwnerElectionStrategy(message.getEntityPath())));
- }
- }
-
- private EntityOwnerSelectionStrategy getEntityOwnerElectionStrategy(final YangInstanceIdentifier entityPath) {
- final String entityType = EntityOwnersModel.entityTypeFromEntityPath(entityPath);
- return strategyConfig.createStrategy(entityType, entityOwnershipStatistics.byEntityType(entityType));
- }
-
- private void onCandidateAdded(final CandidateAdded message) {
- if (!isLeader()) {
- return;
- }
-
- LOG.debug("{}: onCandidateAdded: {}", persistenceId(), message);
-
- // Since a node's candidate member is only added by the node itself, we can assume the node is up so
- // remove it from the downPeerMemberNames.
- downPeerMemberNames.remove(MemberName.forName(message.getNewCandidate()));
-
- final String currentOwner = getCurrentOwner(message.getEntityPath());
- final EntityOwnerSelectionStrategy strategy = getEntityOwnerElectionStrategy(message.getEntityPath());
-
- // Available members is all the known peers - the number of peers that are down + self
- // So if there are 2 peers and 1 is down then availableMembers will be 2
- final int availableMembers = getRaftActorContext().getPeerIds().size() - downPeerMemberNames.size() + 1;
-
- LOG.debug("{}: Using strategy {} to select owner, currentOwner = {}", persistenceId(), strategy, currentOwner);
-
- if (strategy.getSelectionDelayInMillis() == 0L) {
- writeNewOwner(message.getEntityPath(), newOwner(currentOwner, message.getAllCandidates(),
- strategy));
- } else if (message.getAllCandidates().size() == availableMembers) {
- LOG.debug("{}: Received the maximum candidates requests : {} writing new owner",
- persistenceId(), availableMembers);
- cancelOwnerSelectionTask(message.getEntityPath());
- writeNewOwner(message.getEntityPath(), newOwner(currentOwner, message.getAllCandidates(),
- strategy));
- } else {
- scheduleOwnerSelection(message.getEntityPath(), message.getAllCandidates(), strategy);
- }
- }
-
- private void onPeerDown(final PeerDown peerDown) {
- LOG.info("{}: onPeerDown: {}", persistenceId(), peerDown);
-
- MemberName downMemberName = peerDown.getMemberName();
- if (downPeerMemberNames.add(downMemberName) && isLeader()) {
- // Select new owners for entities owned by the down peer and which have other candidates. For an entity for
- // which the down peer is the only candidate, we leave it as the owner and don't clear it. This is done to
- // handle the case where the peer member process is actually still running but the node is partitioned.
- // When the partition is healed, the peer just remains as the owner. If the peer process actually restarted,
- // it will first remove all its candidates on startup. If another candidate is registered during the time
- // the peer is down, the new candidate will be selected as the new owner.
-
- selectNewOwnerForEntitiesOwnedBy(ImmutableSet.of(downMemberName.getName()));
- }
- }
-
- private void selectNewOwnerForEntitiesOwnedBy(final Set<String> ownedBy) {
- final List<Modification> modifications = new ArrayList<>();
- searchForEntitiesOwnedBy(ownedBy, (entityTypeNode, entityNode) -> {
- YangInstanceIdentifier entityPath = YangInstanceIdentifier.builder(ENTITY_TYPES_PATH)
- .node(entityTypeNode.getIdentifier()).node(ENTITY_NODE_ID).node(entityNode.getIdentifier())
- .node(ENTITY_OWNER_NODE_ID).build();
- String newOwner = newOwner(getCurrentOwner(entityPath), getCandidateNames(entityNode),
- getEntityOwnerElectionStrategy(entityPath));
-
- if (!newOwner.isEmpty()) {
- LOG.debug("{}: Found entity {}, writing new owner {}", persistenceId(), entityPath, newOwner);
-
- modifications.add(new WriteModification(entityPath,
- ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)));
-
- } else {
- LOG.debug("{}: Found entity {} but no other candidates - not clearing owner", persistenceId(),
- entityPath);
- }
- });
-
- commitCoordinator.commitModifications(modifications, this);
- }
-
- private void onPeerUp(final PeerUp peerUp) {
- LOG.debug("{}: onPeerUp: {}", persistenceId(), peerUp);
-
- downPeerMemberNames.remove(peerUp.getMemberName());
-
- // Notify the coordinator to check if pending modifications need to be sent. We do this here
- // to handle the case where the leader's peer address isn't known yet when a prior state or
- // leader change occurred.
- commitCoordinator.onStateChanged(this, isLeader());
-
- if (isLeader()) {
- // Try to assign owners for entities that have no current owner. It's possible the peer that is now up
- // had previously registered as a candidate and was the only candidate but the owner write tx couldn't be
- // committed due to a leader change. Eg, the leader is able to successfully commit the candidate add tx but
- // becomes isolated before it can commit the owner change and switches to follower. The majority partition
- // with a new leader has the candidate but the entity has no owner. When the partition is healed and the
- // previously isolated leader reconnects, we'll receive onPeerUp and, if there's still no owner, the
- // previous leader will gain ownership.
- selectNewOwnerForEntitiesOwnedBy(ImmutableSet.of(""));
- }
- }
-
- private static Collection<String> getCandidateNames(final MapEntryNode entity) {
- return entity.getChild(CANDIDATE_NODE_ID).map(child -> {
- Collection<MapEntryNode> candidates = ((MapNode) child).getValue();
- Collection<String> candidateNames = new ArrayList<>(candidates.size());
- for (MapEntryNode candidate: candidates) {
- candidateNames.add(candidate.getChild(CANDIDATE_NAME_NODE_ID).get().getValue().toString());
- }
- return candidateNames;
- }).orElse(ImmutableList.of());
- }
-
- private void searchForEntitiesOwnedBy(final Set<String> ownedBy, final EntityWalker walker) {
- LOG.debug("{}: Searching for entities owned by {}", persistenceId(), ownedBy);
-
- searchForEntities((entityTypeNode, entityNode) -> {
- Optional<DataContainerChild<? extends PathArgument, ?>> possibleOwner =
- entityNode.getChild(ENTITY_OWNER_NODE_ID);
- String currentOwner = possibleOwner.isPresent() ? possibleOwner.get().getValue().toString() : "";
- if (ownedBy.contains(currentOwner)) {
- walker.onEntity(entityTypeNode, entityNode);
- }
- });
- }
-
- private void removeCandidateFromEntities(final MemberName member) {
- final List<Modification> modifications = new ArrayList<>();
- searchForEntities((entityTypeNode, entityNode) -> {
- if (hasCandidate(entityNode, member)) {
- YangInstanceIdentifier entityId = (YangInstanceIdentifier) entityNode.getIdentifier()
- .getValue(ENTITY_ID_QNAME);
- YangInstanceIdentifier candidatePath = candidatePath(entityTypeNode.getIdentifier()
- .getValue(ENTITY_TYPE_QNAME).toString(), entityId, member.getName());
-
- LOG.info("{}: Found entity {}, removing candidate {}, path {}", persistenceId(), entityId,
- member, candidatePath);
-
- modifications.add(new DeleteModification(candidatePath));
- }
- });
-
- commitCoordinator.commitModifications(modifications, this);
- }
-
- private static boolean hasCandidate(final MapEntryNode entity, final MemberName candidateName) {
- return entity.getChild(CANDIDATE_NODE_ID)
- .flatMap(child -> ((MapNode)child).getChild(candidateNodeKey(candidateName.getName())))
- .isPresent();
- }
-
- private void searchForEntities(final EntityWalker walker) {
- Optional<NormalizedNode<?, ?>> possibleEntityTypes = getDataStore().readNode(ENTITY_TYPES_PATH);
- if (!possibleEntityTypes.isPresent()) {
- return;
- }
-
- for (MapEntryNode entityType : ((MapNode) possibleEntityTypes.get()).getValue()) {
- Optional<DataContainerChild<?, ?>> possibleEntities = entityType.getChild(ENTITY_NODE_ID);
- if (!possibleEntities.isPresent()) {
- // shouldn't happen but handle anyway
- continue;
- }
-
- for (MapEntryNode entity: ((MapNode) possibleEntities.get()).getValue()) {
- walker.onEntity(entityType, entity);
- }
- }
- }
-
- private void writeNewOwner(final YangInstanceIdentifier entityPath, final String newOwner) {
- LOG.debug("{}: Writing new owner {} for entity {}", persistenceId(), newOwner, entityPath);
-
- commitCoordinator.commitModification(new WriteModification(entityPath.node(ENTITY_OWNER_QNAME),
- ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)), this);
- }
-
- /**
- * Schedule a new owner selection job. Cancelling any outstanding job if it has not been cancelled.
- */
- private void scheduleOwnerSelection(final YangInstanceIdentifier entityPath, final Collection<String> allCandidates,
- final EntityOwnerSelectionStrategy strategy) {
- cancelOwnerSelectionTask(entityPath);
-
- LOG.debug("{}: Scheduling owner selection after {} ms", persistenceId(), strategy.getSelectionDelayInMillis());
-
- final Cancellable lastScheduledTask = context().system().scheduler().scheduleOnce(
- FiniteDuration.apply(strategy.getSelectionDelayInMillis(), TimeUnit.MILLISECONDS), self(),
- new SelectOwner(entityPath, allCandidates, strategy), context().system().dispatcher(), self());
-
- entityToScheduledOwnershipTask.put(entityPath, lastScheduledTask);
- }
-
- private void cancelOwnerSelectionTask(final YangInstanceIdentifier entityPath) {
- final Cancellable lastScheduledTask = entityToScheduledOwnershipTask.get(entityPath);
- if (lastScheduledTask != null && !lastScheduledTask.isCancelled()) {
- lastScheduledTask.cancel();
- }
- }
-
- private String newOwner(final String currentOwner, final Collection<String> candidates,
- final EntityOwnerSelectionStrategy ownerSelectionStrategy) {
- Collection<String> viableCandidates = getViableCandidates(candidates);
- if (viableCandidates.isEmpty()) {
- return "";
- }
- return ownerSelectionStrategy.newOwner(currentOwner, viableCandidates);
- }
-
- private Collection<String> getViableCandidates(final Collection<String> candidates) {
- Map<MemberName, VotingState> memberToVotingState = new HashMap<>();
- getRaftActorContext().getPeers().forEach(peerInfo -> memberToVotingState.put(
- ShardIdentifier.fromShardIdString(peerInfo.getId()).getMemberName(), peerInfo.getVotingState()));
-
- Collection<String> viableCandidates = new ArrayList<>();
-
- for (String candidate : candidates) {
- MemberName memberName = MemberName.forName(candidate);
- if (memberToVotingState.get(memberName) != VotingState.NON_VOTING
- && !downPeerMemberNames.contains(memberName)) {
- viableCandidates.add(candidate);
- }
- }
- return viableCandidates;
- }
-
- private String getCurrentOwner(final YangInstanceIdentifier entityId) {
- return getDataStore().readNode(entityId.node(ENTITY_OWNER_QNAME))
- .map(owner -> owner.getValue().toString())
- .orElse(null);
- }
-
- @FunctionalInterface
- private interface EntityWalker {
- void onEntity(MapEntryNode entityTypeNode, MapEntryNode entityNode);
- }
-
- public static Builder newBuilder() {
- return new Builder();
- }
-
- static class Builder extends Shard.AbstractBuilder<Builder, EntityOwnershipShard> {
- private MemberName localMemberName;
- private EntityOwnerSelectionStrategyConfig ownerSelectionStrategyConfig;
-
- protected Builder() {
- super(EntityOwnershipShard.class);
- }
-
- Builder localMemberName(final MemberName newLocalMemberName) {
- checkSealed();
- this.localMemberName = newLocalMemberName;
- return this;
- }
-
- Builder ownerSelectionStrategyConfig(final EntityOwnerSelectionStrategyConfig newOwnerSelectionStrategyConfig) {
- checkSealed();
- this.ownerSelectionStrategyConfig = newOwnerSelectionStrategyConfig;
- return this;
- }
-
- @Override
- protected void verify() {
- super.verify();
- requireNonNull(localMemberName, "localMemberName should not be null");
- requireNonNull(ownerSelectionStrategyConfig, "ownerSelectionStrategyConfig should not be null");
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-
-import akka.actor.ActorRef;
-import akka.actor.Cancellable;
-import akka.actor.Status.Failure;
-import com.google.common.collect.ImmutableList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.slf4j.Logger;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Handles commits and retries for the EntityOwnershipShard.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnershipShardCommitCoordinator {
- private static final Object COMMIT_RETRY_MESSAGE = new Object() {
- @Override
- public String toString() {
- return "entityCommitRetry";
- }
- };
- private static final FrontendType FRONTEND_TYPE = FrontendType.forName("entity-ownership-internal");
-
- private final Queue<Modification> pendingModifications = new LinkedList<>();
- private final LocalHistoryIdentifier historyId;
- private final Logger log;
-
- private BatchedModifications inflightCommit;
- private Cancellable retryCommitSchedule;
- private long transactionIDCounter = 0;
-
- EntityOwnershipShardCommitCoordinator(final MemberName localMemberName, final Logger log) {
- this.log = requireNonNull(log);
- historyId = new LocalHistoryIdentifier(
- ClientIdentifier.create(FrontendIdentifier.create(localMemberName, FRONTEND_TYPE), 0), 0);
- }
-
- boolean handleMessage(final Object message, final EntityOwnershipShard shard) {
- boolean handled = true;
- if (CommitTransactionReply.isSerializedType(message)) {
- // Successful reply from a local commit.
- inflightCommitSucceeded(shard);
- } else if (message instanceof akka.actor.Status.Failure) {
- // Failure reply from a local commit.
- inflightCommitFailure(((Failure) message).cause(), shard);
- } else if (COMMIT_RETRY_MESSAGE.equals(message)) {
- retryInflightCommit(shard);
- } else {
- handled = false;
- }
-
- return handled;
- }
-
- private void retryInflightCommit(final EntityOwnershipShard shard) {
- // Shouldn't be null happen but verify anyway
- if (inflightCommit == null) {
- return;
- }
-
- if (shard.hasLeader()) {
- log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionId());
-
- shard.tryCommitModifications(inflightCommit);
- } else {
- scheduleInflightCommitRetry(shard);
- }
- }
-
- void inflightCommitFailure(final Throwable cause, final EntityOwnershipShard shard) {
- // This should've originated from a failed inflight commit but verify anyway
- if (inflightCommit == null) {
- return;
- }
-
- log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionId(), cause);
-
- if (!(cause instanceof NoShardLeaderException)) {
- // If the failure is other than NoShardLeaderException the commit may have been partially
- // processed so retry with a new transaction ID to be safe.
- newInflightCommitWithDifferentTransactionID();
- }
-
- scheduleInflightCommitRetry(shard);
- }
-
- private void scheduleInflightCommitRetry(final EntityOwnershipShard shard) {
- FiniteDuration duration = shard.getDatastoreContext().getShardRaftConfig().getElectionTimeOutInterval();
-
- log.debug("Scheduling retry for BatchedModifications commit {} in {}",
- inflightCommit.getTransactionId(), duration);
-
- retryCommitSchedule = shard.getContext().system().scheduler().scheduleOnce(duration, shard.getSelf(),
- COMMIT_RETRY_MESSAGE, shard.getContext().dispatcher(), ActorRef.noSender());
- }
-
- void inflightCommitSucceeded(final EntityOwnershipShard shard) {
- // Shouldn't be null but verify anyway
- if (inflightCommit == null) {
- return;
- }
-
- if (retryCommitSchedule != null) {
- retryCommitSchedule.cancel();
- }
-
- log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionId());
-
- inflightCommit = null;
- commitNextBatch(shard);
- }
-
- void commitNextBatch(final EntityOwnershipShard shard) {
- if (inflightCommit != null || pendingModifications.isEmpty() || !shard.hasLeader()) {
- return;
- }
-
- inflightCommit = newBatchedModifications();
- Iterator<Modification> iter = pendingModifications.iterator();
- while (iter.hasNext()) {
- inflightCommit.addModification(iter.next());
- iter.remove();
- if (inflightCommit.getModifications().size()
- >= shard.getDatastoreContext().getShardBatchedModificationCount()) {
- break;
- }
- }
-
- log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionId(),
- inflightCommit.getModifications().size());
-
- shard.tryCommitModifications(inflightCommit);
- }
-
- void commitModification(final Modification modification, final EntityOwnershipShard shard) {
- commitModifications(ImmutableList.of(modification), shard);
- }
-
- void commitModifications(final List<Modification> modifications, final EntityOwnershipShard shard) {
- if (modifications.isEmpty()) {
- return;
- }
-
- boolean hasLeader = shard.hasLeader();
- if (inflightCommit != null || !hasLeader) {
- if (log.isDebugEnabled()) {
- log.debug("{} - adding modifications to pending",
- inflightCommit != null ? "A commit is inflight" : "No shard leader");
- }
-
- pendingModifications.addAll(modifications);
- } else {
- inflightCommit = newBatchedModifications();
- inflightCommit.addModifications(modifications);
- shard.tryCommitModifications(inflightCommit);
- }
- }
-
- void onStateChanged(final EntityOwnershipShard shard, final boolean isLeader) {
- shard.possiblyRemoveAllInitialCandidates(shard.getLeader());
-
- possiblyPrunePendingCommits(shard, isLeader);
-
- if (!isLeader && inflightCommit != null) {
- // We're no longer the leader but we have an inflight local commit. This likely means we didn't get
- // consensus for the commit and switched to follower due to another node with a higher term. We
- // can't be sure if the commit was replicated to any node so we retry it here with a new
- // transaction ID.
- if (retryCommitSchedule != null) {
- retryCommitSchedule.cancel();
- }
-
- newInflightCommitWithDifferentTransactionID();
- retryInflightCommit(shard);
- } else {
- commitNextBatch(shard);
- }
- }
-
- private void possiblyPrunePendingCommits(final EntityOwnershipShard shard, final boolean isLeader) {
- // If we were the leader and transitioned to follower, we'll try to forward pending commits to the new leader.
- // However certain commits, e.g. entity owner changes, should only be committed by a valid leader as the
- // criteria used to determine the commit may be stale. Since we're no longer a valid leader, we should not
- // forward such commits thus we prune the pending modifications. We still should forward local candidate change
- // commits.
- if (shard.hasLeader() && !isLeader) {
- // We may have already submitted a transaction for replication and commit. We don't need the base Shard to
- // forward it since we also have it stored in the inflightCommit and handle retries. So we just clear
- // pending transactions and drop them.
- shard.convertPendingTransactionsToMessages();
-
- // Prune the inflightCommit.
- if (inflightCommit != null) {
- inflightCommit = pruneModifications(inflightCommit);
- }
-
- // Prune the subsequent pending modifications.
- pendingModifications.removeIf(mod -> !canForwardModificationToNewLeader(mod));
- }
- }
-
- private @Nullable BatchedModifications pruneModifications(final BatchedModifications toPrune) {
- BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionId(),
- toPrune.getVersion());
- prunedModifications.setDoCommitOnReady(toPrune.isDoCommitOnReady());
- if (toPrune.isReady()) {
- prunedModifications.setReady(toPrune.getParticipatingShardNames());
- }
- prunedModifications.setTotalMessagesSent(toPrune.getTotalMessagesSent());
- for (Modification mod: toPrune.getModifications()) {
- if (canForwardModificationToNewLeader(mod)) {
- prunedModifications.addModification(mod);
- }
- }
-
- return !prunedModifications.getModifications().isEmpty() ? prunedModifications : null;
- }
-
- private boolean canForwardModificationToNewLeader(final Modification mod) {
- // If this is a WRITE of entity owner we don't want to forward it to a new leader since the criteria used
- // to determine the new owner might be stale.
- if (mod instanceof WriteModification) {
- WriteModification writeMod = (WriteModification)mod;
- boolean canForward = !writeMod.getPath().getLastPathArgument().getNodeType().equals(ENTITY_OWNER_QNAME);
-
- if (!canForward) {
- log.debug("Not forwarding WRITE modification for {} to new leader", writeMod.getPath());
- }
-
- return canForward;
- }
-
- return true;
- }
-
- private void newInflightCommitWithDifferentTransactionID() {
- BatchedModifications newBatchedModifications = newBatchedModifications();
- newBatchedModifications.addModifications(inflightCommit.getModifications());
- inflightCommit = newBatchedModifications;
- }
-
- private BatchedModifications newBatchedModifications() {
- BatchedModifications modifications = new BatchedModifications(
- new TransactionIdentifier(historyId, ++transactionIDCounter), DataStoreVersions.CURRENT_VERSION);
- modifications.setDoCommitOnReady(true);
- modifications.setReady();
- modifications.setTotalMessagesSent(1);
- return modifications;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityTypeFromEntityPath;
-
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import tech.pantheon.triemap.TrieMap;
-
-/**
- * EntityOwnershipStatistics is a utility class that keeps track of ownership statistics for the candidates and
- * caches it for quick count queries.
- * <p/>
- * While the entity ownership model does maintain the information about which entity is owned by which candidate
- * finding out how many entities of a given type are owned by a given candidate is not an efficient query.
- */
-class EntityOwnershipStatistics extends AbstractEntityOwnerChangeListener {
-
- private final TrieMap<String, TrieMap<String, Long>> statistics = TrieMap.create();
-
- EntityOwnershipStatistics(){
- }
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
- for (DataTreeCandidate change : changes) {
- DataTreeCandidateNode changeRoot = change.getRootNode();
- LeafNode<?> ownerLeaf = (LeafNode<?>) changeRoot.getDataAfter().get();
- String entityType = entityTypeFromEntityPath(change.getRootPath());
- String newOwner = extractOwner(ownerLeaf);
- if (!Strings.isNullOrEmpty(newOwner)) {
- updateStatistics(entityType, newOwner, 1);
- }
-
- Optional<NormalizedNode<?, ?>> dataBefore = changeRoot.getDataBefore();
- if (dataBefore.isPresent()) {
- String origOwner = extractOwner((LeafNode<?>) changeRoot.getDataBefore().get());
- if (!Strings.isNullOrEmpty(origOwner)) {
- updateStatistics(entityType, origOwner, -1);
- }
- }
- }
- }
-
- Map<String, Map<String, Long>> all() {
- Map<String, Map<String, Long>> snapshot = new HashMap<>();
- for (String entityType : statistics.immutableSnapshot().keySet()) {
- snapshot.put(entityType, byEntityType(entityType));
- }
- return snapshot;
- }
-
- Map<String, Long> byEntityType(final String entityType) {
- if (statistics.get(entityType) != null) {
- return statistics.get(entityType).immutableSnapshot();
- }
- return new HashMap<>();
- }
-
- private void updateStatistics(final String entityType, final String candidateName, final long count) {
- TrieMap<String, Long> map = statistics.get(entityType);
- if (map == null) {
- map = TrieMap.create();
- map.put(candidateName, count);
- statistics.put(entityType, map);
- } else {
- map.merge(candidateName, count, (ownedEntities, addedEntities) -> ownedEntities + addedEntities);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import com.google.common.annotations.Beta;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfigReader;
-import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.component.annotations.Reference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Beta
-@Component(immediate = true, configurationPid = "org.opendaylight.controller.cluster.entity.owner.selection.strategies",
- property = "type=default")
-public final class OSGiDistributedEntityOwnershipService implements DOMEntityOwnershipService {
- private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedEntityOwnershipService.class);
-
- @Reference(target = "(type=distributed-operational)")
- DistributedDataStoreInterface operDatastore = null;
-
- private DistributedEntityOwnershipService delegate;
-
- @Override
- public DOMEntityOwnershipCandidateRegistration registerCandidate(final DOMEntity entity)
- throws CandidateAlreadyRegisteredException {
- return delegate.registerCandidate(entity);
- }
-
- @Override
- public DOMEntityOwnershipListenerRegistration registerListener(final String entityType,
- final DOMEntityOwnershipListener listener) {
- return delegate.registerListener(entityType, listener);
- }
-
- @Override
- public Optional<EntityOwnershipState> getOwnershipState(final DOMEntity forEntity) {
- return delegate.getOwnershipState(forEntity);
- }
-
- @Override
- public boolean isCandidateRegistered(final DOMEntity forEntity) {
- return delegate.isCandidateRegistered(forEntity);
- }
-
- @Activate
- // FIXME: 3.0.0: properties are keyed by String, this should be Map<String, Object>
- void activate(final Map<Object, Object> properties) {
- LOG.info("Distributed Entity Ownership Service starting");
- delegate = DistributedEntityOwnershipService.start(operDatastore.getActorUtils(),
- EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(properties));
- LOG.info("Distributed Entity Ownership Service started");
- }
-
- @Deactivate
- void deactivate() {
- LOG.info("Distributed Entity Ownership Service stopping");
- delegate.close();
- LOG.info("Distributed Entity Ownership Service stopped");
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a new candidate is added for an entity.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-public class CandidateAdded {
- private final YangInstanceIdentifier entityPath;
- private final Collection<String> allCandidates;
- private final String newCandidate;
-
- public CandidateAdded(final YangInstanceIdentifier entityPath, final String newCandidate,
- final Collection<String> allCandidates) {
- this.entityPath = entityPath;
- this.newCandidate = newCandidate;
- this.allCandidates = allCandidates;
- }
-
- public YangInstanceIdentifier getEntityPath() {
- return entityPath;
- }
-
- public Collection<String> getAllCandidates() {
- return allCandidates;
- }
-
- public String getNewCandidate() {
- return newCandidate;
- }
-
- @Override
- public String toString() {
- return "CandidateAdded [entityPath=" + entityPath + ", newCandidate=" + newCandidate + ", allCandidates="
- + allCandidates + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a candidate is removed for an entity.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-public class CandidateRemoved {
- private final YangInstanceIdentifier entityPath;
- private final String removedCandidate;
- private final Collection<String> remainingCandidates;
-
- public CandidateRemoved(final YangInstanceIdentifier entityPath, final String removedCandidate,
- final Collection<String> remainingCandidates) {
- this.entityPath = entityPath;
- this.removedCandidate = removedCandidate;
- this.remainingCandidates = remainingCandidates;
- }
-
- public YangInstanceIdentifier getEntityPath() {
- return entityPath;
- }
-
- public String getRemovedCandidate() {
- return removedCandidate;
- }
-
- public Collection<String> getRemainingCandidates() {
- return remainingCandidates;
- }
-
- @Override
- public String toString() {
- return "CandidateRemoved [entityPath=" + entityPath + ", removedCandidate=" + removedCandidate
- + ", remainingCandidates=" + remainingCandidates + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Message sent to the local EntityOwnershipShard to register a candidate.
- *
- * @author Thomas Pantelis
- */
-public class RegisterCandidateLocal {
- private final DOMEntity entity;
-
- public RegisterCandidateLocal(final DOMEntity entity) {
- this.entity = entity;
- }
-
- public DOMEntity getEntity() {
- return entity;
- }
-
- @Override
- public String toString() {
- return "RegisterCandidateLocal [entity=" + entity + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import org.eclipse.jdt.annotation.NonNullByDefault;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * Message sent to the local EntityOwnershipShard to register an EntityOwnershipListener.
- *
- * @author Thomas Pantelis
- */
-@NonNullByDefault
-public class RegisterListenerLocal {
- private final DOMEntityOwnershipListener listener;
- private final String entityType;
-
- public RegisterListenerLocal(final DOMEntityOwnershipListener listener, final String entityType) {
- this.listener = requireNonNull(listener, "listener cannot be null");
- this.entityType = requireNonNull(entityType, "entityType cannot be null");
- }
-
- public DOMEntityOwnershipListener getListener() {
- return listener;
- }
-
- public String getEntityType() {
- return entityType;
- }
-
- @Override
- public String toString() {
- return "RegisterListenerLocal [entityType=" + entityType + ", listener=" + listener + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent by an EntityOwnershipShard to its leader on startup to remove all its candidates.
- *
- * @author Thomas Pantelis
- */
-public class RemoveAllCandidates implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private final MemberName memberName;
-
- public RemoveAllCandidates(final MemberName memberName) {
- this.memberName = memberName;
- }
-
- public MemberName getMemberName() {
- return memberName;
- }
-
- @Override
- public String toString() {
- return "RemoveAllCandidates [memberName=" + memberName + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import java.util.Collection;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategy;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a new owner needs to be selected.
- */
-public class SelectOwner {
- private final YangInstanceIdentifier entityPath;
- private final Collection<String> allCandidates;
- private final EntityOwnerSelectionStrategy ownerSelectionStrategy;
-
- public SelectOwner(final YangInstanceIdentifier entityPath, final Collection<String> allCandidates,
- final EntityOwnerSelectionStrategy ownerSelectionStrategy) {
- this.entityPath = requireNonNull(entityPath, "entityPath should not be null");
- this.allCandidates = requireNonNull(allCandidates, "allCandidates should not be null");
- this.ownerSelectionStrategy = requireNonNull(ownerSelectionStrategy,
- "ownerSelectionStrategy should not be null");
- }
-
- public YangInstanceIdentifier getEntityPath() {
- return entityPath;
- }
-
- public Collection<String> getAllCandidates() {
- return allCandidates;
- }
-
- public EntityOwnerSelectionStrategy getOwnerSelectionStrategy() {
- return ownerSelectionStrategy;
- }
-
- @Override
- public String toString() {
- return "SelectOwner [entityPath=" + entityPath + ", allCandidates=" + allCandidates
- + ", ownerSelectionStrategy=" + ownerSelectionStrategy + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Message sent to the local EntityOwnershipShard to unregister a candidate.
- *
- * @author Thomas Pantelis
- */
-public class UnregisterCandidateLocal {
- private final DOMEntity entity;
-
- public UnregisterCandidateLocal(final DOMEntity entity) {
- this.entity = entity;
- }
-
- public DOMEntity getEntity() {
- return entity;
- }
-
- @Override
- public String toString() {
- return "UnregisterCandidateLocal [entity=" + entity + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * Message sent to the local EntityOwnershipShard to unregister an EntityOwnershipListener.
- *
- * @author Thomas Pantelis
- */
-public class UnregisterListenerLocal {
- private final DOMEntityOwnershipListener listener;
- private final String entityType;
-
- public UnregisterListenerLocal(final DOMEntityOwnershipListener listener, final String entityType) {
- this.listener = listener;
- this.entityType = entityType;
- }
-
- public DOMEntityOwnershipListener getListener() {
- return listener;
- }
-
- public String getEntityType() {
- return entityType;
- }
-
- @Override
- public String toString() {
- return "UnregisterListenerLocal [entityType=" + entityType + ", listener=" + listener + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import java.util.Map;
-
-public abstract class AbstractEntityOwnerSelectionStrategy implements EntityOwnerSelectionStrategy {
-
- private final long selectionDelayInMillis;
- private final Map<String, Long> initialStatistics;
-
- protected AbstractEntityOwnerSelectionStrategy(final long selectionDelayInMillis,
- final Map<String, Long> initialStatistics) {
- this.selectionDelayInMillis = selectionDelayInMillis;
- this.initialStatistics = initialStatistics;
- }
-
- @Override
- public long getSelectionDelayInMillis() {
- return selectionDelayInMillis;
- }
-
- public Map<String, Long> getInitialStatistics() {
- return initialStatistics;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import java.util.Collection;
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * An EntityOwnerSelectionStrategy is to be used by the EntityOwnershipShard to select a new owner from a collection
- * of candidates.
- */
-public interface EntityOwnerSelectionStrategy {
- /**
- * Returns the time in millis owner selection should be delayed.
- *
- * @return the time in millis owner selection should be delayed
- */
- long getSelectionDelayInMillis();
-
- /**
- * Selects a new owner from the list of viable candidates.
- *
- * @param currentOwner the current owner of the entity if any, null otherwise
- * @param viableCandidates the available candidates from which to choose the new owner
- * @return the new owner
- */
- String newOwner(@Nullable String currentOwner, Collection<String> viableCandidates);
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import java.lang.reflect.InvocationTargetException;
-import java.util.HashMap;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * FIXME: this is simple registry service, except it also loads classes.
- */
-public final class EntityOwnerSelectionStrategyConfig {
- private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerSelectionStrategyConfig.class);
- private final Map<String, StrategyInfo> entityTypeToStrategyInfo = new HashMap<>();
- private final Map<String, EntityOwnerSelectionStrategy> entityTypeToOwnerSelectionStrategy = new HashMap<>();
-
- private EntityOwnerSelectionStrategyConfig() {
-
- }
-
- public boolean isStrategyConfigured(final String entityType) {
- return entityTypeToStrategyInfo.get(entityType) != null;
- }
-
- public EntityOwnerSelectionStrategy createStrategy(final String entityType,
- final Map<String, Long> initialStatistics) {
- final EntityOwnerSelectionStrategy strategy;
- final EntityOwnerSelectionStrategy existingStrategy = entityTypeToOwnerSelectionStrategy.get(entityType);
- if (existingStrategy != null) {
- strategy = existingStrategy;
- } else {
- EntityOwnerSelectionStrategyConfig.StrategyInfo strategyInfo = entityTypeToStrategyInfo.get(entityType);
- if (strategyInfo == null) {
- strategy = FirstCandidateSelectionStrategy.INSTANCE;
- } else {
- strategy = strategyInfo.createStrategy(initialStatistics);
- }
- entityTypeToOwnerSelectionStrategy.put(entityType, strategy);
- }
- return strategy;
- }
-
- /**
- * This class should not exist. It contains a single long, which is passed to the constructor (via reflection).
- * We are getting that information from a BundleContext. We are running in OSGi environment, hence this class
- * needs to be deployed in its own bundle, with its own configuration.
- * If this is used internally, it needs to be relocated into a separate package along with the implementation
- * using it.
- *
- * @deprecated FIXME: THIS IS CONFIGURATION FOR A CUSTOM-LOADED CLASS CONSTRUCTOR
- */
- @Deprecated
- public void clearStrategies() {
- entityTypeToOwnerSelectionStrategy.clear();
- }
-
- private static final class StrategyInfo {
- private final Class<? extends EntityOwnerSelectionStrategy> strategyClass;
- private final long delay;
-
- private StrategyInfo(final Class<? extends EntityOwnerSelectionStrategy> strategyClass, final long delay) {
- this.strategyClass = strategyClass;
- this.delay = delay;
- }
-
- public EntityOwnerSelectionStrategy createStrategy(final Map<String, Long> initialStatistics) {
- try {
- return strategyClass.getDeclaredConstructor(long.class, Map.class)
- .newInstance(delay, initialStatistics);
- } catch (InstantiationException | IllegalAccessException | InvocationTargetException
- | NoSuchMethodException e) {
- LOG.warn("could not create custom strategy", e);
- }
- return FirstCandidateSelectionStrategy.INSTANCE;
- }
- }
-
- public static Builder newBuilder() {
- return new Builder(new EntityOwnerSelectionStrategyConfig());
- }
-
- public static final class Builder {
- private final EntityOwnerSelectionStrategyConfig config;
-
- Builder(final EntityOwnerSelectionStrategyConfig config) {
- this.config = config;
- }
-
- public Builder addStrategy(final String entityType,
- final Class<? extends EntityOwnerSelectionStrategy> strategy, final long delay) {
- config.entityTypeToStrategyInfo.put(entityType, new StrategyInfo(strategy, delay));
- return this;
- }
-
- public EntityOwnerSelectionStrategyConfig build() {
- return this.config;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import com.google.common.base.Preconditions;
-import java.util.Map;
-import java.util.Map.Entry;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig.Builder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Reads the entity owner selection strategy config.
- *
- */
-public final class EntityOwnerSelectionStrategyConfigReader {
-
- private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerSelectionStrategyConfigReader.class);
- private static final String ENTITY_TYPE_PREFIX = "entity.type.";
-
- private EntityOwnerSelectionStrategyConfigReader() {
- // Hidden on purpose
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- public static EntityOwnerSelectionStrategyConfig loadStrategyWithConfig(final Map<Object, Object> props) {
- final EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder();
-
- if (props != null && !props.isEmpty()) {
- parseConfiguration(builder, props);
- } else {
- if (props == null) {
- LOG.debug("Could not read strategy configuration file, will use default configuration.");
- } else {
- LOG.debug("Configuration file is empty, will use default configuration.");
- }
- }
- return builder.build();
- }
-
- private static EntityOwnerSelectionStrategyConfig parseConfiguration(final Builder builder,
- final Map<Object, Object> properties) {
-
- for (final Entry<Object, Object> entry : properties.entrySet()) {
- final String key = (String) entry.getKey();
- if (!key.startsWith(ENTITY_TYPE_PREFIX)) {
- LOG.debug("Ignoring non-conforming property key : {}", key);
- continue;
- }
-
- final String[] strategyClassAndDelay = ((String) properties.get(key)).split(",");
- final Class<? extends EntityOwnerSelectionStrategy> aClass = loadClass(strategyClassAndDelay[0]);
-
- final long delay;
- if (strategyClassAndDelay.length > 1) {
- delay = Long.parseLong(strategyClassAndDelay[1]);
- } else {
- delay = 0;
- }
-
- final String entityType = key.substring(key.lastIndexOf(".") + 1);
- builder.addStrategy(entityType, aClass, delay);
- LOG.debug("Entity Type '{}' using strategy {} delay {}", entityType, aClass, delay);
- }
-
- return builder.build();
- }
-
- @SuppressWarnings("unchecked")
- private static Class<? extends EntityOwnerSelectionStrategy> loadClass(final String strategyClassAndDelay) {
- final Class<?> clazz;
- try {
- clazz = EntityOwnerSelectionStrategyConfigReader.class.getClassLoader().loadClass(strategyClassAndDelay);
- } catch (final ClassNotFoundException e) {
- throw new IllegalArgumentException("Failed to load strategy " + strategyClassAndDelay, e);
- }
-
- Preconditions.checkArgument(EntityOwnerSelectionStrategy.class.isAssignableFrom(clazz),
- "Selected implementation %s must implement EntityOwnerSelectionStrategy, clazz");
-
- return (Class<? extends EntityOwnerSelectionStrategy>) clazz;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-
-/**
- * The FirstCandidateSelectionStrategy always selects the first viable candidate from the list of candidates.
- */
-public class FirstCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
-
- public static final FirstCandidateSelectionStrategy INSTANCE =
- new FirstCandidateSelectionStrategy(0L, Collections.emptyMap());
-
- public FirstCandidateSelectionStrategy(final long selectionDelayInMillis,
- final Map<String, Long> initialStatistics) {
- super(selectionDelayInMillis, initialStatistics);
- }
-
- @Override
- public String newOwner(final String currentOwner, final Collection<String> viableCandidates) {
- Preconditions.checkArgument(viableCandidates.size() > 0, "No viable candidates provided");
- return viableCandidates.iterator().next();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * The LeastLoadedCandidateSelectionStrategy assigns ownership for an entity to the candidate which owns the least
- * number of entities.
- */
-public class LeastLoadedCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
- private final Map<String, Long> localStatistics = new HashMap<>();
-
- protected LeastLoadedCandidateSelectionStrategy(final long selectionDelayInMillis,
- final Map<String, Long> initialStatistics) {
- super(selectionDelayInMillis, initialStatistics);
-
- localStatistics.putAll(initialStatistics);
- }
-
- @Override
- public String newOwner(final String currentOwner, final Collection<String> viableCandidates) {
- Preconditions.checkArgument(viableCandidates.size() > 0);
- String leastLoadedCandidate = null;
- long leastLoadedCount = Long.MAX_VALUE;
-
- if (!Strings.isNullOrEmpty(currentOwner)) {
- long localVal = MoreObjects.firstNonNull(localStatistics.get(currentOwner), 0L);
- localStatistics.put(currentOwner, localVal - 1);
- }
-
- for (String candidateName : viableCandidates) {
- long val = MoreObjects.firstNonNull(localStatistics.get(candidateName), 0L);
- if (val < leastLoadedCount) {
- leastLoadedCount = val;
- leastLoadedCandidate = candidateName;
- }
- }
-
- if (leastLoadedCandidate == null) {
- leastLoadedCandidate = viableCandidates.iterator().next();
- }
-
- localStatistics.put(leastLoadedCandidate, leastLoadedCount + 1);
- return leastLoadedCandidate;
- }
-
- @VisibleForTesting
- Map<String, Long> getLocalStatistics() {
- return localStatistics;
- }
-}
+++ /dev/null
-module entity-owners {
- yang-version 1;
- namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:entity-owners";
- prefix "entity-owners";
-
- description
- "This module contains the base YANG definitions for
- an implementation of the EntityOwnershipService which stores
- entity ownership information in the data store";
-
- revision "2015-08-04" {
- description "Initial revision.";
- }
-
- container entity-owners {
-
- // A list of all entities grouped by type
- list entity-type {
- key type;
- leaf type {
- type string;
- }
-
- list entity {
- key id;
-
- leaf id {
- type instance-identifier;
- }
-
- leaf owner {
- type string;
- }
-
- // A list of all the candidates that would like to own the entity
- list candidate {
- key name;
- ordered-by user;
-
- leaf name {
- type string;
- }
- }
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.entityownership;
-
-import akka.actor.ActorSystem;
-import akka.testkit.javadsl.TestKit;
-import com.typesafe.config.ConfigFactory;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-public class AbstractClusterRefEntityOwnershipTest extends AbstractEntityOwnershipTest {
-
- private static ActorSystem system;
-
- @BeforeClass
- public static void setUpClass() {
- system = ActorSystem.create("test", ConfigFactory.load().getConfig("test-config"));
- }
-
- @AfterClass
- public static void tearDownClass() {
- TestKit.shutdownActorSystem(system);
- system = null;
- }
-
- protected static ActorSystem getSystem() {
- return system;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NAME_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_TYPE_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import akka.pattern.Patterns;
-import akka.testkit.TestActorRef;
-import akka.util.Timeout;
-import com.google.common.base.Stopwatch;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Consumer;
-import java.util.function.Function;
-import org.junit.Assert;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.AbstractShardTest;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
-import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Abstract base class providing utility methods.
- *
- * @author Thomas Pantelis
- */
-public class AbstractEntityOwnershipTest extends AbstractActorTest {
- protected final Logger testLog = LoggerFactory.getLogger(getClass());
-
- private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
-
- protected void verifyEntityCandidate(final NormalizedNode<?, ?> node, final String entityType,
- final YangInstanceIdentifier entityId, final String candidateName, final boolean expectPresent) {
- try {
- assertNotNull("Missing " + EntityOwners.QNAME.toString(), node);
- assertTrue(node instanceof ContainerNode);
-
- ContainerNode entityOwnersNode = (ContainerNode) node;
-
- MapEntryNode entityTypeEntry = getMapEntryNodeChild(entityOwnersNode, EntityType.QNAME,
- ENTITY_TYPE_QNAME, entityType, true);
-
- MapEntryNode entityEntry = getMapEntryNodeChild(entityTypeEntry, ENTITY_QNAME, ENTITY_ID_QNAME,
- entityId, true);
-
- getMapEntryNodeChild(entityEntry, Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName, expectPresent);
- } catch (AssertionError e) {
- throw new AssertionError("Verification of entity candidate failed - returned data was: " + node, e);
- }
- }
-
- protected void verifyEntityCandidate(final String entityType, final YangInstanceIdentifier entityId,
- final String candidateName, final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader,
- final boolean expectPresent) {
- AssertionError lastError = null;
- Stopwatch sw = Stopwatch.createStarted();
- while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
- NormalizedNode<?, ?> node = reader.apply(ENTITY_OWNERS_PATH);
- try {
- verifyEntityCandidate(node, entityType, entityId, candidateName, expectPresent);
- return;
- } catch (AssertionError e) {
- lastError = e;
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
- }
- }
-
- throw lastError;
- }
-
- protected void verifyEntityCandidate(final String entityType, final YangInstanceIdentifier entityId,
- final String candidateName, final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
- verifyEntityCandidate(entityType, entityId, candidateName, reader, true);
- }
-
- protected MapEntryNode getMapEntryNodeChild(final DataContainerNode<? extends PathArgument> parent,
- final QName childMap, final QName child, final Object key, final boolean expectPresent) {
- Optional<DataContainerChild<? extends PathArgument, ?>> childNode =
- parent.getChild(new NodeIdentifier(childMap));
- // We have to account for empty maps disappearing. If we expect the entry to be non-present, tolerate a missing
- // map.
- if (!expectPresent && !childNode.isPresent()) {
- return null;
- }
-
- assertTrue("Missing " + childMap.toString(), childNode.isPresent());
-
- MapNode entityTypeMapNode = (MapNode) childNode.get();
- Optional<MapEntryNode> entityTypeEntry = entityTypeMapNode.getChild(NodeIdentifierWithPredicates.of(
- childMap, child, key));
- if (expectPresent && !entityTypeEntry.isPresent()) {
- fail("Missing " + childMap.toString() + " entry for " + key + ". Actual: " + entityTypeMapNode.getValue());
- } else if (!expectPresent && entityTypeEntry.isPresent()) {
- fail("Found unexpected " + childMap.toString() + " entry for " + key);
- }
-
- return entityTypeEntry.isPresent() ? entityTypeEntry.get() : null;
- }
-
- static void verifyOwner(final String expected, final String entityType, final YangInstanceIdentifier entityId,
- final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
- AssertionError lastError = null;
- YangInstanceIdentifier entityPath = entityPath(entityType, entityId).node(ENTITY_OWNER_QNAME);
- Stopwatch sw = Stopwatch.createStarted();
- while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
- try {
- NormalizedNode<?, ?> node = reader.apply(entityPath);
- Assert.assertNotNull("Owner was not set for entityId: " + entityId, node);
- Assert.assertEquals("Entity owner", expected, node.getValue().toString());
- return;
- } catch (AssertionError e) {
- lastError = e;
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
- }
- }
-
- throw lastError;
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- static void verifyOwner(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
- final YangInstanceIdentifier entityId, final String localMemberName) {
- verifyOwner(localMemberName, entityType, entityId, path -> {
- try {
- return AbstractShardTest.readStore(shard, path);
- } catch (Exception e) {
- return null;
- }
- });
- }
-
- protected void verifyNodeRemoved(final YangInstanceIdentifier path,
- final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
- AssertionError lastError = null;
- Stopwatch sw = Stopwatch.createStarted();
- while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
- try {
- NormalizedNode<?, ?> node = reader.apply(path);
- Assert.assertNull("Node was not removed at path: " + path, node);
- return;
- } catch (AssertionError e) {
- lastError = e;
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
- }
- }
-
- throw lastError;
- }
-
- static void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node,
- final ShardDataTree shardDataTree) throws DataValidationFailedException {
- DataTreeModification modification = shardDataTree.newModification();
- modification.merge(path, node);
- commit(shardDataTree, modification);
- }
-
- static void deleteNode(final YangInstanceIdentifier path, final ShardDataTree shardDataTree)
- throws DataValidationFailedException {
- DataTreeModification modification = shardDataTree.newModification();
- modification.delete(path);
- commit(shardDataTree, modification);
- }
-
- static void commit(final ShardDataTree shardDataTree, final DataTreeModification modification)
- throws DataValidationFailedException {
- modification.ready();
- shardDataTree.getDataTree().validate(modification);
- final DataTreeCandidate candidate = shardDataTree.getDataTree().prepare(modification);
- shardDataTree.getDataTree().commit(candidate);
- shardDataTree.notifyListeners(candidate);
- }
-
- static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity, final boolean expWasOwner,
- final boolean expIsOwner, final boolean expHasOwner) {
- return ownershipChange(expEntity, expWasOwner, expIsOwner, expHasOwner, false);
- }
-
- static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity, final boolean expWasOwner,
- final boolean expIsOwner, final boolean expHasOwner, final boolean expInJeopardy) {
- return argThat(change -> expEntity.equals(change.getEntity()) && expWasOwner == change.getState().wasOwner()
- && expIsOwner == change.getState().isOwner() && expHasOwner == change.getState().hasOwner()
- && expInJeopardy == change.inJeopardy());
- }
-
- static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity) {
- return argThat(change -> expEntity.equals(change.getEntity()));
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- static void verifyNoOwnerSet(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
- final YangInstanceIdentifier entityId) {
- YangInstanceIdentifier entityPath = entityPath(entityType, entityId).node(ENTITY_OWNER_QNAME);
- try {
- NormalizedNode<?, ?> node = AbstractShardTest.readStore(shard, entityPath);
- if (node != null) {
- Assert.fail("Owner " + node.getValue() + " was set for " + entityPath);
- }
-
- } catch (Exception e) {
- throw new AssertionError("read failed", e);
- }
- }
-
- static void verifyRaftState(final TestActorRef<? extends EntityOwnershipShard> shard,
- final Consumer<OnDemandRaftState> verifier)
- throws Exception {
- AssertionError lastError = null;
- Stopwatch sw = Stopwatch.createStarted();
- while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
- FiniteDuration operationDuration = FiniteDuration.create(5, TimeUnit.SECONDS);
- Future<Object> future = Patterns.ask(shard, GetOnDemandRaftState.INSTANCE, new Timeout(operationDuration));
- OnDemandRaftState raftState = (OnDemandRaftState)Await.result(future, operationDuration);
- try {
- verifier.accept(raftState);
- return;
- } catch (AssertionError e) {
- lastError = e;
- Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
- }
- }
-
- throw lastError;
- }
-
- static ShardIdentifier newShardId(final String memberName) {
- return ShardIdentifier.create("entity-ownership", MemberName.forName(memberName),
- "operational" + NEXT_SHARD_NUM.getAndIncrement());
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- void verifyEntityCandidateRemoved(final TestActorRef<EntityOwnershipShard> shard, final String entityType,
- final YangInstanceIdentifier entityId, final String candidateName) {
- verifyNodeRemoved(candidatePath(entityType, entityId, candidateName), path -> {
- try {
- return AbstractShardTest.readStore(shard, path);
- } catch (Exception e) {
- throw new AssertionError("Failed to read " + path, e);
- }
- });
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- void verifyCommittedEntityCandidate(final TestActorRef<? extends EntityOwnershipShard> shard,
- final String entityType, final YangInstanceIdentifier entityId, final String candidateName) {
- verifyEntityCandidate(entityType, entityId, candidateName, path -> {
- try {
- return AbstractShardTest.readStore(shard, path);
- } catch (Exception e) {
- throw new AssertionError("Failed to read " + path, e);
- }
- });
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- void verifyNoEntityCandidate(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
- final YangInstanceIdentifier entityId, final String candidateName) {
- verifyEntityCandidate(entityType, entityId, candidateName, path -> {
- try {
- return AbstractShardTest.readStore(shard, path);
- } catch (Exception e) {
- throw new AssertionError("Failed to read " + path, e);
- }
- }, false);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableSet;
-import java.time.Duration;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateRemoved;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-/**
- * Unit tests for CandidateListChangeListener.
- *
- * @author Thomas Pantelis
- */
-public class CandidateListChangeListenerTest extends AbstractActorTest {
- private static final String ENTITY_TYPE = "test";
- private static final YangInstanceIdentifier ENTITY_ID1 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
- private static final YangInstanceIdentifier ENTITY_ID2 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-
- private ShardDataTree shardDataTree;
-
- @Mock
- private Shard mockShard;
-
- @Before
- public void setup() {
- MockitoAnnotations.initMocks(this);
- shardDataTree = new ShardDataTree(mockShard, EOSTestUtils.SCHEMA_CONTEXT, TreeType.OPERATIONAL);
- }
-
- @Test
- public void testOnDataTreeChanged() throws Exception {
- TestKit kit = new TestKit(getSystem());
-
- new CandidateListChangeListener(kit.getRef(), "test").init(shardDataTree);
-
- String memberName1 = "member-1";
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName1));
-
- CandidateAdded candidateAdded = kit.expectMsgClass(CandidateAdded.class);
- assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateAdded.getEntityPath());
- assertEquals("getNewCandidate", memberName1, candidateAdded.getNewCandidate());
- assertEquals("getAllCandidates", ImmutableSet.of(memberName1),
- ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName1));
- kit.expectNoMessage(Duration.ofMillis(500));
-
- String memberName2 = "member-2";
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName2));
-
- candidateAdded = kit.expectMsgClass(CandidateAdded.class);
- assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateAdded.getEntityPath());
- assertEquals("getNewCandidate", memberName2, candidateAdded.getNewCandidate());
- assertEquals("getAllCandidates", ImmutableSet.of(memberName1, memberName2),
- ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, memberName1));
-
- candidateAdded = kit.expectMsgClass(CandidateAdded.class);
- assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID2), candidateAdded.getEntityPath());
- assertEquals("getNewCandidate", memberName1, candidateAdded.getNewCandidate());
- assertEquals("getAllCandidates", ImmutableSet.of(memberName1),
- ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
- deleteNode(candidatePath(ENTITY_TYPE, ENTITY_ID1, memberName1));
-
- CandidateRemoved candidateRemoved = kit.expectMsgClass(CandidateRemoved.class);
- assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateRemoved.getEntityPath());
- assertEquals("getRemovedCandidate", memberName1, candidateRemoved.getRemovedCandidate());
- assertEquals("getRemainingCandidates", ImmutableSet.of(memberName2),
- ImmutableSet.copyOf(candidateRemoved.getRemainingCandidates()));
-
- deleteNode(candidatePath(ENTITY_TYPE, ENTITY_ID1, memberName2));
-
- candidateRemoved = kit.expectMsgClass(CandidateRemoved.class);
- assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateRemoved.getEntityPath());
- assertEquals("getRemovedCandidate", memberName2, candidateRemoved.getRemovedCandidate());
- assertEquals("getRemainingCandidates", ImmutableSet.of(),
- ImmutableSet.copyOf(candidateRemoved.getRemainingCandidates()));
- }
-
- private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
- throws DataValidationFailedException {
- AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
- }
-
- private void deleteNode(final YangInstanceIdentifier path) throws DataValidationFailedException {
- AbstractEntityOwnershipTest.deleteNode(path, shardDataTree);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.AdditionalMatchers.or;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftState;
-import static org.opendaylight.controller.cluster.entityownership.AbstractEntityOwnershipTest.ownershipChange;
-import static org.opendaylight.controller.cluster.entityownership.DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.CANDIDATE_NAME_NODE_ID;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import akka.actor.ActorRef;
-import akka.actor.Status.Failure;
-import akka.actor.Status.Success;
-import akka.cluster.Cluster;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.mockito.exceptions.base.MockitoException;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.MemberNode;
-import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
-import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * End-to-end integration tests for the entity ownership functionality.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipIntegrationTest {
- private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
- private static final String MODULE_SHARDS_5_NODE_CONFIG = "module-shards-default-5-node.conf";
- private static final String MODULE_SHARDS_MEMBER_1_CONFIG = "module-shards-default-member-1.conf";
- private static final String ENTITY_TYPE1 = "entityType1";
- private static final String ENTITY_TYPE2 = "entityType2";
- private static final DOMEntity ENTITY1 = new DOMEntity(ENTITY_TYPE1, "entity1");
- private static final DOMEntity ENTITY1_2 = new DOMEntity(ENTITY_TYPE2, "entity1");
- private static final DOMEntity ENTITY2 = new DOMEntity(ENTITY_TYPE1, "entity2");
- private static final DOMEntity ENTITY3 = new DOMEntity(ENTITY_TYPE1, "entity3");
- private static final DOMEntity ENTITY4 = new DOMEntity(ENTITY_TYPE1, "entity4");
- private final DatastoreContext.Builder leaderDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
- .shardIsolatedLeaderCheckIntervalInMillis(1000000);
-
- private final DatastoreContext.Builder followerDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10000);
-
- private final List<MemberNode> memberNodes = new ArrayList<>();
-
- @Mock
- private DOMEntityOwnershipListener leaderMockListener;
-
- @Mock
- private DOMEntityOwnershipListener leaderMockListener2;
-
- @Mock
- private DOMEntityOwnershipListener follower1MockListener;
-
- @Mock
- private DOMEntityOwnershipListener follower2MockListener;
-
- @Before
- public void setUp() {
- MockitoAnnotations.initMocks(this);
- InMemoryJournal.clear();
- InMemorySnapshotStore.clear();
- }
-
- @After
- public void tearDown() {
- for (MemberNode m : Lists.reverse(memberNodes)) {
- m.cleanup();
- }
- memberNodes.clear();
- }
-
- private static DistributedEntityOwnershipService newOwnershipService(final AbstractDataStore datastore) {
- return DistributedEntityOwnershipService.start(datastore.getActorUtils(),
- EntityOwnerSelectionStrategyConfig.newBuilder().build());
- }
-
- @Test
- public void testFunctionalityWithThreeNodes() throws Exception {
- String name = "testFunctionalityWithThreeNodes";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- final DOMEntityOwnershipService follower2EntityOwnershipService =
- newOwnershipService(follower2Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
- leaderEntityOwnershipService.registerListener(ENTITY_TYPE2, leaderMockListener2);
- follower1EntityOwnershipService.registerListener(ENTITY_TYPE1, follower1MockListener);
-
- // Register leader candidate for entity1 and verify it becomes owner
-
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, true, true));
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
- reset(leaderMockListener, follower1MockListener);
-
- verifyGetOwnershipState(leaderEntityOwnershipService, ENTITY1, EntityOwnershipState.IS_OWNER);
- verifyGetOwnershipState(follower1EntityOwnershipService, ENTITY1, EntityOwnershipState.OWNED_BY_OTHER);
-
- // Register leader candidate for entity1_2 (same id, different type) and verify it becomes owner
-
- leaderEntityOwnershipService.registerCandidate(ENTITY1_2);
- verify(leaderMockListener2, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1_2, false, true, true));
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1_2));
- reset(leaderMockListener2);
-
- // Register follower1 candidate for entity1 and verify it gets added but doesn't become owner
-
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1", "member-2");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
- verifyOwner(follower2Node.configDataStore(), ENTITY1, "member-1");
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
- verify(follower1MockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
-
- // Register follower1 candidate for entity2 and verify it becomes owner
-
- final DOMEntityOwnershipCandidateRegistration follower1Entity2Reg =
- follower1EntityOwnershipService.registerCandidate(ENTITY2);
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, true));
- verifyOwner(follower2Node.configDataStore(), ENTITY2, "member-2");
- reset(leaderMockListener, follower1MockListener);
-
- // Register follower2 candidate for entity2 and verify it gets added but doesn't become owner
-
- follower2EntityOwnershipService.registerListener(ENTITY_TYPE1, follower2MockListener);
- verify(follower2MockListener, timeout(5000).times(2)).ownershipChanged(or(
- ownershipChange(ENTITY1, false, false, true), ownershipChange(ENTITY2, false, false, true)));
-
- follower2EntityOwnershipService.registerCandidate(ENTITY2);
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-2", "member-3");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-2");
-
- // Unregister follower1 candidate for entity2 and verify follower2 becomes owner
-
- follower1Entity2Reg.close();
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-3");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-3");
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, true, false, true));
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, true));
-
- // Depending on timing, follower2MockListener could get ownershipChanged with "false, false, true" if
- // if the original ownership change with "member-2 is replicated to follower2 after the listener is
- // registered.
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
-
- // Register follower1 candidate for entity3 and verify it becomes owner
-
- follower1EntityOwnershipService.registerCandidate(ENTITY3);
- verifyOwner(leaderDistributedDataStore, ENTITY3, "member-2");
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, true, true));
- verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, false, true));
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, false, true));
-
- // Register follower2 candidate for entity4 and verify it becomes owner
-
- follower2EntityOwnershipService.registerCandidate(ENTITY4);
- verifyOwner(leaderDistributedDataStore, ENTITY4, "member-3");
- verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, true, true));
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
- reset(follower1MockListener, follower2MockListener);
-
- // Register follower1 candidate for entity4 and verify it gets added but doesn't become owner
-
- follower1EntityOwnershipService.registerCandidate(ENTITY4);
- verifyCandidates(leaderDistributedDataStore, ENTITY4, "member-3", "member-2");
- verifyOwner(leaderDistributedDataStore, ENTITY4, "member-3");
-
- // Shutdown follower2 and verify it's owned entities (entity 4) get re-assigned
-
- reset(leaderMockListener, follower1MockListener);
- follower2Node.cleanup();
-
- verify(follower1MockListener, timeout(15000)).ownershipChanged(ownershipChange(ENTITY4, false, true, true));
- verify(leaderMockListener, timeout(15000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
-
- // Register leader candidate for entity2 and verify it becomes owner
-
- DOMEntityOwnershipCandidateRegistration leaderEntity2Reg =
- leaderEntityOwnershipService.registerCandidate(ENTITY2);
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
-
- // Unregister leader candidate for entity2 and verify the owner is cleared
-
- leaderEntity2Reg.close();
- verifyOwner(leaderDistributedDataStore, ENTITY2, "");
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, true, false, false));
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, false));
- }
-
- @Test
- public void testLeaderEntityOwnersReassignedAfterShutdown() throws Exception {
- followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
- .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
- String name = "testLeaderEntityOwnersReassignedAfterShutdown";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
-
- follower1Node.waitForMembersUp("member-1", "member-3");
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- final DOMEntityOwnershipService follower2EntityOwnershipService =
- newOwnershipService(follower2Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- // Register follower1 candidate for entity1 and verify it becomes owner
-
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
- // Register leader candidate for entity1
-
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2", "member-1");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
- // Register leader candidate for entity2 and verify it becomes owner
-
- leaderEntityOwnershipService.registerCandidate(ENTITY2);
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
- // Register follower2 candidate for entity2
-
- follower2EntityOwnershipService.registerCandidate(ENTITY2);
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
- // Re-enable elections on all remaining followers so one becomes the new leader
-
- ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
- follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
- .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
- ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
- follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
- .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
- // Shutdown the leader and verify its removed from the candidate list
-
- leaderNode.cleanup();
- follower1Node.waitForMemberDown("member-1");
- follower2Node.waitForMemberDown("member-1");
-
- // Verify the prior leader's entity owners are re-assigned.
-
- verifyCandidates(follower1Node.configDataStore(), ENTITY1, "member-2", "member-1");
- verifyCandidates(follower1Node.configDataStore(), ENTITY2, "member-1", "member-3");
- verifyOwner(follower1Node.configDataStore(), ENTITY1, "member-2");
- verifyOwner(follower1Node.configDataStore(), ENTITY2, "member-3");
- }
-
- @Test
- public void testLeaderAndFollowerEntityOwnersReassignedAfterShutdown() throws Exception {
- followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
- .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
- String name = "testLeaderAndFollowerEntityOwnersReassignedAfterShutdown";
- final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
- follower3Node.configDataStore().waitTillReady();
- follower4Node.configDataStore().waitTillReady();
-
- leaderNode.waitForMembersUp("member-2", "member-3", "member-4", "member-5");
- follower1Node.waitForMembersUp("member-1", "member-3", "member-4", "member-5");
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- final DOMEntityOwnershipService follower2EntityOwnershipService =
- newOwnershipService(follower2Node.configDataStore());
- final DOMEntityOwnershipService follower3EntityOwnershipService =
- newOwnershipService(follower3Node.configDataStore());
- newOwnershipService(follower4Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- // Register follower1 candidate for entity1 and verify it becomes owner
-
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
- // Register leader candidate for entity1
-
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2", "member-1");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
- // Register leader candidate for entity2 and verify it becomes owner
-
- leaderEntityOwnershipService.registerCandidate(ENTITY2);
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
- // Register follower2 candidate for entity2
-
- follower2EntityOwnershipService.registerCandidate(ENTITY2);
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
- // Register follower3 as a candidate for entity2 as well
-
- follower3EntityOwnershipService.registerCandidate(ENTITY2);
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3", "member-4");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
- // Re-enable elections on all remaining followers so one becomes the new leader
-
- ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
- follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
- .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
- ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
- follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
- .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
- ActorRef follower4Shard = IntegrationTestKit.findLocalShard(follower4Node.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
- follower4Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
- .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
- // Shutdown the leader and follower3
-
- leaderNode.cleanup();
- follower3Node.cleanup();
-
- follower1Node.waitForMemberDown("member-1");
- follower1Node.waitForMemberDown("member-4");
- follower2Node.waitForMemberDown("member-1");
- follower2Node.waitForMemberDown("member-4");
- follower4Node.waitForMemberDown("member-1");
- follower4Node.waitForMemberDown("member-4");
-
- // Verify the prior leader's and follower3 entity owners are re-assigned.
-
- verifyCandidates(follower1Node.configDataStore(), ENTITY1, "member-2", "member-1");
- verifyCandidates(follower1Node.configDataStore(), ENTITY2, "member-1", "member-3", "member-4");
- verifyOwner(follower1Node.configDataStore(), ENTITY1, "member-2");
- verifyOwner(follower1Node.configDataStore(), ENTITY2, "member-3");
- }
-
- /**
- * Reproduces bug <a href="https://bugs.opendaylight.org/show_bug.cgi?id=4554">4554</a>.
- */
- @Test
- public void testCloseCandidateRegistrationInQuickSuccession() throws Exception {
- String name = "testCloseCandidateRegistrationInQuickSuccession";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- final DOMEntityOwnershipService follower2EntityOwnershipService =
- newOwnershipService(follower2Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
- follower1EntityOwnershipService.registerListener(ENTITY_TYPE1, follower1MockListener);
- follower2EntityOwnershipService.registerListener(ENTITY_TYPE1, follower2MockListener);
-
- final DOMEntityOwnershipCandidateRegistration candidate1 =
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, true, true));
-
- final DOMEntityOwnershipCandidateRegistration candidate2 =
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
- final DOMEntityOwnershipCandidateRegistration candidate3 =
- follower2EntityOwnershipService.registerCandidate(ENTITY1);
- verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
- Mockito.reset(leaderMockListener, follower1MockListener, follower2MockListener);
-
- ArgumentCaptor<DOMEntityOwnershipChange> leaderChangeCaptor =
- ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
- ArgumentCaptor<DOMEntityOwnershipChange> follower1ChangeCaptor =
- ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
- ArgumentCaptor<DOMEntityOwnershipChange> follower2ChangeCaptor =
- ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
- doNothing().when(leaderMockListener).ownershipChanged(leaderChangeCaptor.capture());
- doNothing().when(follower1MockListener).ownershipChanged(follower1ChangeCaptor.capture());
- doNothing().when(follower2MockListener).ownershipChanged(follower2ChangeCaptor.capture());
-
- candidate1.close();
- candidate2.close();
- candidate3.close();
-
- boolean passed = false;
- for (int i = 0; i < 100; i++) {
- Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
- final Optional<EntityOwnershipState> leaderState = leaderEntityOwnershipService.getOwnershipState(ENTITY1);
- final Optional<EntityOwnershipState> follower1State =
- follower1EntityOwnershipService.getOwnershipState(ENTITY1);
- final Optional<EntityOwnershipState> follower2State =
- follower2EntityOwnershipService.getOwnershipState(ENTITY1);
- final Optional<DOMEntityOwnershipChange> leaderChange = getValueSafely(leaderChangeCaptor);
- final Optional<DOMEntityOwnershipChange> follower1Change = getValueSafely(follower1ChangeCaptor);
- final Optional<DOMEntityOwnershipChange> follower2Change = getValueSafely(follower2ChangeCaptor);
- if (!leaderState.isPresent() || leaderState.get() == EntityOwnershipState.NO_OWNER
- && follower1State.isPresent() && follower1State.get() == EntityOwnershipState.NO_OWNER
- && follower2State.isPresent() && follower2State.get() == EntityOwnershipState.NO_OWNER
- && leaderChange.isPresent() && !leaderChange.get().getState().hasOwner()
- && follower1Change.isPresent() && !follower1Change.get().getState().hasOwner()
- && follower2Change.isPresent() && !follower2Change.get().getState().hasOwner()) {
- passed = true;
- break;
- }
- }
-
- assertTrue("No ownership change message was sent with hasOwner=false", passed);
- }
-
- private static Optional<DOMEntityOwnershipChange> getValueSafely(
- final ArgumentCaptor<DOMEntityOwnershipChange> captor) {
- try {
- return Optional.ofNullable(captor.getValue());
- } catch (MockitoException e) {
- // No value was captured
- return Optional.empty();
- }
- }
-
- /**
- * Tests bootstrapping the entity-ownership shard when there's no shards initially configured for local
- * member. The entity-ownership shard is initially created as inactive (ie remains a follower), requiring
- * an AddShardReplica request to join it to an existing leader.
- */
- @Test
- public void testEntityOwnershipShardBootstrapping() throws Exception {
- String name = "testEntityOwnershipShardBootstrapping";
- String moduleShardsConfig = MODULE_SHARDS_MEMBER_1_CONFIG;
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(moduleShardsConfig).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(moduleShardsConfig).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore follower1DistributedDataStore = follower1Node.configDataStore();
- follower1DistributedDataStore.waitTillReady();
-
- leaderNode.waitForMembersUp("member-2");
- follower1Node.waitForMembersUp("member-1");
-
- DOMEntityOwnershipService follower1EntityOwnershipService = newOwnershipService(follower1DistributedDataStore);
-
- leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
-
- // Register a candidate for follower1 - should get queued since follower1 has no leader
- final DOMEntityOwnershipCandidateRegistration candidateReg =
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
-
- // Add replica in follower1
- AddShardReplica addReplica = new AddShardReplica(ENTITY_OWNERSHIP_SHARD_NAME);
- follower1DistributedDataStore.getActorUtils().getShardManager().tell(addReplica,
- follower1Node.kit().getRef());
- Object reply = follower1Node.kit().expectMsgAnyClassOf(follower1Node.kit().duration("5 sec"),
- Success.class, Failure.class);
- if (reply instanceof Failure) {
- throw new AssertionError("AddShardReplica failed", ((Failure)reply).cause());
- }
-
- // The queued candidate registration should proceed
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
- reset(leaderMockListener);
-
- candidateReg.close();
- verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, false));
- reset(leaderMockListener);
-
- // Restart follower1 and verify the entity ownership shard is re-instated by registering.
- Cluster.get(leaderNode.kit().getSystem()).down(Cluster.get(follower1Node.kit().getSystem()).selfAddress());
- follower1Node.cleanup();
-
- follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(moduleShardsConfig).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
- follower1EntityOwnershipService = newOwnershipService(follower1Node.configDataStore());
-
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- verify(leaderMockListener, timeout(20000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
- verifyRaftState(follower1Node.configDataStore(), ENTITY_OWNERSHIP_SHARD_NAME, raftState -> {
- assertNull("Custom RaftPolicy class name", raftState.getCustomRaftPolicyClassName());
- assertEquals("Peer count", 1, raftState.getPeerAddresses().keySet().size());
- assertThat("Peer Id", Iterables.<String>getLast(raftState.getPeerAddresses().keySet()),
- org.hamcrest.CoreMatchers.containsString("member-1"));
- });
- }
-
- @Test
- public void testOwnerSelectedOnRapidUnregisteringAndRegisteringOfCandidates() throws Exception {
- String name = "testOwnerSelectedOnRapidUnregisteringAndRegisteringOfCandidates";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- newOwnershipService(follower2Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- // Register leader candidate for entity1 and verify it becomes owner
-
- DOMEntityOwnershipCandidateRegistration leaderEntity1Reg =
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
-
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
-
- leaderEntity1Reg.close();
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
-
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
- }
-
- @Test
- public void testOwnerSelectedOnRapidRegisteringAndUnregisteringOfCandidates() throws Exception {
- String name = "testOwnerSelectedOnRapidRegisteringAndUnregisteringOfCandidates";
- MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
- .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- follower1Node.configDataStore().waitTillReady();
- follower2Node.configDataStore().waitTillReady();
-
- final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
- final DOMEntityOwnershipService follower1EntityOwnershipService =
- newOwnershipService(follower1Node.configDataStore());
- newOwnershipService(follower2Node.configDataStore());
-
- leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
- // Register leader candidate for entity1 and verify it becomes owner
-
- final DOMEntityOwnershipCandidateRegistration leaderEntity1Reg =
- leaderEntityOwnershipService.registerCandidate(ENTITY1);
-
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
-
- follower1EntityOwnershipService.registerCandidate(ENTITY1);
- leaderEntity1Reg.close();
-
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
- }
-
- @Test
- public void testEntityOwnershipWithNonVotingMembers() throws Exception {
- followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
- .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
- String name = "testEntityOwnershipWithNonVotingMembers";
- final MemberNode member1LeaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
- final MemberNode member2FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member2")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode member3FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member3")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode member4FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member4")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- final MemberNode member5FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member5")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
- .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
- AbstractDataStore leaderDistributedDataStore = member1LeaderNode.configDataStore();
-
- leaderDistributedDataStore.waitTillReady();
- member2FollowerNode.configDataStore().waitTillReady();
- member3FollowerNode.configDataStore().waitTillReady();
- member4FollowerNode.configDataStore().waitTillReady();
- member5FollowerNode.configDataStore().waitTillReady();
-
- member1LeaderNode.waitForMembersUp("member-2", "member-3", "member-4", "member-5");
-
- final DOMEntityOwnershipService member3EntityOwnershipService =
- newOwnershipService(member3FollowerNode.configDataStore());
- final DOMEntityOwnershipService member4EntityOwnershipService =
- newOwnershipService(member4FollowerNode.configDataStore());
- final DOMEntityOwnershipService member5EntityOwnershipService =
- newOwnershipService(member5FollowerNode.configDataStore());
-
- newOwnershipService(member1LeaderNode.configDataStore());
- member1LeaderNode.kit().waitUntilLeader(member1LeaderNode.configDataStore().getActorUtils(),
- ENTITY_OWNERSHIP_SHARD_NAME);
-
- // Make member4 and member5 non-voting
-
- Future<Object> future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
- new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
- ImmutableMap.of("member-4", Boolean.FALSE, "member-5", Boolean.FALSE)),
- new Timeout(10, TimeUnit.SECONDS));
- Object response = Await.result(future, FiniteDuration.apply(10, TimeUnit.SECONDS));
- if (response instanceof Throwable) {
- throw new AssertionError("ChangeShardMembersVotingStatus failed", (Throwable)response);
- }
-
- assertNull("Expected null Success response. Actual " + response, response);
-
- // Register member4 candidate for entity1 - it should not become owner since it's non-voting
-
- member4EntityOwnershipService.registerCandidate(ENTITY1);
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-4");
-
- // Register member5 candidate for entity2 - it should not become owner since it's non-voting
-
- member5EntityOwnershipService.registerCandidate(ENTITY2);
- verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-5");
-
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(leaderDistributedDataStore, ENTITY1, "");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "");
-
- // Register member3 candidate for entity1 - it should become owner since it's voting
-
- member3EntityOwnershipService.registerCandidate(ENTITY1);
- verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-4", "member-3");
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-3");
-
- // Switch member4 and member5 back to voting and member3 non-voting. This should result in member4 and member5
- // to become entity owners.
-
- future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
- new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
- ImmutableMap.of("member-3", Boolean.FALSE, "member-4", Boolean.TRUE, "member-5", Boolean.TRUE)),
- new Timeout(10, TimeUnit.SECONDS));
- response = Await.result(future, FiniteDuration.apply(10, TimeUnit.SECONDS));
- if (response instanceof Throwable) {
- throw new AssertionError("ChangeShardMembersVotingStatus failed", (Throwable)response);
- }
-
- assertNull("Expected null Success response. Actual " + response, response);
-
- verifyOwner(leaderDistributedDataStore, ENTITY1, "member-4");
- verifyOwner(leaderDistributedDataStore, ENTITY2, "member-5");
- }
-
- private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity,
- final EntityOwnershipState expState) {
- Optional<EntityOwnershipState> state = service.getOwnershipState(entity);
- assertTrue("getOwnershipState present", state.isPresent());
- assertEquals("EntityOwnershipState", expState, state.get());
- }
-
- private static void verifyCandidates(final AbstractDataStore dataStore, final DOMEntity entity,
- final String... expCandidates) throws Exception {
- AssertionError lastError = null;
- Stopwatch sw = Stopwatch.createStarted();
- while (sw.elapsed(TimeUnit.MILLISECONDS) <= 10000) {
- Optional<NormalizedNode<?, ?>> possible = dataStore.newReadOnlyTransaction()
- .read(entityPath(entity.getType(), entity.getIdentifier()).node(Candidate.QNAME))
- .get(5, TimeUnit.SECONDS);
- try {
- assertTrue("Candidates not found for " + entity, possible.isPresent());
- Collection<String> actual = new ArrayList<>();
- for (MapEntryNode candidate: ((MapNode)possible.get()).getValue()) {
- actual.add(candidate.getChild(CANDIDATE_NAME_NODE_ID).get().getValue().toString());
- }
-
- assertEquals("Candidates for " + entity, Arrays.asList(expCandidates), actual);
- return;
- } catch (AssertionError e) {
- lastError = e;
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- }
- }
-
- throw lastError;
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private static void verifyOwner(final AbstractDataStore dataStore, final DOMEntity entity,
- final String expOwner) {
- AbstractEntityOwnershipTest.verifyOwner(expOwner, entity.getType(), entity.getIdentifier(), path -> {
- try {
- return dataStore.newReadOnlyTransaction().read(path).get(5, TimeUnit.SECONDS).get();
- } catch (Exception e) {
- return null;
- }
- });
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_QNAME;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithEntityTypeEntry;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityTypeEntryWithEntityEntry;
-
-import akka.actor.ActorRef;
-import com.google.common.collect.Sets;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
-import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Unit tests for DistributedEntityOwnershipService.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEntityOwnershipTest {
- static final String ENTITY_TYPE = "test";
- static final String ENTITY_TYPE2 = "test2";
- static final QName QNAME = QName.create("test", "2015-08-11", "foo");
- static int ID_COUNTER = 1;
-
- private final String dataStoreName = "config" + ID_COUNTER++;
- private AbstractDataStore dataStore;
-
- @Before
- public void setUp() {
- DatastoreContext datastoreContext = DatastoreContext.newBuilder().dataStoreName(dataStoreName)
- .shardInitializationTimeout(10, TimeUnit.SECONDS).build();
-
- Configuration configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider()) {
- @Override
- public Collection<MemberName> getUniqueMemberNamesForAllShards() {
- return Sets.newHashSet(MemberName.forName("member-1"));
- }
- };
-
- DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
- Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
- dataStore = new DistributedDataStore(getSystem(), new MockClusterWrapper(), configuration,
- mockContextFactory, null);
-
- dataStore.onModelContextUpdated(EOSTestUtils.SCHEMA_CONTEXT);
- }
-
- @After
- public void tearDown() {
- dataStore.close();
- }
-
- private static <T> T verifyMessage(final DistributedEntityOwnershipService mock, final Class<T> type) {
- final ArgumentCaptor<T> message = ArgumentCaptor.forClass(type);
- verify(mock).executeLocalEntityOwnershipShardOperation(message.capture());
- return message.getValue();
- }
-
- @Test
- public void testEntityOwnershipShardCreated() throws Exception {
- DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
- EntityOwnerSelectionStrategyConfig.newBuilder().build());
-
- Future<ActorRef> future = dataStore.getActorUtils().findLocalShardAsync(
- DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME);
- ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
- assertNotNull(DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME + " not found", shardActor);
-
- service.close();
- }
-
- @Test
- public void testRegisterCandidate() throws Exception {
- DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
- dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
- YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
- DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
- verifyRegisterCandidateLocal(service, entity);
- verifyEntityOwnershipCandidateRegistration(entity, reg);
- verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE, entityId,
- dataStore.getActorUtils().getCurrentMemberName().getName());
-
- // Register the same entity - should throw exception
-
- try {
- service.registerCandidate(entity);
- fail("Expected CandidateAlreadyRegisteredException");
- } catch (CandidateAlreadyRegisteredException e) {
- // expected
- assertEquals("getEntity", entity, e.getEntity());
- }
-
- // Register a different entity - should succeed
- reset(service);
-
- DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId);
- DOMEntityOwnershipCandidateRegistration reg2 = service.registerCandidate(entity2);
-
- verifyEntityOwnershipCandidateRegistration(entity2, reg2);
- verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE2, entityId,
- dataStore.getActorUtils().getCurrentMemberName().getName());
-
- service.close();
- }
-
- @Test
- public void testCloseCandidateRegistration() throws Exception {
- DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
- dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, YangInstanceIdentifier.of(QNAME));
-
- DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
-
- verifyEntityOwnershipCandidateRegistration(entity, reg);
- verifyRegisterCandidateLocal(service, entity);
-
- reset(service);
- reg.close();
- UnregisterCandidateLocal unregCandidate = verifyMessage(service, UnregisterCandidateLocal.class);
- assertEquals("getEntity", entity, unregCandidate.getEntity());
-
- // Re-register - should succeed.
- reset(service);
- service.registerCandidate(entity);
- verifyRegisterCandidateLocal(service, entity);
-
- service.close();
- }
-
- @Test
- public void testListenerRegistration() {
- DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
- dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
- YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
- DOMEntityOwnershipListener listener = mock(DOMEntityOwnershipListener.class);
-
- DOMEntityOwnershipListenerRegistration reg = service.registerListener(entity.getType(), listener);
-
- assertNotNull("EntityOwnershipListenerRegistration null", reg);
- assertEquals("getEntityType", entity.getType(), reg.getEntityType());
- assertEquals("getInstance", listener, reg.getInstance());
-
- RegisterListenerLocal regListener = verifyMessage(service, RegisterListenerLocal.class);
- assertSame("getListener", listener, regListener.getListener());
- assertEquals("getEntityType", entity.getType(), regListener.getEntityType());
-
- reset(service);
- reg.close();
- UnregisterListenerLocal unregListener = verifyMessage(service, UnregisterListenerLocal.class);
- assertEquals("getEntityType", entity.getType(), unregListener.getEntityType());
- assertSame("getListener", listener, unregListener.getListener());
-
- service.close();
- }
-
- @Test
- public void testGetOwnershipState() throws Exception {
- DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
- dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
- final Shard mockShard = Mockito.mock(Shard.class);
- ShardDataTree shardDataTree = new ShardDataTree(mockShard, EOSTestUtils.SCHEMA_CONTEXT, TreeType.OPERATIONAL);
-
- when(service.getLocalEntityOwnershipShardDataTree()).thenReturn(shardDataTree.getDataTree());
-
- DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, "one");
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, entity1.getIdentifier(), "member-1"),
- shardDataTree);
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithEntityTypeEntry(entityTypeEntryWithEntityEntry(entity1.getType(),
- entityEntryWithOwner(entity1.getIdentifier(), "member-1"))), shardDataTree);
- verifyGetOwnershipState(service, entity1, EntityOwnershipState.IS_OWNER);
-
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE,
- entity1.getIdentifier(), "member-2"), shardDataTree);
- writeNode(entityPath(entity1.getType(), entity1.getIdentifier()),
- entityEntryWithOwner(entity1.getIdentifier(), "member-2"), shardDataTree);
- verifyGetOwnershipState(service, entity1, EntityOwnershipState.OWNED_BY_OTHER);
-
- writeNode(entityPath(entity1.getType(), entity1.getIdentifier()), entityEntryWithOwner(entity1.getIdentifier(),
- ""), shardDataTree);
- verifyGetOwnershipState(service, entity1, EntityOwnershipState.NO_OWNER);
-
- DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, "two");
- Optional<EntityOwnershipState> state = service.getOwnershipState(entity2);
- assertFalse("getOwnershipState present", state.isPresent());
-
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, entity2.getIdentifier(), "member-1"),
- shardDataTree);
- writeNode(entityPath(entity2.getType(), entity2.getIdentifier()), ImmutableNodes.mapEntry(ENTITY_QNAME,
- ENTITY_ID_QNAME, entity2.getIdentifier()), shardDataTree);
- verifyGetOwnershipState(service, entity2, EntityOwnershipState.NO_OWNER);
-
- deleteNode(candidatePath(entityPath(entity2.getType(), entity2.getIdentifier()), "member-1"), shardDataTree);
- Optional<EntityOwnershipState> state2 = service.getOwnershipState(entity2);
- assertFalse("getOwnershipState present", state2.isPresent());
- service.close();
- }
-
- @Test
- public void testIsCandidateRegistered() throws CandidateAlreadyRegisteredException {
- DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
- EntityOwnerSelectionStrategyConfig.newBuilder().build());
-
- final DOMEntity test = new DOMEntity("test-type", "test");
-
- assertFalse(service.isCandidateRegistered(test));
-
- service.registerCandidate(test);
-
- assertTrue(service.isCandidateRegistered(test));
-
- service.close();
- }
-
- private static void verifyGetOwnershipState(final DistributedEntityOwnershipService service, final DOMEntity entity,
- final EntityOwnershipState expState) {
- Optional<EntityOwnershipState> state = service.getOwnershipState(entity);
- assertTrue("getOwnershipState present", state.isPresent());
- assertEquals("EntityOwnershipState", expState, state.get());
- }
-
- @SuppressWarnings("checkstyle:IllegalCatch")
- private void verifyEntityCandidate(final ActorRef entityOwnershipShard, final String entityType,
- final YangInstanceIdentifier entityId, final String candidateName) {
- verifyEntityCandidate(entityType, entityId, candidateName, path -> {
- try {
- return dataStore.newReadOnlyTransaction().read(path).get(5, TimeUnit.SECONDS).get();
- } catch (Exception e) {
- return null;
- }
- });
- }
-
- private static void verifyRegisterCandidateLocal(final DistributedEntityOwnershipService service,
- final DOMEntity entity) {
- RegisterCandidateLocal regCandidate = verifyMessage(service, RegisterCandidateLocal.class);
- assertEquals("getEntity", entity, regCandidate.getEntity());
- }
-
- private static void verifyEntityOwnershipCandidateRegistration(final DOMEntity entity,
- final DOMEntityOwnershipCandidateRegistration reg) {
- assertNotNull("EntityOwnershipCandidateRegistration null", reg);
- assertEquals("getInstance", entity, reg.getInstance());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import java.io.File;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
-
-final class EOSTestUtils {
- static final EffectiveModelContext SCHEMA_CONTEXT = YangParserTestUtils.parseYangFiles(
- new File("src/main/yang/entity-owners.yang"));
-
- private EOSTestUtils() {
- // Hidden on purpose
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-/**
- * Unit tests for EntityOwnerChangeListener.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnerChangeListenerTest {
- private static final String LOCAL_MEMBER_NAME = "member-1";
- private static final String REMOTE_MEMBER_NAME1 = "member-2";
- private static final String REMOTE_MEMBER_NAME2 = "member-3";
- private static final String ENTITY_TYPE = "test";
- private static final YangInstanceIdentifier ENTITY_ID1 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
- private static final YangInstanceIdentifier ENTITY_ID2 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
- private static final DOMEntity ENTITY1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
- private static final DOMEntity ENTITY2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
-
- private final Shard mockShard = Mockito.mock(Shard.class);
-
- private final ShardDataTree shardDataTree = new ShardDataTree(mockShard, EOSTestUtils.SCHEMA_CONTEXT,
- TreeType.OPERATIONAL);
- private final EntityOwnershipListenerSupport mockListenerSupport = mock(EntityOwnershipListenerSupport.class);
- private EntityOwnerChangeListener listener;
-
- @Before
- public void setup() {
- listener = new EntityOwnerChangeListener(MemberName.forName(LOCAL_MEMBER_NAME), mockListenerSupport);
- listener.init(shardDataTree);
- }
-
- @Test
- public void testOnDataTreeChanged() throws Exception {
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME));
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, LOCAL_MEMBER_NAME));
- verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
- anyBoolean(), anyBoolean());
-
- // Write local member as owner for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, true, true);
-
- // Add remote member 1 as candidate for entity 1 - listener support should not get notified
-
- reset(mockListenerSupport);
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, REMOTE_MEMBER_NAME1));
- verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
- anyBoolean(), anyBoolean());
-
- // Change owner to remote member 1 for entity 1
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME1));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, true, false, true);
-
- // Change owner to remote member 2 for entity 1
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME2));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, false, true);
-
- // Clear the owner for entity 1
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, ""));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, false, false);
-
- // Change owner to the local member for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, true, true);
-
- // Change owner to remote member 2 for entity 2
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, REMOTE_MEMBER_NAME1));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, false, false, true);
-
- // Change owner to the local member for entity 2
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, false, true, true);
-
- // Write local member owner for entity 2 again - expect no change
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
- verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
- anyBoolean(), anyBoolean());
-
- // Clear the owner for entity 2
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, null));
- verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, true, false, false);
-
- // Clear the owner for entity 2 again - expect no change
-
- reset(mockListenerSupport);
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, null));
- verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
- anyBoolean(), anyBoolean());
- }
-
- private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
- throws DataValidationFailedException {
- AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.testkit.TestActorRef;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Unit tests for EntityOwnershipListenerActor.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipListenerActorTest extends AbstractEntityOwnershipTest {
- private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-
- @After
- public void tearDown() {
- actorFactory.close();
- }
-
- @Test
- public void testOnEntityOwnershipChanged() {
- DOMEntityOwnershipListener mockListener = mock(DOMEntityOwnershipListener.class);
-
- TestActorRef<EntityOwnershipListenerActor> listenerActor = actorFactory.createTestActor(
- EntityOwnershipListenerActor.props(mockListener), actorFactory.generateActorId("listener"));
-
- DOMEntity entity = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id1")));
- boolean wasOwner = false;
- boolean isOwner = true;
- boolean hasOwner = true;
- listenerActor.tell(new DOMEntityOwnershipChange(entity, EntityOwnershipChangeState.from(
- wasOwner, isOwner, hasOwner)), ActorRef.noSender());
-
- verify(mockListener, timeout(5000)).ownershipChanged(ownershipChange(entity, wasOwner, isOwner, hasOwner));
- }
-
- @Test
- public void testOnEntityOwnershipChangedWithListenerEx() {
- DOMEntityOwnershipListener mockListener = mock(DOMEntityOwnershipListener.class);
-
- DOMEntity entity1 = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id1")));
- doThrow(new RuntimeException("mock")).when(mockListener).ownershipChanged(
- ownershipChange(entity1, false, true, true));
- DOMEntity entity2 = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id2")));
- doNothing().when(mockListener).ownershipChanged(ownershipChange(entity2, true, false, false));
-
- TestActorRef<EntityOwnershipListenerActor> listenerActor = actorFactory.createTestActor(
- EntityOwnershipListenerActor.props(mockListener), actorFactory.generateActorId("listener"));
-
- listenerActor.tell(new DOMEntityOwnershipChange(entity1, EntityOwnershipChangeState.from(
- false, true, true)), ActorRef.noSender());
- listenerActor.tell(new DOMEntityOwnershipChange(entity2, EntityOwnershipChangeState.from(
- true, false, false)), ActorRef.noSender());
-
- verify(mockListener, timeout(5000)).ownershipChanged(ownershipChange(entity2, true, false, false));
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorContext;
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.TestActorRef;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.collection.Iterator;
-import scala.collection.immutable.Iterable;
-
-/**
- * Unit tests for EntityOwnershipListenerSupport.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipListenerSupportTest extends AbstractEntityOwnershipTest {
- private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
- private ActorContext actorContext;
-
- @Before
- public void setup() {
- TestActorRef<DoNothingActor> actor = actorFactory.createTestActor(
- Props.create(DoNothingActor.class), actorFactory.generateActorId("test"));
-
- actorContext = actor.underlyingActor().getContext();
- }
-
- @After
- public void tearDown() {
- actorFactory.close();
- }
-
- @Test
- public void testNotifyEntityOwnershipListeners() {
- EntityOwnershipListenerSupport support = new EntityOwnershipListenerSupport(actorContext, "test");
-
- DOMEntityOwnershipListener mockListener1 = mock(DOMEntityOwnershipListener.class, "EntityOwnershipListener1");
- DOMEntityOwnershipListener mockListener2 = mock(DOMEntityOwnershipListener.class, "EntityOwnershipListener2");
- DOMEntityOwnershipListener mockListener12 = mock(DOMEntityOwnershipListener.class,
- "EntityOwnershipListener1_2");
- String entityType1 = "type1";
- String entityType2 = "type2";
- final DOMEntity entity1 = new DOMEntity(entityType1, YangInstanceIdentifier.of(QName.create("test", "id1")));
- final DOMEntity entity2 = new DOMEntity(entityType2, YangInstanceIdentifier.of(QName.create("test", "id2")));
- final DOMEntity entity3 = new DOMEntity("noListener", YangInstanceIdentifier.of(QName.create("test", "id5")));
-
- // Add EntityOwnershipListener registrations.
-
- support.addEntityOwnershipListener(entityType1, mockListener1);
- support.addEntityOwnershipListener(entityType1, mockListener1); // register again - should be noop
- support.addEntityOwnershipListener(entityType1, mockListener12);
- support.addEntityOwnershipListener(entityType2, mockListener2);
-
- // Notify entity1 changed and verify appropriate listeners are notified.
-
- support.notifyEntityOwnershipListeners(entity1, false, true, true);
-
- verify(mockListener1, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
- verify(mockListener12, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- assertEquals("# of listener actors", 2, actorContext.children().size());
- reset(mockListener1, mockListener2, mockListener12);
-
- // Notify entity2 changed and verify appropriate listeners are notified.
-
- support.notifyEntityOwnershipListeners(entity2, false, true, true);
-
- verify(mockListener2, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- assertEquals("# of listener actors", 3, actorContext.children().size());
- reset(mockListener1, mockListener2, mockListener12);
-
- // Notify entity3 changed and verify no listeners are notified.
-
- support.notifyEntityOwnershipListeners(entity3, true, false, true);
-
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- reset(mockListener1, mockListener2, mockListener12);
-
- Iterable<ActorRef> listenerActors = actorContext.children();
- assertEquals("# of listener actors", 3, listenerActors.size());
-
- // Unregister mockListener1, issue a change for entity1 and verify only remaining listeners are notified.
-
- support.removeEntityOwnershipListener(entityType1, mockListener1);
- support.notifyEntityOwnershipListeners(entity1, true, false, true);
-
- verify(mockListener12, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- reset(mockListener1, mockListener2, mockListener12);
-
- // Unregister all listeners and verify their listener actors are destroyed.
-
- List<TestKit> watchers = new ArrayList<>();
- for (Iterator<ActorRef> iter = listenerActors.iterator(); iter.hasNext();) {
- TestKit kit = new TestKit(getSystem());
- kit.watch(iter.next());
- watchers.add(kit);
- }
-
- support.removeEntityOwnershipListener(entityType1, mockListener12);
- support.removeEntityOwnershipListener(entityType1, mockListener12); // un-register again - should be noop
- support.removeEntityOwnershipListener(entityType2, mockListener2);
-
- Iterator<ActorRef> iter = listenerActors.iterator();
- for (TestKit kit: watchers) {
- kit.expectTerminated(kit.duration("3 seconds"), iter.next());
- }
-
- assertEquals("# of listener actors", 0, actorContext.children().size());
-
- // Re-register mockListener1 and verify it is notified.
-
- reset(mockListener1, mockListener2);
-
- support.addEntityOwnershipListener(entityType1, mockListener1);
- support.notifyEntityOwnershipListeners(entity1, false, false, true);
-
- verify(mockListener1, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
- verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
- verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-
- // Quickly register and unregister mockListener2 - expecting no exceptions.
-
- support.addEntityOwnershipListener(entityType1, mockListener2);
- support.removeEntityOwnershipListener(entityType1, mockListener2);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.AdditionalMatchers.or;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.clearMessages;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectFirstMatching;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.actor.Terminated;
-import akka.dispatch.Dispatchers;
-import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Predicate;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.ShardTestKit;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
-import org.opendaylight.controller.cluster.datastore.messages.SuccessReply;
-import org.opendaylight.controller.cluster.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.entityownership.selectionstrategy.LastCandidateSelectionStrategy;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
-import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
-import org.opendaylight.controller.cluster.raft.messages.RequestVote;
-import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Unit tests for EntityOwnershipShard.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipShardTest extends AbstractEntityOwnershipTest {
- private static final String ENTITY_TYPE = "test type";
- private static final YangInstanceIdentifier ENTITY_ID1 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
- private static final YangInstanceIdentifier ENTITY_ID2 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
- private static final YangInstanceIdentifier ENTITY_ID3 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity3"));
- private static final YangInstanceIdentifier ENTITY_ID4 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity4"));
- private static final YangInstanceIdentifier ENTITY_ID5 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity5"));
- private static final String LOCAL_MEMBER_NAME = "local-member-1";
- private static final String PEER_MEMBER_1_NAME = "peer-member-1";
- private static final String PEER_MEMBER_2_NAME = "peer-member-2";
-
- private Builder dataStoreContextBuilder = DatastoreContext.newBuilder().persistent(false);
- private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-
- @After
- public void tearDown() {
- actorFactory.close();
- }
-
- @Test
- public void testOnRegisterCandidateLocal() {
- testLog.info("testOnRegisterCandidateLocal starting");
-
- ShardTestKit kit = new ShardTestKit(getSystem());
-
- TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(newLocalShardProps());
-
- ShardTestKit.waitUntilLeader(shard);
-
- YangInstanceIdentifier entityId = ENTITY_ID1;
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
- shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
- verifyOwner(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
- testLog.info("testOnRegisterCandidateLocal ending");
- }
-
- @Test
- public void testOnRegisterCandidateLocalWithNoInitialLeader() {
- testLog.info("testOnRegisterCandidateLocalWithNoInitialLeader starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
- TestEntityOwnershipShard peerShard = peer.underlyingActor();
- peerShard.startDroppingMessagesOfType(RequestVote.class);
- peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
- YangInstanceIdentifier entityId = ENTITY_ID1;
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
- shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Now allow RequestVotes to the peer so the shard becomes the leader. This should retry the commit.
- peerShard.stopDroppingMessagesOfType(RequestVote.class);
-
- verifyCommittedEntityCandidate(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
- verifyOwner(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
- testLog.info("testOnRegisterCandidateLocalWithNoInitialLeader ending");
- }
-
- @Test
- public void testOnRegisterCandidateLocalWithNoInitialConsensus() {
- testLog.info("testOnRegisterCandidateLocalWithNoInitialConsensus starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
- .shardTransactionCommitTimeoutInSeconds(1);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
- TestEntityOwnershipShard peerShard = peer.underlyingActor();
- peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
- // Drop AppendEntries so consensus isn't reached.
- peerShard.startDroppingMessagesOfType(AppendEntries.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- YangInstanceIdentifier entityId = ENTITY_ID1;
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
- leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Wait enough time for the commit to timeout.
- Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
-
- // Resume AppendEntries - the follower should ack the commit which should then result in the candidate
- // write being applied to the state.
- peerShard.stopDroppingMessagesOfType(AppendEntries.class);
-
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
- testLog.info("testOnRegisterCandidateLocalWithNoInitialConsensus ending");
- }
-
- @Test
- public void testOnRegisterCandidateLocalWithIsolatedLeader() throws Exception {
- testLog.info("testOnRegisterCandidateLocalWithIsolatedLeader starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
- .shardIsolatedLeaderCheckIntervalInMillis(50);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
- TestEntityOwnershipShard peerShard = peer.underlyingActor();
- peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME));
-
- ShardTestKit.waitUntilLeader(leader);
-
- // Drop AppendEntries and wait enough time for the shard to switch to IsolatedLeader.
- peerShard.startDroppingMessagesOfType(AppendEntries.class);
- verifyRaftState(leader, state ->
- assertEquals("getRaftState", RaftState.IsolatedLeader.toString(), state.getRaftState()));
-
- YangInstanceIdentifier entityId = ENTITY_ID1;
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
- leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Resume AppendEntries - the candidate write should now be committed.
- peerShard.stopDroppingMessagesOfType(AppendEntries.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
- testLog.info("testOnRegisterCandidateLocalWithIsolatedLeader ending");
- }
-
- @Test
- public void testOnRegisterCandidateLocalWithRemoteLeader() {
- testLog.info("testOnRegisterCandidateLocalWithRemoteLeader starting");
-
- ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
- .shardBatchedModificationCount(5);
-
- ShardIdentifier leaderId = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier localId = newShardId(LOCAL_MEMBER_NAME);
- TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(leaderId, peerMap(localId.toString()), PEER_MEMBER_1_NAME),
- actorFactory.createActor(MessageCollectorActor.props())), leaderId.toString());
- final TestEntityOwnershipShard leaderShard = leader.underlyingActor();
-
- TestActorRef<TestEntityOwnershipShard> local = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(localId, peerMap(leaderId.toString()),LOCAL_MEMBER_NAME)), localId.toString());
- local.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
- // Test with initial commit timeout and subsequent retry.
-
- local.tell(dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1).build(), ActorRef.noSender());
- leaderShard.startDroppingMessagesOfType(BatchedModifications.class);
-
- local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- expectFirstMatching(leaderShard.collectorActor(), BatchedModifications.class);
-
- // Send a bunch of registration messages quickly and verify.
-
- leaderShard.stopDroppingMessagesOfType(BatchedModifications.class);
- clearMessages(leaderShard.collectorActor());
-
- int max = 100;
- List<YangInstanceIdentifier> entityIds = new ArrayList<>();
- for (int i = 1; i <= max; i++) {
- YangInstanceIdentifier id = YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "test" + i));
- entityIds.add(id);
- local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, id)), kit.getRef());
- }
-
- for (int i = 0; i < max; i++) {
- verifyCommittedEntityCandidate(local, ENTITY_TYPE, entityIds.get(i), LOCAL_MEMBER_NAME);
- }
-
- testLog.info("testOnRegisterCandidateLocalWithRemoteLeader ending");
- }
-
- @Test
- public void testOnUnregisterCandidateLocal() {
- testLog.info("testOnUnregisterCandidateLocal starting");
-
- ShardTestKit kit = new ShardTestKit(getSystem());
- TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(newLocalShardProps());
- ShardTestKit.waitUntilLeader(shard);
-
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
- // Register
-
- shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
- // Unregister
-
- shard.tell(new UnregisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, "");
-
- // Register again
-
- shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
- testLog.info("testOnUnregisterCandidateLocal ending");
- }
-
- @Test
- public void testOwnershipChanges() {
- testLog.info("testOwnershipChanges starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
- peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
- peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
- leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
- // Add a remote candidate
-
- peer1.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Register local
-
- leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Verify the remote candidate becomes owner
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Add another remote candidate and verify ownership doesn't change
-
- peer2.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Remove the second remote candidate and verify ownership doesn't change
-
- peer2.tell(new UnregisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Remove the first remote candidate and verify the local candidate becomes owner
-
- peer1.tell(new UnregisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
- // Add the second remote candidate back and verify ownership doesn't change
-
- peer2.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
- // Unregister the local candidate and verify the second remote candidate becomes owner
-
- leader.tell(new UnregisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
-
- testLog.info("testOwnershipChanges ending");
- }
-
- @Test
- public void testOwnerChangesOnPeerAvailabilityChanges() throws Exception {
- testLog.info("testOwnerChangesOnPeerAvailabilityChanges starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
- .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
- peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
- peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
- leaderId.toString());
-
- verifyRaftState(leader, state ->
- assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
- // Send PeerDown and PeerUp with no entities
-
- leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
- leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
- // Add candidates for entity1 with the local leader as the owner
-
- leader.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-
- peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
- // Add candidates for entity2 with peerMember2 as the owner
-
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
- peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
- // Add candidates for entity3 with peerMember2 as the owner.
-
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-
- leader.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-
- peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-
- // Add only candidate peerMember2 for entity4.
-
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID4)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
- // Add only candidate peerMember1 for entity5.
-
- peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID5)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID5, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID5, PEER_MEMBER_1_NAME);
-
- // Kill peerMember2 and send PeerDown - the entities (2, 3, 4) owned by peerMember2 should get a new
- // owner selected
-
- kit.watch(peer2);
- peer2.tell(PoisonPill.getInstance(), ActorRef.noSender());
- kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
- kit.unwatch(peer2);
-
- leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
- // Send PeerDown again - should be noop
- leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
- peer1.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- // no other candidates for entity4 so peerMember2 should remain owner.
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
- // Reinstate peerMember2
-
- peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
- peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
- leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
- // Send PeerUp again - should be noop
- leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
- peer1.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
- // peerMember2's candidates should be removed on startup.
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, "");
-
- // Add back candidate peerMember2 for entities 1, 2, & 3.
-
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
- // Kill peerMember1 and send PeerDown - entity 2 should get a new owner selected
-
- kit.watch(peer1);
- peer1.tell(PoisonPill.getInstance(), ActorRef.noSender());
- kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
- kit.unwatch(peer1);
- leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
- // Verify the reinstated peerMember2 is fully synced.
-
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
- // Reinstate peerMember1 and verify no owner changes
-
- peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(newShardBuilder(
- peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)), peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
- leader.tell(new PeerUp(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, "");
-
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
-
- verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
- verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
- verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
-
- // Verify the reinstated peerMember1 is fully synced.
-
- verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
- verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
- verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID4, "");
-
- AtomicLong leaderLastApplied = new AtomicLong();
- verifyRaftState(leader, rs -> {
- assertEquals("LastApplied up-to-date", rs.getLastApplied(), rs.getLastIndex());
- leaderLastApplied.set(rs.getLastApplied());
- });
-
- verifyRaftState(peer2, rs -> assertEquals("LastApplied", leaderLastApplied.get(), rs.getLastIndex()));
-
- // Kill the local leader and elect peer2 the leader. This should cause a new owner to be selected for
- // the entities (1 and 3) previously owned by the local leader member.
-
- peer2.tell(new PeerAddressResolved(peerId1.toString(), peer1.path().toString()), ActorRef.noSender());
- peer2.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
- peer2.tell(new PeerUp(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
- kit.watch(leader);
- leader.tell(PoisonPill.getInstance(), ActorRef.noSender());
- kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
- kit.unwatch(leader);
- peer2.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
- peer2.tell(TimeoutNow.INSTANCE, peer2);
-
- verifyRaftState(peer2, state ->
- assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
- verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
- testLog.info("testOwnerChangesOnPeerAvailabilityChanges ending");
- }
-
- @Test
- public void testLeaderIsolation() throws Exception {
- testLog.info("testLeaderIsolation starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
- .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
- TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
- peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
- peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- dataStoreContextBuilder = DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
- .shardIsolatedLeaderCheckIntervalInMillis(500);
-
- TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME)),
- leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- // Add entity1 candidates for all members with the leader as the owner
-
- DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
- leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
- peer1.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
- peer2.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_2_NAME);
-
- verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(peer1, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(peer2, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
- // Add entity2 candidates for all members with peer1 as the owner
-
- DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
- peer1.tell(new RegisterCandidateLocal(entity2), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
- peer2.tell(new RegisterCandidateLocal(entity2), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_2_NAME);
-
- leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-
- verifyOwner(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyOwner(peer2, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Add entity3 candidates for all members with peer2 as the owner
-
- DOMEntity entity3 = new DOMEntity(ENTITY_TYPE, ENTITY_ID3);
- peer2.tell(new RegisterCandidateLocal(entity3), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
- leader.tell(new RegisterCandidateLocal(entity3), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), LOCAL_MEMBER_NAME);
-
- peer1.tell(new RegisterCandidateLocal(entity3), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_1_NAME);
-
- verifyOwner(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
- verifyOwner(peer1, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
- verifyOwner(peer2, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
- // Add listeners on all members
-
- DOMEntityOwnershipListener leaderListener = mock(DOMEntityOwnershipListener.class);
- leader.tell(new RegisterListenerLocal(leaderListener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
- ownershipChange(entity1, false, true, true), ownershipChange(entity2, false, false, true)),
- ownershipChange(entity3, false, false, true)));
- reset(leaderListener);
-
- DOMEntityOwnershipListener peer1Listener = mock(DOMEntityOwnershipListener.class);
- peer1.tell(new RegisterListenerLocal(peer1Listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verify(peer1Listener, timeout(5000).times(3)).ownershipChanged(or(or(
- ownershipChange(entity1, false, false, true), ownershipChange(entity2, false, true, true)),
- ownershipChange(entity3, false, false, true)));
- reset(peer1Listener);
-
- DOMEntityOwnershipListener peer2Listener = mock(DOMEntityOwnershipListener.class);
- peer2.tell(new RegisterListenerLocal(peer2Listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verify(peer2Listener, timeout(5000).times(3)).ownershipChanged(or(or(
- ownershipChange(entity1, false, false, true), ownershipChange(entity2, false, false, true)),
- ownershipChange(entity3, false, true, true)));
- reset(peer2Listener);
-
- // Isolate the leader by dropping AppendEntries to the followers and incoming messages from the followers.
-
- leader.underlyingActor().startDroppingMessagesOfType(RequestVote.class);
- leader.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
- peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class,
- ae -> ae.getLeaderId().equals(leaderId.toString()));
- peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
- // Make peer1 start an election and become leader by enabling the ElectionTimeout message.
-
- peer1.underlyingActor().stopDroppingMessagesOfType(ElectionTimeout.class);
-
- // Send PeerDown to the isolated leader so it tries to re-assign ownership for the entities owned by the
- // isolated peers.
-
- leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
- leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
- verifyRaftState(leader, state ->
- assertEquals("getRaftState", RaftState.IsolatedLeader.toString(), state.getRaftState()));
-
- // Expect inJeopardy notification on the isolated leader.
-
- verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
- ownershipChange(entity1, true, true, true, true), ownershipChange(entity2, false, false, true, true)),
- ownershipChange(entity3, false, false, true, true)));
- reset(leaderListener);
-
- verifyRaftState(peer1, state ->
- assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
- // Send PeerDown to the new leader peer1 so it re-assigns ownership for the entities owned by the
- // isolated leader.
-
- peer1.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
- verifyOwner(peer1, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
- verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
- reset(peer1Listener);
-
- verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
- reset(peer2Listener);
-
- // Remove the isolation.
-
- leader.underlyingActor().stopDroppingMessagesOfType(RequestVote.class);
- leader.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
- peer2.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
- peer1.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-
- // Previous leader should switch to Follower and send inJeopardy cleared notifications for all entities.
-
- verifyRaftState(leader, state ->
- assertEquals("getRaftState", RaftState.Follower.toString(), state.getRaftState()));
-
- verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
- ownershipChange(entity1, true, true, true), ownershipChange(entity2, false, false, true)),
- ownershipChange(entity3, false, false, true)));
-
- verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
- verify(leaderListener, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
-
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyOwner(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
- verifyNoMoreInteractions(leaderListener);
- verifyNoMoreInteractions(peer1Listener);
- verifyNoMoreInteractions(peer2Listener);
-
- testLog.info("testLeaderIsolation ending");
- }
-
- @Test
- public void testLeaderIsolationWithPendingCandidateAdded() throws Exception {
- testLog.info("testLeaderIsolationWithPendingCandidateAdded starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
- .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
- TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME),
- actorFactory.createActor(MessageCollectorActor.props())), peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME),
- actorFactory.createTestActor(MessageCollectorActor.props())), peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- dataStoreContextBuilder = DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
- .shardIsolatedLeaderCheckIntervalInMillis(500);
-
- TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
- actorFactory.createTestActor(MessageCollectorActor.props())), leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- // Add listeners on all members
-
- DOMEntityOwnershipListener leaderListener = mock(DOMEntityOwnershipListener.class,
- "DOMEntityOwnershipListener-" + LOCAL_MEMBER_NAME);
- leader.tell(new RegisterListenerLocal(leaderListener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- DOMEntityOwnershipListener peer1Listener = mock(DOMEntityOwnershipListener.class,
- "DOMEntityOwnershipListener-" + PEER_MEMBER_1_NAME);
- peer1.tell(new RegisterListenerLocal(peer1Listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- DOMEntityOwnershipListener peer2Listener = mock(DOMEntityOwnershipListener.class,
- "DOMEntityOwnershipListener-" + PEER_MEMBER_2_NAME);
- peer2.tell(new RegisterListenerLocal(peer2Listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Drop the CandidateAdded message to the leader for now.
-
- leader.underlyingActor().startDroppingMessagesOfType(CandidateAdded.class);
-
- // Add an entity candidates for the leader. Since we've blocked the CandidateAdded message, it won't be
- // assigned the owner.
-
- DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
- leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyCommittedEntityCandidate(peer1, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyCommittedEntityCandidate(peer2, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
- DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
- leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyCommittedEntityCandidate(peer1, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyCommittedEntityCandidate(peer2, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-
- // Capture the CandidateAdded messages.
-
- final List<CandidateAdded> candidateAdded = expectMatching(leader.underlyingActor().collectorActor(),
- CandidateAdded.class, 2);
-
- // Drop AppendEntries to the followers containing a log entry, which will be for the owner writes after we
- // forward the CandidateAdded messages to the leader. This will leave the pending owner write tx's uncommitted.
-
- peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class, ae -> ae.getEntries().size() > 0);
- peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class, ae -> ae.getEntries().size() > 0);
-
- // Now forward the CandidateAdded messages to the leader and wait for it to send out the AppendEntries.
-
- leader.underlyingActor().stopDroppingMessagesOfType(CandidateAdded.class);
- leader.tell(candidateAdded.get(0), leader);
- leader.tell(candidateAdded.get(1), leader);
-
- expectMatching(peer1.underlyingActor().collectorActor(), AppendEntries.class, 2,
- ae -> ae.getEntries().size() > 0);
-
- // Verify no owner assigned.
-
- verifyNoOwnerSet(leader, entity1.getType(), entity1.getIdentifier());
- verifyNoOwnerSet(leader, entity2.getType(), entity2.getIdentifier());
-
- // Isolate the leader by dropping AppendEntries to the followers and incoming messages from the followers.
-
- leader.underlyingActor().startDroppingMessagesOfType(RequestVote.class);
- leader.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
- peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class,
- ae -> ae.getLeaderId().equals(leaderId.toString()));
- peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
- // Send PeerDown to the isolated leader - should be no-op since there's no owned entities.
-
- leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
- leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
- // Verify the leader transitions to IsolatedLeader.
-
- verifyRaftState(leader, state -> assertEquals("getRaftState", RaftState.IsolatedLeader.toString(),
- state.getRaftState()));
-
- // Send PeerDown to the new leader peer1.
-
- peer1.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
- // Make peer1 start an election and become leader by sending the TimeoutNow message.
-
- peer1.tell(TimeoutNow.INSTANCE, ActorRef.noSender());
-
- // Verify the peer1 transitions to Leader.
-
- verifyRaftState(peer1, state -> assertEquals("getRaftState", RaftState.Leader.toString(),
- state.getRaftState()));
-
- verifyNoOwnerSet(peer1, entity1.getType(), entity1.getIdentifier());
- verifyNoOwnerSet(peer2, entity1.getType(), entity2.getIdentifier());
-
- verifyNoMoreInteractions(peer1Listener);
- verifyNoMoreInteractions(peer2Listener);
-
- // Add candidate peer1 candidate for entity2.
-
- peer1.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-
- verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
- verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
- verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, false, true));
-
- reset(leaderListener, peer1Listener, peer2Listener);
-
- // Remove the isolation.
-
- leader.underlyingActor().stopDroppingMessagesOfType(RequestVote.class);
- leader.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
- peer2.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
- peer1.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-
- // Previous leader should switch to Follower.
-
- verifyRaftState(leader, state -> assertEquals("getRaftState", RaftState.Follower.toString(),
- state.getRaftState()));
-
- // Send PeerUp to peer1 and peer2.
-
- peer1.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
- peer2.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
- // The previous leader should become the owner of entity1.
-
- verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
- // The previous leader's DOMEntityOwnershipListener should get 4 total notifications:
- // - inJeopardy cleared for entity1 (wasOwner=false, isOwner=false, hasOwner=false, inJeopardy=false)
- // - inJeopardy cleared for entity2 (wasOwner=false, isOwner=false, hasOwner=false, inJeopardy=false)
- // - local owner granted for entity1 (wasOwner=false, isOwner=true, hasOwner=true, inJeopardy=false)
- // - remote owner for entity2 (wasOwner=false, isOwner=false, hasOwner=true, inJeopardy=false)
- verify(leaderListener, timeout(5000).times(4)).ownershipChanged(or(
- or(ownershipChange(entity1, false, false, false), ownershipChange(entity2, false, false, false)),
- or(ownershipChange(entity1, false, true, true), ownershipChange(entity2, false, false, true))));
-
- verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
- verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
-
- // Verify entity2's owner doesn't change.
-
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
- verifyNoMoreInteractions(leaderListener);
- verifyNoMoreInteractions(peer1Listener);
- verifyNoMoreInteractions(peer2Listener);
-
- testLog.info("testLeaderIsolationWithPendingCandidateAdded ending");
- }
-
- @Test
- public void testListenerRegistration() {
- testLog.info("testListenerRegistration starting");
-
- ShardTestKit kit = new ShardTestKit(getSystem());
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
- peer.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- String otherEntityType = "otherEntityType";
- final DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
- final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
- final DOMEntity entity3 = new DOMEntity(ENTITY_TYPE, ENTITY_ID3);
- final DOMEntity entity4 = new DOMEntity(otherEntityType, ENTITY_ID3);
- DOMEntityOwnershipListener listener = mock(DOMEntityOwnershipListener.class);
-
- // Register listener
-
- leader.tell(new RegisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Register a couple candidates for the desired entity type and verify listener is notified.
-
- leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
-
- leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
- reset(listener);
-
- // Register another candidate for another entity type and verify listener is not notified.
-
- leader.tell(new RegisterCandidateLocal(entity4), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verify(listener, never()).ownershipChanged(ownershipChange(entity4));
-
- // Register remote candidate for entity1
-
- peer.tell(new RegisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
- verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
- // Unregister the local candidate for entity1 and verify listener is notified
-
- leader.tell(new UnregisterCandidateLocal(entity1), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
- reset(listener);
-
- // Unregister the listener, add a candidate for entity3 and verify listener isn't notified
-
- leader.tell(new UnregisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- leader.tell(new RegisterCandidateLocal(entity3), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verifyOwner(leader, ENTITY_TYPE, entity3.getIdentifier(), LOCAL_MEMBER_NAME);
- Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
- verify(listener, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-
- // Re-register the listener and verify it gets notified of currently owned entities
-
- reset(listener);
-
- leader.tell(new RegisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- verify(listener, timeout(5000).times(2)).ownershipChanged(or(ownershipChange(entity2, false, true, true),
- ownershipChange(entity3, false, true, true)));
- Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
- verify(listener, never()).ownershipChanged(ownershipChange(entity4));
- verify(listener, times(1)).ownershipChanged(ownershipChange(entity1));
-
- testLog.info("testListenerRegistration ending");
- }
-
- @Test
- public void testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived() {
- testLog.info("testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived starting");
-
- ShardTestKit kit = new ShardTestKit(getSystem());
- EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder()
- .addStrategy(ENTITY_TYPE, LastCandidateSelectionStrategy.class, 500);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
- peer.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME, builder.build()),
- leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
- // Add a remote candidate
-
- peer.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Register local
-
- leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Verify the local candidate becomes owner
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
- testLog.info("testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived ending");
- }
-
- @Test
- public void testDelayedEntityOwnerSelection() {
- testLog.info("testDelayedEntityOwnerSelection starting");
-
- final ShardTestKit kit = new ShardTestKit(getSystem());
- EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder()
- .addStrategy(ENTITY_TYPE, LastCandidateSelectionStrategy.class, 500);
-
- dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
- ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
- ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
- ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
- TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
- peerId1.toString());
- peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
- newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
- peerId2.toString());
- peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
- TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
- newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME,
- builder.build()), leaderId.toString());
-
- ShardTestKit.waitUntilLeader(leader);
-
- DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
- // Add a remote candidate
-
- peer1.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Register local
-
- leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
- kit.expectMsgClass(SuccessReply.class);
-
- // Verify the local candidate becomes owner
-
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
- verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
- verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
- testLog.info("testDelayedEntityOwnerSelection ending");
- }
-
- private Props newLocalShardProps() {
- return newShardProps(newShardId(LOCAL_MEMBER_NAME), Collections.<String,String>emptyMap(), LOCAL_MEMBER_NAME);
- }
-
- private Props newShardProps(final ShardIdentifier shardId, final Map<String,String> peers,
- final String memberName) {
- return newShardProps(shardId, peers, memberName, EntityOwnerSelectionStrategyConfig.newBuilder().build());
- }
-
- private Props newShardProps(final ShardIdentifier shardId, final Map<String,String> peers, final String memberName,
- final EntityOwnerSelectionStrategyConfig config) {
- return newShardBuilder(shardId, peers, memberName).ownerSelectionStrategyConfig(config).props()
- .withDispatcher(Dispatchers.DefaultDispatcherId());
- }
-
- private EntityOwnershipShard.Builder newShardBuilder(final ShardIdentifier shardId, final Map<String, String> peers,
- final String memberName) {
- return EntityOwnershipShard.newBuilder()
- .id(shardId)
- .peerAddresses(peers)
- .datastoreContext(dataStoreContextBuilder.build())
- .schemaContextProvider(() -> EOSTestUtils.SCHEMA_CONTEXT)
- .localMemberName(MemberName.forName(memberName))
- .ownerSelectionStrategyConfig(EntityOwnerSelectionStrategyConfig.newBuilder().build());
- }
-
- private Map<String, String> peerMap(final String... peerIds) {
- ImmutableMap.Builder<String, String> builder = ImmutableMap.<String, String>builder();
- for (String peerId: peerIds) {
- builder.put(peerId, actorFactory.createTestActorPath(peerId)).build();
- }
-
- return builder.build();
- }
-
- private static class TestEntityOwnershipShard extends EntityOwnershipShard {
- private final ActorRef collectorActor;
- private final Map<Class<?>, Predicate<?>> dropMessagesOfType = new ConcurrentHashMap<>();
-
- TestEntityOwnershipShard(final Builder builder, final ActorRef collectorActor) {
- super(builder);
- this.collectorActor = collectorActor;
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- @Override
- public void handleCommand(final Object message) {
- Predicate drop = dropMessagesOfType.get(message.getClass());
- if (drop == null || !drop.test(message)) {
- super.handleCommand(message);
- }
-
- if (collectorActor != null) {
- collectorActor.tell(message, ActorRef.noSender());
- }
- }
-
- void startDroppingMessagesOfType(final Class<?> msgClass) {
- dropMessagesOfType.put(msgClass, msg -> true);
- }
-
- <T> void startDroppingMessagesOfType(final Class<T> msgClass, final Predicate<T> filter) {
- dropMessagesOfType.put(msgClass, filter);
- }
-
- void stopDroppingMessagesOfType(final Class<?> msgClass) {
- dropMessagesOfType.remove(msgClass);
- }
-
- ActorRef collectorActor() {
- return collectorActor;
- }
-
- static Props props(final Builder builder) {
- return props(builder, null);
- }
-
- static Props props(final Builder builder, final ActorRef collectorActor) {
- return Props.create(TestEntityOwnershipShard.class, builder, collectorActor)
- .withDispatcher(Dispatchers.DefaultDispatcherId());
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.entityownership.EntityOwnersModel.entityPath;
-
-import java.util.Map;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-public class EntityOwnershipStatisticsTest extends AbstractActorTest {
- private static final String LOCAL_MEMBER_NAME = "member-1";
- private static final String REMOTE_MEMBER_NAME1 = "member-2";
- private static final String REMOTE_MEMBER_NAME2 = "member-3";
- private static final String ENTITY_TYPE = "test";
- private static final YangInstanceIdentifier ENTITY_ID1 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
- private static final YangInstanceIdentifier ENTITY_ID2 =
- YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-
- private final Shard mockShard = Mockito.mock(Shard.class);
-
- private final ShardDataTree shardDataTree = new ShardDataTree(mockShard, EOSTestUtils.SCHEMA_CONTEXT,
- TreeType.OPERATIONAL);
- private EntityOwnershipStatistics ownershipStatistics;
-
- @Before
- public void setup() {
- ownershipStatistics = new EntityOwnershipStatistics();
- ownershipStatistics.init(shardDataTree);
- }
-
- @Test
- public void testOnDataTreeChanged() throws Exception {
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME));
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, LOCAL_MEMBER_NAME));
-
- // Write local member as owner for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
- assertStatistics(ownershipStatistics.all(), LOCAL_MEMBER_NAME, 1L);
-
- // Add remote member 1 as candidate for entity 1 - ownershipStatistics support should not get notified
-
- writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, REMOTE_MEMBER_NAME1));
- assertStatistics(ownershipStatistics.all(), LOCAL_MEMBER_NAME, 1L);
-
- // Change owner to remote member 1 for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME1));
- Map<String, Map<String, Long>> statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 1L);
-
- // Change owner to remote member 2 for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME2));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 1L);
-
- // Clear the owner for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, ""));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Change owner to the local member for entity 1
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Change owner to remote member 1 for entity 2
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, REMOTE_MEMBER_NAME1));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 1L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Change owner to the local member for entity 2
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 2L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Write local member owner for entity 2 again - expect no change
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 2L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Clear the owner for entity 2
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, ""));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- // Clear the owner for entity 2 again - expect no change
-
- writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, ""));
- statistics = ownershipStatistics.all();
- assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
- assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
- }
-
- private static void assertStatistics(final Map<String, Map<String, Long>> statistics, final String memberName,
- final long val) {
- assertEquals(val, statistics.get(ENTITY_TYPE).get(memberName).longValue());
- }
-
- private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
- throws DataValidationFailedException {
- AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import org.junit.Test;
-
-public class EntityOwnerSelectionStrategyConfigReaderTest {
-
- @Test
- public void testReadStrategies() {
- final Map<Object, Object> props = new java.util.HashMap<>();
- props.put("entity.type.test",
- "org.opendaylight.controller.cluster.entityownership.selectionstrategy.LastCandidateSelectionStrategy,100");
-
-
- final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
- .loadStrategyWithConfig(props);
-
- assertTrue(config.isStrategyConfigured("test"));
-
- final EntityOwnerSelectionStrategy strategy = config.createStrategy("test",
- Collections.<String, Long>emptyMap());
- assertTrue(strategy.toString(), strategy instanceof LastCandidateSelectionStrategy);
- assertEquals(100L, strategy.getSelectionDelayInMillis());
-
- final EntityOwnerSelectionStrategy strategy1 = config.createStrategy("test", Collections.emptyMap());
- assertEquals(strategy, strategy1);
-
- config.clearStrategies();
-
- final EntityOwnerSelectionStrategy strategy2 = config.createStrategy("test", Collections.emptyMap());
- assertNotEquals(strategy1, strategy2);
- }
-
- @Test
- public void testReadStrategiesWithEmptyConfiguration() {
-
- final Map<Object, Object> props = new HashMap<>();
- final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
- .loadStrategyWithConfig(props);
-
- assertFalse(config.isStrategyConfigured("test"));
- }
-
- @Test
- public void testReadStrategiesWithNullConfiguration() {
- final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
- .loadStrategyWithConfig(null);
- assertFalse(config.isStrategyConfigured("test"));
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void testReadStrategiesInvalidDelay() {
- final Map<Object, Object> props = new HashMap<>();
- props.put("entity.type.test",
- "org.opendaylight.controller.cluster.entityownership.selectionstrategy.LastCandidateSelectionStrategy,foo");
- EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(props);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void testReadStrategiesInvalidClassType() {
- final Map<Object, Object> props = new HashMap<>();
- props.put("entity.type.test", "String,100");
- EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(props);
- }
-
- @Test
- public void testReadStrategiesMissingDelay() {
- final Map<Object, Object> props = new HashMap<>();
- props.put("entity.type.test",
- "org.opendaylight.controller.cluster.entityownership.selectionstrategy.LastCandidateSelectionStrategy,100");
- props.put("entity.type.test1",
- "org.opendaylight.controller.cluster.entityownership.selectionstrategy.LastCandidateSelectionStrategy");
-
-
- final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
- .loadStrategyWithConfig(props);
-
- assertEquals(100, config.createStrategy("test", Collections.emptyMap()).getSelectionDelayInMillis());
- assertEquals(0, config.createStrategy("test2", Collections.emptyMap()).getSelectionDelayInMillis());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import com.google.common.collect.Iterables;
-import java.util.Collection;
-import java.util.Map;
-
-public class LastCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
- public LastCandidateSelectionStrategy(long selectionDelayInMillis, Map<String, Long> initialStatistics) {
- super(selectionDelayInMillis, initialStatistics);
- }
-
- @Override
- public String newOwner(String currentOwner, Collection<String> viableCandidates) {
- return Iterables.getLast(viableCandidates);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.entityownership.selectionstrategy;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import org.junit.Test;
-
-public class LeastLoadedCandidateSelectionStrategyTest {
-
- @Test
- public void testLeastLoadedStrategy() {
- LeastLoadedCandidateSelectionStrategy strategy = new LeastLoadedCandidateSelectionStrategy(
- 0L, Collections.<String, Long>emptyMap());
-
- String owner = strategy.newOwner(null, prepareViableCandidates(3));
- assertEquals("member-1", owner);
-
- Map<String, Long> localStatistics = strategy.getLocalStatistics();
- assertEquals(1L, (long) localStatistics.get("member-1"));
-
- // member-2 has least load
- strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(5,2,4));
- owner = strategy.newOwner(null, prepareViableCandidates(3));
- assertEquals("member-2", owner);
-
- assertStatistics(strategy.getLocalStatistics(), 5,3,4);
-
- // member-3 has least load
- strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(5,7,4));
- owner = strategy.newOwner(null, prepareViableCandidates(3));
- assertEquals("member-3", owner);
-
- assertStatistics(strategy.getLocalStatistics(), 5,7,5);
-
- // member-1 has least load
- strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(1,7,4));
- owner = strategy.newOwner(null, prepareViableCandidates(3));
- assertEquals("member-1", owner);
-
- assertStatistics(strategy.getLocalStatistics(), 2,7,4);
-
- // Let member-3 become the owner
- strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(3,3,0));
- owner = strategy.newOwner(null, prepareViableCandidates(3));
- assertEquals("member-3", owner);
-
- assertStatistics(strategy.getLocalStatistics(), 3,3,1);
-
- // member-3 is no longer viable so choose a new owner
- owner = strategy.newOwner("member-3", prepareViableCandidates(2));
- assertEquals("member-1", owner);
-
- assertStatistics(strategy.getLocalStatistics(), 4,3,0);
-
- }
-
- private static Map<String, Long> prepareStatistics(long... count) {
- Map<String, Long> statistics = new HashMap<>();
- for (int i = 0; i < count.length; i++) {
- statistics.put("member-" + (i + 1), count[i]);
- }
- return statistics;
- }
-
- private static Collection<String> prepareViableCandidates(int count) {
- Collection<String> viableCandidates = new ArrayList<>();
- for (int i = 0; i < count; i++) {
- viableCandidates.add("member-" + (i + 1));
- }
- return viableCandidates;
- }
-
- private static void assertStatistics(Map<String, Long> statistics, long... count) {
- for (int i = 0; i < count.length; i++) {
- assertEquals(count[i], (long) statistics.get("member-" + (i + 1)));
- }
- }
-}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-cluster_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-remote_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-osgi_2.13</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-clustering-commons</artifactId>
- </dependency>
-
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
<!-- Test Dependencies -->
<dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-core</artifactId>
- <scope>test</scope>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
</dependency>
<dependency>
import akka.actor.Props;
import akka.actor.UntypedAbstractActor;
-public class DummyShardManager extends UntypedAbstractActor {
+public final class DummyShardManager extends UntypedAbstractActor {
public DummyShardManager(final Configuration configuration, final String memberName, final String[] shardNames,
final String type) {
new DummyShardsCreator(configuration, getContext(), memberName, shardNames, type).create();
cluster {
seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2553"]
- auto-down-unreachable-after = 10s
-
roles = [
"member-2"
]
cluster {
seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2554"]
- auto-down-unreachable-after = 10s
-
roles = [
"member-3"
]
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-actor_2.13</artifactId>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-cluster_2.13</artifactId>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-remote_2.13</artifactId>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-testkit_2.13</artifactId>
- <scope>test</scope>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>org.eclipse.jdt.annotation</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-osgi_2.13</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.compendium</artifactId>
- </exclusion>
- </exclusions>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-slf4j_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
</dependency>
<dependency>
- <groupId>com.typesafe.akka</groupId>
- <artifactId>akka-persistence_2.13</artifactId>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-binfmt</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>repackaged-akka</artifactId>
</dependency>
- <!-- SAL Dependencies -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-common-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-common-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-dom-api</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
</dependency>
- <!-- Yang tools-->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>osgi.cmpn</artifactId>
+ <artifactId>org.osgi.service.metatype.annotations</artifactId>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</dependency>
+
<!-- Test Dependencies -->
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-testkit_2.13</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.0</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>util</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-test-util</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>test</scope>
- </dependency>
</dependencies>
<build>
import java.util.concurrent.TimeoutException;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
-abstract class AbstractRemoteFuture<T, E extends Exception> extends AbstractFuture<T> {
+abstract class AbstractRemoteFuture<T, O, E extends Exception> extends AbstractFuture<O> {
private static final Logger LOG = LoggerFactory.getLogger(AbstractRemoteFuture.class);
- private final @NonNull SchemaPath type;
+ private final @NonNull T type;
- AbstractRemoteFuture(final @NonNull SchemaPath type, final Future<Object> requestFuture) {
+ AbstractRemoteFuture(final @NonNull T type, final Future<Object> requestFuture) {
this.type = requireNonNull(type);
requestFuture.onComplete(new FutureUpdater(), ExecutionContext.Implicits$.MODULE$.global());
}
@Override
- public final T get() throws InterruptedException, ExecutionException {
+ public final O get() throws InterruptedException, ExecutionException {
try {
return super.get();
} catch (ExecutionException e) {
}
@Override
- public final T get(final long timeout, final TimeUnit unit)
+ public final O get(final long timeout, final TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
try {
return super.get(timeout, unit);
}
@Override
- protected final boolean set(final T value) {
+ protected final boolean set(final O value) {
final boolean ret = super.set(value);
if (ret) {
LOG.debug("Future {} for action {} successfully completed", this, type);
setException(error);
}
- abstract @Nullable T processReply(Object reply);
+ abstract @Nullable O processReply(Object reply);
abstract @NonNull Class<E> exceptionClass();
@Override
public void onComplete(final Throwable error, final Object reply) {
if (error == null) {
- final T result = processReply(reply);
+ final O result = processReply(reply);
if (result != null) {
LOG.debug("Received response for operation {}: result is {}", type, result);
set(result);
/**
* An abstract base class for remote RPC/action implementations.
*/
-abstract class AbstractRemoteImplementation<T extends AbstractExecute<?>> {
+abstract class AbstractRemoteImplementation<T extends AbstractExecute<?, ?>> {
// 0 for local, 1 for binding, 2 for remote
static final long COST = 2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Component(immediate = true, configurationPid = "org.opendaylight.controller.remoterpc")
+@Component(configurationPid = "org.opendaylight.controller.remoterpc")
@Designate(ocd = OSGiRemoteOpsProvider.Config.class)
public final class OSGiRemoteOpsProvider {
@ObjectClassDefinition()
private static final Logger LOG = LoggerFactory.getLogger(OSGiRemoteOpsProvider.class);
- @Reference
- ActorSystemProvider actorSystemProvider = null;
- @Reference
- DOMRpcProviderService rpcProviderService = null;
- @Reference
- DOMRpcService rpcService = null;
- @Reference
- DOMActionProviderService actionProviderService = null;
- @Reference
- DOMActionService actionService = null;
-
private ActorRef opsManager;
@Activate
- void activate(final Config config) {
+ public OSGiRemoteOpsProvider(@Reference final ActorSystemProvider actorSystemProvider,
+ @Reference final DOMRpcProviderService rpcProviderService, @Reference final DOMRpcService rpcService,
+ @Reference final DOMActionProviderService actionProviderService,
+ @Reference final DOMActionService actionService, final Config config) {
LOG.info("Remote Operations service starting");
final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
final RemoteOpsProviderConfig opsConfig = RemoteOpsProviderConfig.newInstance(actorSystem.name(),
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
/**
* Actor receiving invocation requests from remote nodes, routing them to
@Override
protected void handleReceive(final Object message) {
- if (message instanceof ExecuteRpc) {
+ if (message instanceof ExecuteRpc executeRpc) {
LOG.debug("Handling ExecuteOps Message");
- execute((ExecuteRpc) message);
- } else if (message instanceof ExecuteAction) {
- execute((ExecuteAction) message);
+ execute(executeRpc);
+ } else if (message instanceof ExecuteAction executeAction) {
+ execute(executeAction);
} else {
unknownMessage(message);
}
return;
}
- Futures.addCallback(future, new AbstractCallback<DOMRpcResult>(getSender(), msg.getType()) {
+ Futures.addCallback(future, new AbstractCallback<QName, DOMRpcResult>(getSender(), msg.getType()) {
@Override
- Object nullResponse(final SchemaPath type) {
+ Object nullResponse(final QName type) {
LOG.warn("Execution of {} resulted in null result", type);
return new RpcResponse(null);
}
@Override
- Object response(final SchemaPath type, final DOMRpcResult result) {
- final Collection<? extends RpcError> errors = result.getErrors();
- return errors.isEmpty() ? new RpcResponse(result.getResult())
+ Object response(final QName type, final DOMRpcResult result) {
+ final Collection<? extends RpcError> errors = result.errors();
+ return errors.isEmpty() ? new RpcResponse(result.value())
// This is legacy (wrong) behavior, which ignores the fact that errors may be just warnings,
// discarding any output
: new Failure(new RpcErrorsException(String.format("Execution of rpc %s failed", type),
return;
}
- Futures.addCallback(future, new AbstractCallback<DOMActionResult>(getSender(), msg.getType()) {
+ Futures.addCallback(future, new AbstractCallback<Absolute, DOMActionResult>(getSender(), msg.getType()) {
@Override
- Object nullResponse(final SchemaPath type) {
+ Object nullResponse(final Absolute type) {
throw new IllegalStateException("Null invocation result of action " + type);
}
@Override
- Object response(final SchemaPath type, final DOMActionResult result) {
+ Object response(final Absolute type, final DOMActionResult result) {
final Collection<? extends RpcError> errors = result.getErrors();
return errors.isEmpty() ? new ActionResponse(result.getOutput(), result.getErrors())
// This is legacy (wrong) behavior, which ignores the fact that errors may be just warnings,
}, MoreExecutors.directExecutor());
}
- private abstract class AbstractCallback<T> implements FutureCallback<T> {
+ private abstract class AbstractCallback<T, R> implements FutureCallback<R> {
private final ActorRef replyTo;
- private final SchemaPath type;
+ private final T type;
- AbstractCallback(final ActorRef replyTo, final SchemaPath type) {
+ AbstractCallback(final ActorRef replyTo, final T type) {
this.replyTo = requireNonNull(replyTo);
this.type = requireNonNull(type);
}
@Override
- public final void onSuccess(final T result) {
+ public final void onSuccess(final R result) {
final Object response;
if (result == null) {
// This shouldn't happen but the FutureCallback annotates the result param with Nullable so handle null
replyTo.tell(new Failure(failure), self());
}
- abstract @NonNull Object nullResponse(@NonNull SchemaPath type);
+ abstract @NonNull Object nullResponse(@NonNull T type);
- abstract @NonNull Object response(@NonNull SchemaPath type, @NonNull T result);
+ abstract @NonNull Object response(@NonNull T type, @NonNull R result);
}
}
import org.opendaylight.mdsal.dom.api.DOMActionService;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import scala.concurrent.duration.FiniteDuration;
/**
private final DOMActionProviderService actionProvisionRegistry;
private final DOMActionService actionService;
- private ListenerRegistration<OpsListener> listenerReg;
+ private Registration listenerReg;
private ActorRef opsInvoker;
private ActorRef actionRegistry;
private ActorRef rpcRegistry;
this.rpcProvisionRegistry = requireNonNull(rpcProvisionRegistry);
this.rpcServices = requireNonNull(rpcServices);
this.config = requireNonNull(config);
- this.actionProvisionRegistry = requireNonNull(actionProviderService);
+ actionProvisionRegistry = requireNonNull(actionProviderService);
this.actionService = requireNonNull(actionService);
}
import akka.actor.Address;
import akka.actor.Props;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.RemoteActionEndpoint;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
-import org.opendaylight.mdsal.dom.api.DOMActionImplementation;
import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
/**
* Actor handling registration of RPCs and Actions available on remote nodes with the local
* {@link DOMRpcProviderService} and {@link DOMActionProviderService}.
*/
final class OpsRegistrar extends AbstractUntypedActor {
- private final Map<Address, ObjectRegistration<DOMRpcImplementation>> rpcRegs = new HashMap<>();
- private final Map<Address, ObjectRegistration<DOMActionImplementation>> actionRegs = new HashMap<>();
+ private final Map<Address, Registration> rpcRegs = new HashMap<>();
+ private final Map<Address, Registration> actionRegs = new HashMap<>();
private final DOMRpcProviderService rpcProviderService;
private final RemoteOpsProviderConfig config;
private final DOMActionProviderService actionProviderService;
@Override
public void postStop() throws Exception {
- rpcRegs.values().forEach(ObjectRegistration::close);
+ rpcRegs.values().forEach(Registration::close);
rpcRegs.clear();
- actionRegs.values().forEach(ObjectRegistration::close);
+ actionRegs.values().forEach(Registration::close);
actionRegs.clear();
super.postStop();
@Override
protected void handleReceive(final Object message) {
- if (message instanceof UpdateRemoteEndpoints) {
+ if (message instanceof UpdateRemoteEndpoints updateEndpoints) {
LOG.debug("Handling updateRemoteEndpoints message");
- updateRemoteRpcEndpoints(((UpdateRemoteEndpoints) message).getRpcEndpoints());
- } else if (message instanceof UpdateRemoteActionEndpoints) {
+ updateRemoteRpcEndpoints(updateEndpoints.getRpcEndpoints());
+ } else if (message instanceof UpdateRemoteActionEndpoints updateEndpoints) {
LOG.debug("Handling updateRemoteActionEndpoints message");
- updateRemoteActionEndpoints(((UpdateRemoteActionEndpoints) message).getActionEndpoints());
+ updateRemoteActionEndpoints(updateEndpoints.getActionEndpoints());
} else {
unknownMessage(message);
}
* Note that when an RPC moves from one remote node to another, we also do not want to expose the gap,
* hence we register all new implementations before closing all registrations.
*/
- final Collection<ObjectRegistration<?>> prevRegs = new ArrayList<>(rpcEndpoints.size());
+ final var prevRegs = new ArrayList<Registration>(rpcEndpoints.size());
for (Entry<Address, Optional<RemoteRpcEndpoint>> e : rpcEndpoints.entrySet()) {
LOG.debug("Updating RPC registrations for {}", e.getKey());
- final ObjectRegistration<DOMRpcImplementation> prevReg;
+ final Registration prevReg;
final Optional<RemoteRpcEndpoint> maybeEndpoint = e.getValue();
if (maybeEndpoint.isPresent()) {
- final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
+ final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
final RemoteRpcImplementation impl = new RemoteRpcImplementation(endpoint.getRouter(), config);
prevReg = rpcRegs.put(e.getKey(), rpcProviderService.registerRpcImplementation(impl,
endpoint.getRpcs()));
}
}
- prevRegs.forEach(ObjectRegistration::close);
+ prevRegs.forEach(Registration::close);
}
/**
* Note that when an Action moves from one remote node to another, we also do not want to expose the gap,
* hence we register all new implementations before closing all registrations.
*/
- final Collection<ObjectRegistration<?>> prevRegs = new ArrayList<>(actionEndpoints.size());
+ final var prevRegs = new ArrayList<Registration>(actionEndpoints.size());
for (Entry<Address, Optional<RemoteActionEndpoint>> e : actionEndpoints.entrySet()) {
LOG.debug("Updating action registrations for {}", e.getKey());
- final ObjectRegistration<DOMActionImplementation> prevReg;
+ final Registration prevReg;
final Optional<RemoteActionEndpoint> maybeEndpoint = e.getValue();
if (maybeEndpoint.isPresent()) {
- final RemoteActionEndpoint endpoint = maybeEndpoint.get();
+ final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
final RemoteActionImplementation impl = new RemoteActionImplementation(endpoint.getRouter(), config);
prevReg = actionRegs.put(e.getKey(), actionProviderService.registerActionImplementation(impl,
endpoint.getActions()));
}
}
- prevRegs.forEach(ObjectRegistration::close);
+ prevRegs.forEach(Registration::close);
}
}
import org.opendaylight.mdsal.dom.api.DOMActionResult;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* Routes action request to a remote invoker, which will execute the action and return with result.
*/
@Override
- public ListenableFuture<DOMActionResult> invokeAction(final SchemaPath type, final DOMDataTreeIdentifier path,
+ public ListenableFuture<DOMActionResult> invokeAction(final Absolute type, final DOMDataTreeIdentifier path,
final ContainerNode input) {
LOG.debug("invoking action {} with path {}", type, path);
return new RemoteDOMActionFuture(type, ask(ExecuteAction.from(type, path, input)));
import org.opendaylight.mdsal.dom.api.DOMActionResult;
import org.opendaylight.mdsal.dom.spi.SimpleDOMActionResult;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
import scala.concurrent.Future;
-final class RemoteDOMActionFuture extends AbstractRemoteFuture<DOMActionResult, DOMActionException> {
- RemoteDOMActionFuture(final @NonNull SchemaPath type, final @NonNull Future<Object> requestFuture) {
+final class RemoteDOMActionFuture extends AbstractRemoteFuture<Absolute, DOMActionResult, DOMActionException> {
+ RemoteDOMActionFuture(final @NonNull Absolute type, final @NonNull Future<Object> requestFuture) {
super(type, requestFuture);
}
@Override
DOMActionResult processReply(final Object reply) {
- if (reply instanceof ActionResponse) {
- final ActionResponse actionReply = (ActionResponse) reply;
+ if (reply instanceof ActionResponse actionReply) {
final ContainerNode output = actionReply.getOutput();
return output == null ? new SimpleDOMActionResult(actionReply.getErrors())
: new SimpleDOMActionResult(output, actionReply.getErrors());
import org.opendaylight.mdsal.dom.api.DOMRpcException;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.common.QName;
import scala.concurrent.Future;
-final class RemoteDOMRpcFuture extends AbstractRemoteFuture<DOMRpcResult, DOMRpcException> {
- RemoteDOMRpcFuture(final @NonNull SchemaPath type, final @NonNull Future<Object> requestFuture) {
+final class RemoteDOMRpcFuture extends AbstractRemoteFuture<QName, DOMRpcResult, DOMRpcException> {
+ RemoteDOMRpcFuture(final @NonNull QName type, final @NonNull Future<Object> requestFuture) {
super(type, requestFuture);
}
@Override
DOMRpcResult processReply(final Object reply) {
- return reply instanceof RpcResponse ? new DefaultDOMRpcResult(((RpcResponse) reply).getOutput()) : null;
+ return reply instanceof RpcResponse response ? new DefaultDOMRpcResult(response.getOutput()) : null;
}
@Override
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
/**
* A {@link DOMRpcImplementation} which routes invocation requests to a remote invoker actor.
}
@Override
- public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc,
- final NormalizedNode<?, ?> input) {
+ public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
return new RemoteDOMRpcFuture(rpc.getType(), ask(ExecuteRpc.from(rpc, input)));
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.remote.rpc;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import org.opendaylight.mdsal.dom.api.DOMRpcException;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
/**
* @author Thomas Pantelis
*/
public class RpcErrorsException extends DOMRpcException {
-
+ @java.io.Serial
private static final long serialVersionUID = 1L;
private static class RpcErrorData implements Serializable {
+ @java.io.Serial
private static final long serialVersionUID = 1L;
final ErrorSeverity severity;
final ErrorType errorType;
- final String tag;
+ final ErrorTag tag;
final String applicationTag;
final String message;
final String info;
final Throwable cause;
- RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final String tag,
+ RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final ErrorTag tag,
final String applicationTag, final String message, final String info, final Throwable cause) {
this.severity = severity;
this.errorType = errorType;
public RpcErrorsException(final String message, final Iterable<? extends RpcError> rpcErrors) {
super(message);
- for (final RpcError rpcError: rpcErrors) {
+ for (var rpcError : rpcErrors) {
rpcErrorDataList.add(new RpcErrorData(rpcError.getSeverity(), rpcError.getErrorType(),
rpcError.getTag(), rpcError.getApplicationTag(), rpcError.getMessage(),
rpcError.getInfo(), rpcError.getCause()));
}
public Collection<RpcError> getRpcErrors() {
- final Collection<RpcError> rpcErrors = new ArrayList<>();
- for (final RpcErrorData ed: rpcErrorDataList) {
+ final var rpcErrors = new ArrayList<RpcError>();
+ for (var ed : rpcErrorDataList) {
final RpcError rpcError = ed.severity == ErrorSeverity.ERROR
? RpcResultBuilder.newError(ed.errorType, ed.tag, ed.message, ed.applicationTag,
ed.info, ed.cause) :
@Override
public void onReceive(final Object message) {
- if (message instanceof Terminated) {
- Terminated terminated = (Terminated) message;
+ if (message instanceof Terminated terminated) {
LOG.debug("Actor terminated : {}", terminated.actor());
- } else if (message instanceof Monitor) {
- Monitor monitor = (Monitor) message;
+ } else if (message instanceof Monitor monitor) {
getContext().watch(monitor.getActorRef());
}
}
import java.io.Serializable;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
/**
* An abstract base class for invocation requests. Specialized via {@link ExecuteAction} and {@link ExecuteRpc}.
*/
-public abstract class AbstractExecute<T extends NormalizedNode<?, ?>> implements Serializable {
+public abstract class AbstractExecute<T, I extends NormalizedNode> implements Serializable {
private static final long serialVersionUID = 1L;
- private final transient @NonNull SchemaPath type;
- private final transient T input;
+ private final transient @NonNull T type;
+ private final transient I input;
- AbstractExecute(final @NonNull SchemaPath type, final T input) {
+ AbstractExecute(final @NonNull T type, final I input) {
this.type = requireNonNull(type);
this.input = input;
}
- public final @NonNull SchemaPath getType() {
+ public final @NonNull T getType() {
return type;
}
- public final T getInput() {
+ public final I getInput() {
return input;
}
/**
* An abstract base class for invocation responses. Specialized via {@link ActionResponse} and {@link RpcResponse}.
*/
-public abstract class AbstractResponse<T extends NormalizedNode<?, ?>> implements Serializable {
+public abstract class AbstractResponse<T extends NormalizedNode> implements Serializable {
private static final long serialVersionUID = 1L;
private final transient @Nullable T output;
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
@SuppressWarnings("unchecked")
final ImmutableList<RpcError> errors = (ImmutableList<RpcError>) in.readObject();
- final Optional<NormalizedNode<?, ?>> output = SerializationUtils.readNormalizedNode(in);
+ final Optional<NormalizedNode> output = SerializationUtils.readNormalizedNode(in);
actionResponse = new ActionResponse(output.map(ContainerNode.class::cast), errors);
}
import com.google.common.base.MoreObjects.ToStringHelper;
import java.io.Externalizable;
import java.io.IOException;
+import java.io.InvalidObjectException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
-public final class ExecuteAction extends AbstractExecute<@NonNull ContainerNode> {
+public final class ExecuteAction extends AbstractExecute<Absolute, @NonNull ContainerNode> {
private static final long serialVersionUID = 1128904894827335676L;
private final @NonNull DOMDataTreeIdentifier path;
- private ExecuteAction(final @NonNull SchemaPath type, final @NonNull DOMDataTreeIdentifier path,
+ private ExecuteAction(final @NonNull Absolute type, final @NonNull DOMDataTreeIdentifier path,
final @NonNull ContainerNode input) {
super(type, requireNonNull(input));
this.path = requireNonNull(path);
}
- public static @NonNull ExecuteAction from(final @NonNull SchemaPath type, @NonNull final DOMDataTreeIdentifier path,
+ public static @NonNull ExecuteAction from(final @NonNull Absolute type, @NonNull final DOMDataTreeIdentifier path,
final @NonNull ContainerNode input) {
return new ExecuteAction(type, path, input);
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
- try (NormalizedNodeDataOutput stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
- stream.writeSchemaPath(executeAction.getType());
- executeAction.getPath().getDatastoreType().writeTo(out);
- stream.writeYangInstanceIdentifier(executeAction.getPath().getRootIdentifier());
+ try (var stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+ stream.writeSchemaNodeIdentifier(executeAction.getType());
+ executeAction.getPath().datastore().writeTo(out);
+ stream.writeYangInstanceIdentifier(executeAction.getPath().path());
stream.writeOptionalNormalizedNode(executeAction.getInput());
}
}
@Override
public void readExternal(final ObjectInput in) throws IOException {
final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
- final SchemaPath name = stream.readSchemaPath();
+ final SchemaNodeIdentifier sni = stream.readSchemaNodeIdentifier();
+ if (!(sni instanceof Absolute absolute)) {
+ throw new InvalidObjectException("Non-absolute type " + sni);
+ }
+
final LogicalDatastoreType type = LogicalDatastoreType.readFrom(in);
final YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
final ContainerNode input = (ContainerNode) stream.readOptionalNormalizedNode().orElse(null);
- executeAction = new ExecuteAction(name, new DOMDataTreeIdentifier(type, path), input);
+ executeAction = new ExecuteAction(absolute, DOMDataTreeIdentifier.of(type, path), input);
}
private Object readResolve() {
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-public final class ExecuteRpc extends AbstractExecute<@Nullable NormalizedNode<?, ?>> {
+public final class ExecuteRpc extends AbstractExecute<QName, @Nullable ContainerNode> {
private static final long serialVersionUID = 1128904894827335676L;
- private ExecuteRpc(final @NonNull SchemaPath type, final @Nullable NormalizedNode<?, ?> input) {
+ private ExecuteRpc(final @NonNull QName type, final @Nullable ContainerNode input) {
super(type, input);
}
public static @NonNull ExecuteRpc from(final @NonNull DOMRpcIdentifier rpc,
- final @Nullable NormalizedNode<?, ?> input) {
+ final @Nullable ContainerNode input) {
return new ExecuteRpc(rpc.getType(), input);
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
try (NormalizedNodeDataOutput stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
- stream.writeQName(executeRpc.getType().getLastComponent());
+ stream.writeQName(executeRpc.getType());
stream.writeOptionalNormalizedNode(executeRpc.getInput());
}
}
@Override
public void readExternal(final ObjectInput in) throws IOException {
final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
- final SchemaPath type = SchemaPath.ROOT.createChild(stream.readQName());
- final NormalizedNode<?, ?> input = stream.readOptionalNormalizedNode().orElse(null);
+ final QName type = stream.readQName();
+ final ContainerNode input = RpcResponse.unmaskContainer(stream.readOptionalNormalizedNode());
executeRpc = new ExecuteRpc(type, input);
}
import java.io.Externalizable;
import java.io.IOException;
+import java.io.InvalidObjectException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+import java.util.Optional;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class RpcResponse extends AbstractResponse<NormalizedNode<?, ?>> {
+public class RpcResponse extends AbstractResponse<ContainerNode> {
private static final long serialVersionUID = -4211279498688989245L;
- public RpcResponse(final @Nullable NormalizedNode<?, ?> output) {
+ public RpcResponse(final @Nullable ContainerNode output) {
super(output);
}
return new Proxy(this);
}
+ static @Nullable ContainerNode unmaskContainer(final Optional<NormalizedNode> optNode)
+ throws InvalidObjectException {
+ if (optNode.isEmpty()) {
+ return null;
+ }
+ final var node = optNode.orElseThrow();
+ if (node instanceof ContainerNode container) {
+ return container;
+ }
+ throw new InvalidObjectException("Unexpected data " + node.contract().getSimpleName());
+ }
+
private static class Proxy implements Externalizable {
private static final long serialVersionUID = 1L;
@Override
public void readExternal(final ObjectInput in) throws IOException {
- rpcResponse = new RpcResponse(SerializationUtils.readNormalizedNode(in).orElse(null));
+ rpcResponse = new RpcResponse(unmaskContainer(SerializationUtils.readNormalizedNode(in)));
}
private Object readResolve() {
@Override
protected void handleCommand(final Object message) throws Exception {
- if (message instanceof ActionRegistry.Messages.UpdateActions) {
+ if (message instanceof ActionRegistry.Messages.UpdateActions updateActions) {
LOG.debug("handling updatesActionRoutes message");
- updatesActionRoutes((Messages.UpdateActions) message);
+ updatesActionRoutes(updateActions);
} else {
super.handleCommand(message);
}
}
Collection<DOMActionInstance> getAddedActions() {
- return this.addedActions;
+ return addedActions;
}
Collection<DOMActionInstance> getRemovedActions() {
- return this.removedActions;
+ return removedActions;
}
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Externalizable;
import java.io.IOException;
+import java.io.InvalidObjectException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
final NormalizedNodeDataOutput nnout = NormalizedNodeStreamVersion.current().newDataOutput(out);
nnout.writeInt(actions.size());
for (DOMActionInstance id : actions) {
- nnout.writeSchemaPath(id.getType());
- YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(
- new YangInstanceIdentifier.NodeIdentifier(id.getType().getLastComponent()));
- nnout.writeYangInstanceIdentifier(actionPath);
+ final Absolute type = id.getType();
+ nnout.writeSchemaNodeIdentifier(type);
+ nnout.writeYangInstanceIdentifier(YangInstanceIdentifier.of(type.lastNodeIdentifier()));
}
}
final int size = nnin.readInt();
actions = new ArrayList<>(size);
for (int i = 0; i < size; ++i) {
- actions.add(DOMActionInstance.of(nnin.readSchemaPath(), LogicalDatastoreType.OPERATIONAL,
+ final SchemaNodeIdentifier sni = nnin.readSchemaNodeIdentifier();
+ if (!(sni instanceof Absolute absolute)) {
+ throw new InvalidObjectException("Non-absolute type " + sni);
+ }
+
+ actions.add(DOMActionInstance.of(absolute, LogicalDatastoreType.OPERATIONAL,
nnin.readYangInstanceIdentifier()));
}
}
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
public final class RoutingTable extends AbstractRoutingTable<RoutingTable, DOMRpcIdentifier> {
private static final class Proxy implements Externalizable {
try (NormalizedNodeDataOutput nnout = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
nnout.writeInt(rpcs.size());
for (DOMRpcIdentifier id : rpcs) {
- nnout.writeSchemaPath(id.getType());
+ // TODO: we should be able to get by with just a QName
+ nnout.writeSchemaNodeIdentifier(Absolute.of(id.getType()));
nnout.writeYangInstanceIdentifier(id.getContextReference());
}
}
final int size = nnin.readInt();
rpcs = new ArrayList<>(size);
for (int i = 0; i < size; ++i) {
- rpcs.add(DOMRpcIdentifier.create(nnin.readSchemaPath(), nnin.readYangInstanceIdentifier()));
+ // TODO: we should be able to get by with just a QName
+ rpcs.add(DOMRpcIdentifier.create(nnin.readSchemaNodeIdentifier().firstNodeIdentifier(),
+ nnin.readYangInstanceIdentifier()));
}
}
@Override
protected void handleCommand(final Object message) throws Exception {
- if (message instanceof AddOrUpdateRoutes) {
- receiveAddRoutes((AddOrUpdateRoutes) message);
- } else if (message instanceof RemoveRoutes) {
- receiveRemoveRoutes((RemoveRoutes) message);
+ if (message instanceof AddOrUpdateRoutes addRoutes) {
+ receiveAddRoutes(addRoutes);
+ } else if (message instanceof RemoveRoutes removeRoutes) {
+ receiveRemoveRoutes(removeRoutes);
} else {
super.handleCommand(message);
}
}
List<DOMRpcIdentifier> getRouteIdentifiers() {
- return this.rpcRouteIdentifiers;
+ return rpcRouteIdentifiers;
}
@Override
*/
package org.opendaylight.controller.remote.rpc.registry.gossip;
+import static java.util.Objects.requireNonNull;
import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getBucketsByMembersMessage;
import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getLocalDataMessage;
import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getRemoteBucketsMessage;
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import akka.util.Timeout;
-import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import java.util.Collection;
import java.util.Map;
-import java.util.Objects;
import java.util.function.Consumer;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
/**
* Convenience access to {@link BucketStoreActor}. Used mostly by {@link Gossiper}.
- *
- * @author Robert Varga
*/
-@Beta
@VisibleForTesting
public final class BucketStoreAccess {
private final ActorRef actorRef;
private final Timeout timeout;
public BucketStoreAccess(final ActorRef actorRef, final ExecutionContext dispatcher, final Timeout timeout) {
- this.actorRef = Objects.requireNonNull(actorRef);
- this.dispatcher = Objects.requireNonNull(dispatcher);
- this.timeout = Objects.requireNonNull(timeout);
+ this.actorRef = requireNonNull(actorRef);
+ this.dispatcher = requireNonNull(dispatcher);
+ this.timeout = requireNonNull(timeout);
}
<T extends BucketData<T>> void getBucketsByMembers(final Collection<Address> members,
final Consumer<Map<Address, Bucket<T>>> callback) {
Patterns.ask(actorRef, getBucketsByMembersMessage(members), timeout)
- .onComplete(new OnComplete<Object>() {
+ .onComplete(new OnComplete<>() {
@SuppressWarnings("unchecked")
@Override
public void onComplete(final Throwable failure, final Object success) {
}
void getBucketVersions(final Consumer<Map<Address, Long>> callback) {
- Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<Object>() {
+ Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<>() {
@SuppressWarnings("unchecked")
@Override
public void onComplete(final Throwable failure, final Object success) {
}
public enum Singletons {
- // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Bucket<?>>
+ /**
+ * Sent from Gossiper to BucketStore, response is an immutable {@code Map<Address, Bucket<?>>}.
+ */
GET_ALL_BUCKETS,
- // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Long>
+ /**
+ * Sent from Gossiper to BucketStore, response is an immutable {@code Map<Address, Long>}.
+ */
GET_BUCKET_VERSIONS,
}
}
return;
}
- if (message instanceof ExecuteInActor) {
- ((ExecuteInActor) message).accept(this);
+ if (message instanceof ExecuteInActor execute) {
+ execute.accept(this);
} else if (GET_BUCKET_VERSIONS == message) {
// FIXME: do we need to send ourselves?
getSender().tell(ImmutableMap.copyOf(versions), getSelf());
- } else if (message instanceof Terminated) {
- actorTerminated((Terminated) message);
- } else if (message instanceof DeleteSnapshotsSuccess) {
- LOG.debug("{}: got command: {}", persistenceId(), message);
- } else if (message instanceof DeleteSnapshotsFailure) {
- LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
- ((DeleteSnapshotsFailure) message).cause());
+ } else if (message instanceof Terminated terminated) {
+ actorTerminated(terminated);
+ } else if (message instanceof DeleteSnapshotsSuccess deleteSuccess) {
+ LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+ } else if (message instanceof DeleteSnapshotsFailure deleteFailure) {
+ LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
} else {
LOG.debug("Unhandled message [{}]", message);
unhandled(message);
}
private void handleSnapshotMessage(final Object message) {
- if (message instanceof SaveSnapshotFailure) {
- LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) message).cause());
+ if (message instanceof SaveSnapshotFailure saveFailure) {
+ LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
persisting = false;
self().tell(PoisonPill.getInstance(), ActorRef.noSender());
- } else if (message instanceof SaveSnapshotSuccess) {
- LOG.debug("{}: got command: {}", persistenceId(), message);
- SaveSnapshotSuccess saved = (SaveSnapshotSuccess)message;
- deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
- saved.metadata().timestamp() - 1, 0L, 0L));
+ } else if (message instanceof SaveSnapshotSuccess saveSuccess) {
+ LOG.debug("{}: got command: {}", persistenceId(), saveSuccess);
+ deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), saveSuccess.metadata().timestamp() - 1,
+ 0L, 0L));
persisting = false;
unstash();
} else {
incarnation = 0;
}
- this.localBucket = new LocalBucket<>(incarnation.intValue(), initialData);
+ this.localBucket = new LocalBucket<>(incarnation, initialData);
initialData = null;
LOG.debug("{}: persisting new incarnation {}", persistenceId(), incarnation);
persisting = true;
saveSnapshot(incarnation);
- } else if (message instanceof SnapshotOffer) {
- incarnation = (Integer) ((SnapshotOffer)message).snapshot();
+ } else if (message instanceof SnapshotOffer snapshotOffer) {
+ incarnation = (Integer) snapshotOffer.snapshot();
LOG.debug("{}: recovered incarnation {}", persistenceId(), incarnation);
} else {
LOG.warn("{}: ignoring recovery message {}", persistenceId(), message);
Gossiper(final RemoteOpsProviderConfig config, final Boolean autoStartGossipTicks) {
this.config = requireNonNull(config);
- this.autoStartGossipTicks = autoStartGossipTicks.booleanValue();
+ this.autoStartGossipTicks = autoStartGossipTicks;
}
Gossiper(final RemoteOpsProviderConfig config) {
}
if (autoStartGossipTicks) {
- gossipTask = getContext().system().scheduler().schedule(
- new FiniteDuration(1, TimeUnit.SECONDS), //initial delay
- config.getGossipTickInterval(), //interval
- getSelf(), //target
- GOSSIP_TICK, //message
- getContext().dispatcher(), //execution context
- getSelf() //sender
- );
+ gossipTask = getContext().system().scheduler().scheduleAtFixedRate(
+ // initial delay
+ new FiniteDuration(1, TimeUnit.SECONDS),
+ // interval
+ config.getGossipTickInterval(),
+ // target
+ getSelf(),
+ // message
+ GOSSIP_TICK,
+ // execution context
+ getContext().dispatcher(),
+ // sender
+ getSelf());
}
}
//These ticks can be sent by another actor as well which is esp. useful while testing
if (GOSSIP_TICK.equals(message)) {
receiveGossipTick();
- } else if (message instanceof GossipStatus) {
+ } else if (message instanceof GossipStatus status) {
// Message from remote gossiper with its bucket versions
- receiveGossipStatus((GossipStatus) message);
- } else if (message instanceof GossipEnvelope) {
+ receiveGossipStatus(status);
+ } else if (message instanceof GossipEnvelope envelope) {
// Message from remote gossiper with buckets. This is usually in response to GossipStatus
// message. The contained buckets are newer as determined by the remote gossiper by
// comparing the GossipStatus message with its local versions.
- receiveGossip((GossipEnvelope) message);
- } else if (message instanceof ClusterEvent.MemberUp) {
- receiveMemberUpOrReachable(((ClusterEvent.MemberUp) message).member());
+ receiveGossip(envelope);
+ } else if (message instanceof ClusterEvent.MemberUp memberUp) {
+ receiveMemberUpOrReachable(memberUp.member());
- } else if (message instanceof ClusterEvent.ReachableMember) {
- receiveMemberUpOrReachable(((ClusterEvent.ReachableMember) message).member());
+ } else if (message instanceof ClusterEvent.ReachableMember reachableMember) {
+ receiveMemberUpOrReachable(reachableMember.member());
- } else if (message instanceof ClusterEvent.MemberRemoved) {
- receiveMemberRemoveOrUnreachable(((ClusterEvent.MemberRemoved) message).member());
+ } else if (message instanceof ClusterEvent.MemberRemoved memberRemoved) {
+ receiveMemberRemoveOrUnreachable(memberRemoved.member());
- } else if (message instanceof ClusterEvent.UnreachableMember) {
- receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
+ } else if (message instanceof ClusterEvent.UnreachableMember unreachableMember) {
+ receiveMemberRemoveOrUnreachable(unreachableMember.member());
} else {
unhandled(message);
import akka.util.Timeout;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Map;
+import java.util.concurrent.TimeoutException;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.remote.rpc.registry.AbstractRoutingTable;
private final BucketStoreAccess bucketAccess;
private final FiniteDuration timeout;
+ @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+ justification = "registerMBean() is expected to be stateless")
AbstractRegistryMXBean(final @NonNull String beanName, final @NonNull String beanType,
final @NonNull BucketStoreAccess bucketAccess, final @NonNull Timeout timeout) {
super(beanName, beanType, null);
registerMBean();
}
- @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+ @SuppressWarnings({"unchecked", "rawtypes"})
final T localData() {
try {
return (T) Await.result((Future) bucketAccess.getLocalData(), timeout);
- } catch (Exception e) {
- throw new RuntimeException("getLocalData failed", e);
+ } catch (InterruptedException | TimeoutException e) {
+ throw new IllegalStateException("getLocalData failed", e);
}
}
- @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+ @SuppressWarnings({"unchecked", "rawtypes"})
final Map<Address, Bucket<T>> remoteBuckets() {
try {
return (Map<Address, Bucket<T>>) Await.result((Future)bucketAccess.getRemoteBuckets(), timeout);
- } catch (Exception e) {
- throw new RuntimeException("getRemoteBuckets failed", e);
+ } catch (InterruptedException | TimeoutException e) {
+ throw new IllegalStateException("getRemoteBuckets failed", e);
}
}
- @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+ @SuppressWarnings({"unchecked", "rawtypes"})
final String bucketVersions() {
try {
return Await.result((Future)bucketAccess.getBucketVersions(), timeout).toString();
- } catch (Exception e) {
- throw new RuntimeException("getVersions failed", e);
+ } catch (InterruptedException | TimeoutException e) {
+ throw new IllegalStateException("getVersions failed", e);
}
}
}
import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
import org.opendaylight.mdsal.dom.api.DOMActionInstance;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<ActionRoutingTable, DOMActionInstance>
implements RemoteActionRegistryMXBean {
-
public RemoteActionRegistryMXBeanImpl(final BucketStoreAccess actionRegistryAccess, final Timeout timeout) {
super("RemoteActionRegistry", "RemoteActionBroker", actionRegistryAccess, timeout);
}
ActionRoutingTable table = localData();
Set<String> routedAction = new HashSet<>(table.getItems().size());
for (DOMActionInstance route : table.getItems()) {
- if (route.getType().getLastComponent() != null) {
- final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(
- new NodeIdentifier(route.getType().getLastComponent()));
- if (!actionPath.isEmpty()) {
- routedAction.add(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + route.getType());
- }
+ final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+ if (!actionPath.isEmpty()) {
+ routedAction.add(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + route.getType());
}
}
Collection<DOMActionInstance> routes = table.getItems();
Map<String, String> actionMap = new HashMap<>(routes.size());
for (DOMActionInstance route : routes) {
- if (route.getType().getLastComponent() != null) {
- final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(
- new NodeIdentifier(route.getType().getLastComponent()));
- if (!actionPath.isEmpty()) {
- String routeString = actionPath.toString();
- if (routeString.contains(routeName)) {
- actionMap.put(ROUTE_CONSTANT + routeString + NAME_CONSTANT + route.getType(), address);
- }
+ final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+ if (!actionPath.isEmpty()) {
+ String routeString = actionPath.toString();
+ if (routeString.contains(routeName)) {
+ actionMap.put(ROUTE_CONSTANT + routeString + NAME_CONSTANT + route.getType(), address);
}
}
}
Collection<DOMActionInstance> routes = table.getItems();
Map<String, String> actionMap = new HashMap<>(routes.size());
for (DOMActionInstance route : routes) {
- if (route.getType().getLastComponent() != null) {
- final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(
- new NodeIdentifier(route.getType().getLastComponent()));
- if (!actionPath.isEmpty()) {
- String type = route.getType().toString();
- if (type.contains(name)) {
- actionMap.put(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + type, address);
- }
+ final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+ if (!actionPath.isEmpty()) {
+ String type = route.getType().toString();
+ if (type.contains(name)) {
+ actionMap.put(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + type, address);
}
}
}
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import java.net.URI;
-import java.util.Collection;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
/**
static final QName TEST_RPC_OUTPUT = QName.create(TEST_NS, TEST_REV, "output");
- static final SchemaPath TEST_RPC_TYPE = SchemaPath.create(true, TEST_RPC);
- static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.create(
- new YangInstanceIdentifier.NodeIdentifier(TEST_RPC));
- public static final DOMRpcIdentifier TEST_RPC_ID = DOMRpcIdentifier.create(TEST_RPC_TYPE, TEST_PATH);
- public static final DOMDataTreeIdentifier TEST_DATA_TREE_ID = new DOMDataTreeIdentifier(
- LogicalDatastoreType.OPERATIONAL, TEST_PATH);
+ static final Absolute TEST_RPC_TYPE = Absolute.of(TEST_RPC);
+ static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_RPC);
+ public static final DOMRpcIdentifier TEST_RPC_ID = DOMRpcIdentifier.create(TEST_RPC, TEST_PATH);
+ public static final DOMDataTreeIdentifier TEST_DATA_TREE_ID =
+ DOMDataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_PATH);
static ActorSystem node1;
static ActorSystem node2;
}
}
- static void assertCompositeNodeEquals(final NormalizedNode<? , ?> exp, final NormalizedNode<? , ?> actual) {
+ static void assertCompositeNodeEquals(final NormalizedNode exp, final NormalizedNode actual) {
assertEquals(exp, actual);
}
public static ContainerNode makeRPCInput(final String data) {
- return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
- .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data)).build();
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
+ .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data))
+ .build();
}
public static ContainerNode makeRPCOutput(final String data) {
- return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
- .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data)).build();
+ return ImmutableNodes.newContainerBuilder()
+ .withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
+ .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data))
+ .build();
}
static void assertFailedRpcResult(final DOMRpcResult rpcResult, final ErrorSeverity severity,
final ErrorType errorType, final String tag, final String message,
final String applicationTag, final String info, final String causeMsg) {
assertNotNull("RpcResult was null", rpcResult);
- final Collection<? extends RpcError> rpcErrors = rpcResult.getErrors();
+ final var rpcErrors = rpcResult.errors();
assertEquals("RpcErrors count", 1, rpcErrors.size());
assertRpcErrorEquals(rpcErrors.iterator().next(), severity, errorType, tag, message,
applicationTag, info, causeMsg);
}
- static void assertSuccessfulRpcResult(final DOMRpcResult rpcResult,
- final NormalizedNode<? , ?> expOutput) {
+ static void assertSuccessfulRpcResult(final DOMRpcResult rpcResult, final NormalizedNode expOutput) {
assertNotNull("RpcResult was null", rpcResult);
- assertCompositeNodeEquals(expOutput, rpcResult.getResult());
+ assertCompositeNodeEquals(expOutput, rpcResult.value());
}
static class TestException extends Exception {
final ContainerNode invokeRpcResult = makeRPCOutput("bar");
final DOMRpcResult rpcResult = new DefaultDOMRpcResult(invokeRpcResult);
doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService1)
- .invokeRpc(eq(TEST_RPC_TYPE), any());
+ .invokeRpc(eq(TEST_RPC), any());
final ExecuteRpc executeRpc = ExecuteRpc.from(TEST_RPC_ID, null);
rpcInvoker1.tell(executeRpc, rpcRegistry1Probe.getRef());
final RpcResponse rpcResponse = rpcRegistry1Probe.expectMsgClass(Duration.ofSeconds(5), RpcResponse.class);
- assertEquals(rpcResult.getResult(), rpcResponse.getOutput());
+ assertEquals(rpcResult.value(), rpcResponse.getOutput());
}
@Test
public void testExecuteRpcFailureWithException() {
- when(domRpcService1.invokeRpc(eq(TEST_RPC_TYPE), any())).thenReturn(FluentFutures.immediateFailedFluentFuture(
+ when(domRpcService1.invokeRpc(eq(TEST_RPC), any())).thenReturn(FluentFutures.immediateFailedFluentFuture(
new DOMRpcImplementationNotAvailableException("NOT FOUND")));
final ExecuteRpc executeMsg = ExecuteRpc.from(TEST_RPC_ID, null);
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
public class OpsListenerTest {
private static final QName TEST_QNAME = QName.create("test", "2015-06-12", "test");
- private static final SchemaPath RPC_TYPE = SchemaPath.create(true, TEST_QNAME);
- private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier
- .create(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME));
- private static final DOMRpcIdentifier RPC_ID = DOMRpcIdentifier.create(RPC_TYPE, TEST_PATH);
+ private static final Absolute RPC_TYPE = Absolute.of(TEST_QNAME);
+ private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ private static final DOMRpcIdentifier RPC_ID = DOMRpcIdentifier.create(TEST_QNAME, TEST_PATH);
private static final DOMActionInstance ACTION_INSTANCE = DOMActionInstance.of(RPC_TYPE,
LogicalDatastoreType.OPERATIONAL, TEST_PATH);
import org.opendaylight.mdsal.dom.api.DOMActionInstance;
import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
public class OpsRegistrarTest {
@Mock
@Mock
private DOMActionProviderService actionService;
@Mock
- private DOMRpcImplementationRegistration<RemoteRpcImplementation> oldReg;
+ private Registration oldReg;
@Mock
- private DOMRpcImplementationRegistration<RemoteRpcImplementation> newReg;
+ private Registration newReg;
@Mock
private ObjectRegistration<RemoteActionImplementation> oldActionReg;
@Mock
testActorRef = new TestActorRef<>(system, props, testKit.getRef(), "actorRef");
endpointAddress = new Address("http", "local");
- final DOMRpcIdentifier firstEndpointId = DOMRpcIdentifier.create(
- SchemaPath.create(true, QName.create("first:identifier", "foo")));
- final DOMRpcIdentifier secondEndpointId = DOMRpcIdentifier.create(
- SchemaPath.create(true, QName.create("second:identifier", "bar")));
+ final DOMRpcIdentifier firstEndpointId = DOMRpcIdentifier.create(QName.create("first:identifier", "foo"));
+ final DOMRpcIdentifier secondEndpointId = DOMRpcIdentifier.create(QName.create("second:identifier", "bar"));
final QName firstActionQName = QName.create("first:actionIdentifier", "fooAction");
- final DOMActionInstance firstActionInstance = DOMActionInstance.of(
- SchemaPath.create(true, firstActionQName), LogicalDatastoreType.OPERATIONAL,
- YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(firstActionQName)));
+ final DOMActionInstance firstActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
+ LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
- final DOMActionInstance secondActionInstance = DOMActionInstance.of(
- SchemaPath.create(true, firstActionQName), LogicalDatastoreType.OPERATIONAL,
- YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(firstActionQName)));
+ final DOMActionInstance secondActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
+ LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
final TestKit senderKit = new TestKit(system);
firstEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(firstEndpointId));
*/
package org.opendaylight.controller.remote.rpc;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.when;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collections;
+import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.junit.Ignore;
import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
import org.opendaylight.mdsal.dom.spi.SimpleDOMActionResult;
import org.opendaylight.yangtools.util.concurrent.FluentFutures;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
/**
* Unit tests for RemoteRpcImplementation.
final ContainerNode rpcOutput = makeRPCOutput("bar");
final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
- final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
- ArgumentCaptor.forClass(NormalizedNode.class);
+ final ContainerNode invokeRpcInput = makeRPCInput("foo");
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
- .invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture());
+ .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
- assertEquals(rpcOutput, result.getResult());
+ assertEquals(rpcOutput, result.value());
}
/**
public void testInvokeAction() throws Exception {
final ContainerNode actionOutput = makeRPCOutput("bar");
final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput, Collections.emptyList());
- final NormalizedNode<?, ?> invokeActionInput = makeRPCInput("foo");
- @SuppressWarnings({"unchecked", "rawtypes"})
+ final NormalizedNode invokeActionInput = makeRPCInput("foo");
final ArgumentCaptor<ContainerNode> inputCaptor =
ArgumentCaptor.forClass(ContainerNode.class);
doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
TEST_DATA_TREE_ID, (ContainerNode) invokeActionInput);
- assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
- assertEquals(actionOutput, result.getOutput().get());
-
+ assertEquals(Optional.of(actionOutput), result.getOutput());
}
/**
final ContainerNode rpcOutput = makeRPCOutput("bar");
final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
- (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
- .invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture());
+ .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, null);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
- assertEquals(rpcOutput, result.getResult());
+ assertEquals(rpcOutput, result.value());
}
/**
final ContainerNode actionOutput = makeRPCOutput("bar");
final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput);
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<ContainerNode> inputCaptor =
- ArgumentCaptor.forClass(ContainerNode.class);
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
TEST_DATA_TREE_ID, actionOutput);
- assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
- assertEquals(actionOutput, result.getOutput().get());
+ assertEquals(Optional.of(actionOutput), result.getOutput());
}
/**
final ContainerNode rpcOutput = null;
final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
- final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
- (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+ final ContainerNode invokeRpcInput = makeRPCInput("foo");
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
- .invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture());
+ .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
- assertNull(result.getResult());
+ assertNull(result.value());
}
/**
* This test method invokes and executes the remote rpc.
*/
- @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
- @Test(expected = DOMRpcException.class)
- public void testInvokeRpcWithRemoteFailedFuture() throws Throwable {
- final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
- (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+ @Test
+ public void testInvokeRpcWithRemoteFailedFuture() {
+ final ContainerNode invokeRpcInput = makeRPCInput("foo");
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
- when(domRpcService2.invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture())).thenReturn(
- FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)));
+ doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+ .when(domRpcService2).invokeRpc(eq(TEST_RPC), inputCaptor.capture());
final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
- try {
- frontEndFuture.get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
+ final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+ assertThat(ex, instanceOf(DOMRpcException.class));
}
/**
* This test method invokes and executes the remote rpc.
*/
- @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
- @Test(expected = DOMActionException.class)
- public void testInvokeActionWithRemoteFailedFuture() throws Throwable {
+ @Test
+ public void testInvokeActionWithRemoteFailedFuture() {
final ContainerNode invokeActionInput = makeRPCInput("foo");
- @SuppressWarnings({"unchecked", "rawtypes"})
- final ArgumentCaptor<ContainerNode> inputCaptor =
- ArgumentCaptor.forClass(ContainerNode.class);
+ final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
- when(domActionService2.invokeAction(eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID),
- inputCaptor.capture())).thenReturn(FluentFutures.immediateFailedFluentFuture(
- new RemoteDOMRpcException("Test Exception", null)));
+ doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+ .when(domActionService2).invokeAction(eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
TEST_DATA_TREE_ID, invokeActionInput);
- assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
- try {
- frontEndFuture.get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
+ final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+ assertThat(ex, instanceOf(DOMActionException.class));
}
/**
* Currently ignored since this test with current config takes around 15 seconds to complete.
*/
@Ignore
- @Test(expected = RemoteDOMRpcException.class)
- public void testInvokeRpcWithAkkaTimeoutException() throws Exception {
- final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
+ @Test
+ public void testInvokeRpcWithAkkaTimeoutException() {
+ final ContainerNode invokeRpcInput = makeRPCInput("foo");
final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
- frontEndFuture.get(20, TimeUnit.SECONDS);
+ assertThrows(RemoteDOMRpcException.class, () -> frontEndFuture.get(20, TimeUnit.SECONDS));
}
/**
- * This test method invokes remote rpc and lookup failed
- * with runtime exception.
+ * This test method invokes remote rpc and lookup failed with runtime exception.
*/
- @Test(expected = DOMRpcException.class)
- @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
- public void testInvokeRpcWithLookupException() throws Throwable {
- final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
+ @Test
+ public void testInvokeRpcWithLookupException() {
+ final ContainerNode invokeRpcInput = makeRPCInput("foo");
- doThrow(new RuntimeException("test")).when(domRpcService2).invokeRpc(any(SchemaPath.class),
- any(NormalizedNode.class));
+ doThrow(new RuntimeException("test")).when(domRpcService2).invokeRpc(any(QName.class),
+ any(ContainerNode.class));
final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
- try {
- frontEndFuture.get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
+ final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+ assertThat(ex, instanceOf(DOMRpcException.class));
}
/**
* This test method invokes remote rpc and lookup failed
* with runtime exception.
*/
- @Test(expected = DOMActionException.class)
- @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
- public void testInvokeActionWithLookupException() throws Throwable {
+ @Test
+ public void testInvokeActionWithLookupException() {
final ContainerNode invokeRpcInput = makeRPCInput("foo");
- doThrow(new RuntimeException("test")).when(domActionService2).invokeAction(any(SchemaPath.class),
+ doThrow(new RuntimeException("test")).when(domActionService2).invokeAction(any(Absolute.class),
any(DOMDataTreeIdentifier.class), any(ContainerNode.class));
final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
TEST_DATA_TREE_ID, invokeRpcInput);
- assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+ assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
- try {
- frontEndFuture.get(5, TimeUnit.SECONDS);
- } catch (ExecutionException e) {
- throw e.getCause();
- }
+ final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+ assertThat(ex, instanceOf(DOMActionException.class));
}
}
import java.util.List;
import org.junit.Before;
import org.junit.Test;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
@Before
public void setUp() {
- final RpcError rpcError = RpcResultBuilder.newError(
- RpcError.ErrorType.RPC, "error", "error message");
- final RpcError rpcWarning = RpcResultBuilder.newWarning(
- RpcError.ErrorType.RPC, "warning", "warning message");
+ final RpcError rpcError = RpcResultBuilder.newError(ErrorType.RPC, new ErrorTag("error"), "error message");
+ final RpcError rpcWarning = RpcResultBuilder.newWarning(ErrorType.RPC, new ErrorTag("warning"),
+ "warning message");
rpcErrors = new ArrayList<>();
rpcErrors.add(rpcError);
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
ExecuteRpc expected = ExecuteRpc.from(AbstractOpsTest.TEST_RPC_ID,
AbstractOpsTest.makeRPCInput("serialization-test"));
- ExecuteRpc actual = (ExecuteRpc) SerializationUtils.clone(expected);
+ ExecuteRpc actual = SerializationUtils.clone(expected);
assertEquals("getName", expected.getType(), actual.getType());
assertEquals("getInputNormalizedNode", expected.getInput(), actual.getInput());
import java.util.Collections;
import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
ActionResponse expectedAction = new ActionResponse(
Optional.of(AbstractOpsTest.makeRPCOutput("serialization-test")), Collections.emptyList());
- RpcResponse actualRpc = (RpcResponse) SerializationUtils.clone(expectedRpc);
+ RpcResponse actualRpc = SerializationUtils.clone(expectedRpc);
- ActionResponse actualAction = (ActionResponse) SerializationUtils.clone(expectedAction);
+ ActionResponse actualAction = SerializationUtils.clone(expectedAction);
assertEquals("getResultNormalizedNode", expectedRpc.getOutput(),
actualRpc.getOutput());
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import com.typesafe.config.ConfigFactory;
-import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import org.opendaylight.mdsal.dom.api.DOMActionInstance;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
assertNotNull(maybeEndpoint);
assertTrue(maybeEndpoint.isPresent());
- final RemoteActionEndpoint endpoint = maybeEndpoint.get();
+ final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
final ActorRef router = endpoint.getRouter();
assertNotNull(router);
final int nRoutes = 500;
final Collection<DOMActionInstance> added = new ArrayList<>(nRoutes);
for (int i = 0; i < nRoutes; i++) {
- QName type = QName.create(URI.create("/mockaction"), "mockaction" + routeIdCounter++);
- final DOMActionInstance routeId = DOMActionInstance.of(SchemaPath.create(true,
- type), LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.create(new
- YangInstanceIdentifier.NodeIdentifier(type)));
+ QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
+ final DOMActionInstance routeId = DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
+ YangInstanceIdentifier.of(type));
added.add(routeId);
//Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
private List<DOMActionInstance> createRouteIds() {
- QName type = QName.create(URI.create("/mockaction"), "mockaction" + routeIdCounter++);
- List<DOMActionInstance> routeIds = new ArrayList<>(1);
- routeIds.add(DOMActionInstance.of(SchemaPath.create(true, type),
- LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.create(
- new YangInstanceIdentifier.NodeIdentifier(type))));
+ QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
+ var routeIds = new ArrayList<DOMActionInstance>(1);
+ routeIds.add(DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
+ YangInstanceIdentifier.of(type)));
return routeIds;
}
}
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import com.typesafe.config.ConfigFactory;
-import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
assertNotNull(maybeEndpoint);
assertTrue(maybeEndpoint.isPresent());
- final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
+ final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
final ActorRef router = endpoint.getRouter();
assertNotNull(router);
final int nRoutes = 500;
final Collection<DOMRpcIdentifier> added = new ArrayList<>(nRoutes);
for (int i = 0; i < nRoutes; i++) {
- final DOMRpcIdentifier routeId = DOMRpcIdentifier.create(SchemaPath.create(true,
- QName.create(URI.create("/mockrpc"), "type" + i)));
+ final DOMRpcIdentifier routeId = DOMRpcIdentifier.create(QName.create("/mockrpc", "type" + i));
added.add(routeId);
//Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
private List<DOMRpcIdentifier> createRouteIds() {
- QName type = QName.create(URI.create("/mockrpc"), "mockrpc" + routeIdCounter++);
+ QName type = QName.create("/mockrpc", "mockrpc" + routeIdCounter++);
List<DOMRpcIdentifier> routeIds = new ArrayList<>(1);
- routeIds.add(DOMRpcIdentifier.create(SchemaPath.create(true, type)));
+ routeIds.add(DOMRpcIdentifier.create(type));
return routeIds;
}
}
import org.opendaylight.controller.remote.rpc.TerminationMonitor;
public class BucketStoreTest {
-
/**
* Dummy class to eliminate rawtype warnings.
*
* @author gwu
- *
*/
- private static class T implements BucketData<T> {
+ private static final class T implements BucketData<T> {
@Override
public Optional<ActorRef> getWatchActor() {
return Optional.empty();
import akka.testkit.javadsl.TestKit;
import akka.util.Timeout;
import com.google.common.collect.Lists;
+import com.typesafe.config.ConfigFactory;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.opendaylight.mdsal.dom.api.DOMActionInstance;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
public class RemoteActionRegistryMXBeanImplTest {
private static final QName LOCAL_QNAME = QName.create("base", "local");
- private static final SchemaPath EMPTY_SCHEMA_PATH = SchemaPath.ROOT;
- private static final SchemaPath LOCAL_SCHEMA_PATH = SchemaPath.create(true, LOCAL_QNAME);
+ private static final QName REMOTE_QNAME = QName.create("base", "local");
+ private static final Absolute LOCAL_SCHEMA_PATH = Absolute.of(LOCAL_QNAME);
+ private static final Absolute REMOTE_SCHEMA_PATH = Absolute.of(REMOTE_QNAME);
private ActorSystem system;
private TestActorRef<ActionRegistry> testActor;
@Before
public void setUp() {
- system = ActorSystem.create("test");
+ system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
final DOMActionInstance emptyActionIdentifier = DOMActionInstance.of(
- EMPTY_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+ REMOTE_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
final DOMActionInstance localActionIdentifier = DOMActionInstance.of(
LOCAL_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(LOCAL_QNAME));
*/
package org.opendaylight.controller.remote.rpc.registry.mbeans;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import akka.testkit.javadsl.TestKit;
import akka.util.Timeout;
import com.google.common.collect.Lists;
+import com.typesafe.config.ConfigFactory;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
public class RemoteRpcRegistryMXBeanImplTest {
-
private static final QName LOCAL_QNAME = QName.create("base", "local");
- private static final SchemaPath EMPTY_SCHEMA_PATH = SchemaPath.ROOT;
- private static final SchemaPath LOCAL_SCHEMA_PATH = SchemaPath.create(true, LOCAL_QNAME);
+ private static final QName REMOTE_QNAME = QName.create("base", "remote");
private ActorSystem system;
private TestActorRef<RpcRegistry> testActor;
@Before
public void setUp() {
- system = ActorSystem.create("test");
+ system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
final DOMRpcIdentifier emptyRpcIdentifier = DOMRpcIdentifier.create(
- EMPTY_SCHEMA_PATH, YangInstanceIdentifier.empty());
+ REMOTE_QNAME, YangInstanceIdentifier.of());
final DOMRpcIdentifier localRpcIdentifier = DOMRpcIdentifier.create(
- LOCAL_SCHEMA_PATH, YangInstanceIdentifier.of(LOCAL_QNAME));
+ LOCAL_QNAME, YangInstanceIdentifier.of(LOCAL_QNAME));
buckets = Lists.newArrayList(emptyRpcIdentifier, localRpcIdentifier);
assertEquals(1, globalRpc.size());
final String rpc = globalRpc.iterator().next();
- assertEquals(EMPTY_SCHEMA_PATH.toString(), rpc);
+ assertEquals(REMOTE_QNAME.toString(), rpc);
}
@Test
assertEquals(1, localRegisteredRoutedRpc.size());
final String localRpc = localRegisteredRoutedRpc.iterator().next();
- assertTrue(localRpc.contains(LOCAL_QNAME.toString()));
- assertTrue(localRpc.contains(LOCAL_SCHEMA_PATH.toString()));
+ assertThat(localRpc, containsString(LOCAL_QNAME.toString()));
}
@Test
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2550"]
-
- auto-down-unreachable-after = 10s
}
}
}
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>clustering-it-config</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>jar</packaging>
<build>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2021 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>mdsal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>9.0.3-SNAPSHOT</version>
+ <relativePath>../../../parent/pom.xml</relativePath>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-karaf-cli</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.karaf.shell</groupId>
+ <artifactId>org.apache.karaf.shell.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-binding-dom-codec-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-dom-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-gson</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.karaf.tooling</groupId>
+ <artifactId>karaf-services-maven-plugin</artifactId>
+ <version>${karaf.version}</version>
+ <executions>
+ <execution>
+ <id>service-metadata-generate</id>
+ <phase>process-classes</phase>
+ <goals>
+ <goal>service-metadata-generate</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+
+public abstract class AbstractDOMRpcAction implements Action {
+ @Override
+ @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+ public final Object execute() throws InterruptedException, ExecutionException {
+ final DOMRpcResult result = invokeRpc().get();
+ if (!result.errors().isEmpty()) {
+ // FIXME: is there a better way to report errors?
+ System.out.println("Invocation failed: " + result.errors());
+ return null;
+ } else {
+ return result.value().prettyTree().get();
+ }
+ }
+
+ protected abstract ListenableFuture<? extends DOMRpcResult> invokeRpc();
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+/**
+ * Common base class for all commands which end up invoking an RPC.
+ */
+public abstract class AbstractRpcAction implements Action {
+ @Override
+ @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+ public final Object execute() throws InterruptedException, ExecutionException {
+ final RpcResult<?> result = invokeRpc().get();
+ if (!result.isSuccessful()) {
+ // FIXME: is there a better way to report errors?
+ System.out.println("Invocation failed: " + result.getErrors());
+ return null;
+ } else {
+ return result.getResult();
+ }
+ }
+
+ protected abstract ListenableFuture<? extends RpcResult<?>> invokeRpc();
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import java.util.Optional;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTree;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeContext;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier;
+import org.opendaylight.yangtools.yang.data.util.codec.TypeAwareCodec;
+import org.opendaylight.yangtools.yang.model.api.Status;
+import org.opendaylight.yangtools.yang.model.api.TypeAware;
+import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.InstanceIdentifierTypeDefinition;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
+
+@Component
+@RequireServiceComponentRuntime
+public final class DefaultInstanceIdentifierSupport implements InstanceIdentifierSupport {
+ private final BindingInstanceIdentifierCodec bindingCodec;
+ private final TypeAwareCodec<?, ?, ?> jsonCodec;
+
+ @Activate
+ public DefaultInstanceIdentifierSupport(@Reference final BindingCodecTree bindingCodecTree,
+ @Reference final BindingRuntimeContext runtimeContext) {
+ bindingCodec = bindingCodecTree.getInstanceIdentifierCodec();
+ jsonCodec = JSONCodecFactorySupplier.RFC7951.createLazy(runtimeContext.modelContext())
+ .codecFor(new FakeLeafDefinition(), null);
+ }
+
+ @Override
+ public InstanceIdentifier<?> parseArgument(final String argument) {
+ final YangInstanceIdentifier path = verifyNotNull((YangInstanceIdentifier)jsonCodec.parseValue(null, argument));
+ final InstanceIdentifier<?> ret = bindingCodec.toBinding(path);
+ checkArgument(ret != null, "%s does not have a binding representation", path);
+ return ret;
+ }
+
+ // Mock wiring for JSON codec. Perhaps we should really bind to context-ref, or receive the class, or something.
+ private static final class FakeLeafDefinition implements InstanceIdentifierTypeDefinition, TypeAware {
+ @Override
+ public Optional<String> getReference() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Optional<String> getDescription() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Status getStatus() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public QName getQName() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Optional<String> getUnits() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Optional<? extends Object> getDefaultValue() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public InstanceIdentifierTypeDefinition getBaseType() {
+ return null;
+ }
+
+ @Override
+ public boolean requireInstance() {
+ return false;
+ }
+
+ @Override
+ public TypeDefinition<? extends TypeDefinition<?>> getType() {
+ return this;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Codec providing translation between CLI representation and {@link InstanceIdentifier}. This is mostly useful for
+ * injecting invocation contexts for {@code routed RPC}s and actions.
+ */
+public interface InstanceIdentifierSupport {
+ /**
+ * Parse a CLI argument into its {@link InstanceIdentifier} representation.
+ *
+ * @param argument Argument to parse
+ * @return Parse InstanceIdentifier
+ * @throws NullPointerException if {@code argument} is null
+ */
+ @NonNull InstanceIdentifier<?> parseArgument(String argument);
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-commit-cohort", description = "Run a register-commit-cohort test")
+public class RegisterCommitCohortCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterCommitCohort.class).invoke(new RegisterCommitCohortInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-logging-dtcl", description = "Run a register-logging-dtcl test")
+public class RegisterLoggingDtclCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterLoggingDtcl.class).invoke(new RegisterLoggingDtclInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-ownership", description = "Run a register-ownership test")
+public class RegisterOwnershipCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "car-id", required = true)
+ private String carId;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterOwnership.class)
+ .invoke(new RegisterOwnershipInputBuilder().setCarId(carId).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app" , name = "stop-stress-test", description = "Run a stop-stress-test")
+public class StopStressTestCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(StopStressTest.class).invoke(new StopStressTestInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app" , name = "stress-test", description = "Run a stress-test")
+public class StressTestCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "rate", required = true)
+ private int rate;
+ @Argument(index = 1, name = "count", required = true)
+ private long count;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(StressTest.class).invoke(new StressTestInputBuilder()
+ .setRate(Uint16.valueOf(rate))
+ .setCount(Uint32.valueOf(count))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-commit-cohort", description = "Run a unregister-commit-cohort test")
+public class UnregisterCommitCohortCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterCommitCohort.class).invoke(new UnregisterCommitCohortInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-logging-dtcls", description = "Run and unregister-logging-dtcls test")
+public class UnregisterLoggingDtclsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterLoggingDtcls.class).invoke(new UnregisterLoggingDtclsInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-ownership", description = "Run an unregister-ownership test")
+public class UnregisterOwnershipCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "car-id", required = true)
+ private String carId;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterOwnership.class)
+ .invoke(new UnregisterOwnershipInputBuilder().setCarId(carId).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car.purchase;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonRef;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "buy-car", description = "Run a buy-car test")
+public class BuyCarCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Reference
+ private InstanceIdentifierSupport iidSupport;
+ @Argument(index = 0, name = "person-ref", required = true)
+ String personRef;
+ @Argument(index = 1, name = "car-id", required = true)
+ private CarId carId;
+ @Argument(index = 2, name = "person-id", required = true)
+ private PersonId personId;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(BuyCar.class).invoke(new BuyCarInputBuilder()
+ .setPerson(new PersonRef(iidSupport.parseArgument(personRef)))
+ .setCarId(carId)
+ .setPersonId(personId)
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "add-shard-replica", description = "Run an add-shard-replica test")
+public class AddShardReplicaCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(AddShardReplica.class)
+ .invoke(new AddShardReplicaInputBuilder().setShardName(shardName).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "check-publish-notifications",
+ description = "Run a check-publish-notifications test")
+public class CheckPublishNotificationsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private String id;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(CheckPublishNotifications.class)
+ .invoke(new CheckPublishNotificationsInputBuilder().setId(id).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "is-client-aborted", description = "Run an is-client-aborted test")
+public class IsClientAbortedCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(IsClientAborted.class).invoke(new IsClientAbortedInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-bound-constant", description = "Run a register-bound-constant test")
+public class RegisterBoundConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Reference
+ private InstanceIdentifierSupport iidSupport;
+ @Argument(index = 0, name = "context", required = true)
+ private String context;
+ @Argument(index = 1, name = "constant", required = true)
+ private String constant;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterBoundConstant.class)
+ .invoke(new RegisterBoundConstantInputBuilder()
+ .setConstant(constant)
+ .setContext(iidSupport.parseArgument(context))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-contact", description = "Run a register-contact test")
+public class RegisterConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "constant", required = true)
+ private String constant;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterConstant.class)
+ .invoke(new RegisterConstantInputBuilder().setConstant(constant).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-default-constant", description = "Run a register-default-constant test")
+public class RegisterDefaultConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "constant", required = true)
+ private String constant;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterDefaultConstant.class)
+ .invoke(new RegisterDefaultConstantInputBuilder().setConstant(constant).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-flapping-singleton",
+ description = "Run a register-flapping-singleton test")
+public class RegisterFlappingSingletonCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterFlappingSingleton.class)
+ .invoke(new RegisterFlappingSingletonInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-singleton-constant",
+ description = "Run a register-singleton-constant text")
+public class RegisterSingletonConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "constant", required = true)
+ private String constant;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RegisterSingletonConstant.class)
+ .invoke(new RegisterSingletonConstantInputBuilder().setConstant(constant).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "remove-shard-replica", description = "Run a remove-shard-replica test")
+public class RemoveShardReplicaCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(RemoveShardReplica.class)
+ .invoke(new RemoveShardReplicaInputBuilder().setShardName(shardName).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "shutdown-shard-replica", description = " Run a shutdown-shard-replica test")
+public class ShutdownShardReplicaCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "shard-name", required = true)
+ private String shardName;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(ShutdownShardReplica.class)
+ .invoke(new ShutdownShardReplicaInputBuilder()
+ .setShardName(shardName)
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "start-publish-notifications",
+ description = "Run a start-publish-notifications test")
+public class StartPublishNotificationsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private String id;
+ @Argument(index = 1, name = "seconds", required = true)
+ private long seconds;
+ @Argument(index = 2, name = "notifications-per-second", required = true)
+ private long notificationsPerSecond;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(StartPublishNotifications.class)
+ .invoke(new StartPublishNotificationsInputBuilder()
+ .setId(id)
+ .setSeconds(Uint32.valueOf(seconds))
+ .setNotificationsPerSecond(Uint32.valueOf(notificationsPerSecond))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-ddtl", description = "Run a subscribe-ddtl test")
+public class SubscribeDdtlCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(SubscribeDdtl.class).invoke(new SubscribeDdtlInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-dtcl", description = "Run a subscribe-dtcl test")
+public class SubscribeDtclCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(SubscribeDtcl.class).invoke(new SubscribeDtclInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-ynl", description = "Run a subscribe-ynl test")
+public class SubscribeYnlCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private String id;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(SubscribeYnl.class).invoke(new SubscribeYnlInputBuilder().setId(id).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-bound-constant", description = "Run an unregister-bound-constant test")
+public class UnregisterBoundConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Reference
+ private InstanceIdentifierSupport iidSupport;
+ @Argument(index = 0, name = "context", required = true)
+ private String context;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterBoundConstant.class)
+ .invoke(new UnregisterBoundConstantInputBuilder()
+ .setContext(iidSupport.parseArgument(context))
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-constant", description = "Run an unregister-constant test")
+public class UnregisterConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterConstant.class).invoke(new UnregisterConstantInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-default-constant",
+ description = "Run an unregister-default-constant test")
+public class UnregisterDefaultConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterDefaultConstant.class)
+ .invoke(new UnregisterDefaultConstantInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-flapping-singleton",
+ description = "Run an unregister-flapping-singleton test")
+public class UnregisterFlappingSingletonCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterFlappingSingleton.class)
+ .invoke(new UnregisterFlappingSingletonInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-singleton-constant",
+ description = "Run an unregister-singleton-constant test")
+public class UnregisterSingletonConstantCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnregisterSingletonConstant.class)
+ .invoke(new UnregisterSingletonConstantInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-ddtl", description = "Run an unsubscribe-ddtl test")
+public class UnsubscribeDdtlCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnsubscribeDdtl.class).invoke(new UnsubscribeDdtlInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-dtcl", description = "Run an unsubscribe-dtcl test")
+public class UnsubscribeDtclCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnsubscribeDtcl.class).invoke(new UnsubscribeDtclInputBuilder().build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-ynl", description = "Run an unsubscribe-ynl test")
+public class UnsubscribeYnlCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private String id;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(UnsubscribeYnl.class)
+ .invoke(new UnsubscribeYnlInputBuilder().setId(id).build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "write-transactions", description = "Run a write-transactions test")
+public class WriteTransactionsCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private String id;
+ @Argument(index = 1, name = "seconds", required = true)
+ private long seconds;
+ @Argument(index = 2, name = "trasactions-per-second", required = true)
+ private long transactionsPerSecond;
+ @Argument(index = 3, name = "chained-transations", required = true)
+ private boolean chainedTransactions;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(WriteTransactions.class)
+ .invoke(new WriteTransactionsInputBuilder()
+ .setId(id)
+ .setSeconds(Uint32.valueOf(seconds))
+ .setTransactionsPerSecond(Uint32.valueOf(transactionsPerSecond))
+ .setChainedTransactions(chainedTransactions)
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-constant", description = "Run an get-constant test")
+public class GetConstantCommand extends AbstractDOMRpcAction {
+ @Reference
+ private DOMRpcService rpcService;
+ @Reference
+ private BindingNormalizedNodeSerializer serializer;
+
+ @Override
+ protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+ final ContainerNode input = serializer.toNormalizedNodeRpcData(new GetConstantInputBuilder().build());
+ return rpcService.invokeRpc(QName.create(GetConstantInput.QNAME, "get-constant"), input);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-contexted-constant", description = "Run an get-contexted-constant test")
+public class GetContextedConstantCommand extends AbstractDOMRpcAction {
+ @Reference
+ private DOMRpcService rpcService;
+ @Reference
+ private BindingNormalizedNodeSerializer serializer;
+ @Reference
+ private InstanceIdentifierSupport iidSupport;
+ @Argument(index = 0, name = "context", required = true)
+ private String context;
+
+ @Override
+ protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+ final ContainerNode inputNode = serializer.toNormalizedNodeRpcData(new GetContextedConstantInputBuilder()
+ .setContext(iidSupport.parseArgument(context))
+ .build());
+ return rpcService.invokeRpc(QName.create(GetContextedConstantInput.QNAME, "get-contexted-constant"), inputNode);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-singleton-constant", description = "Run an get-singleton-constant test")
+public class GetSingletonConstantCommand extends AbstractDOMRpcAction {
+ @Reference
+ private DOMRpcService rpcService;
+ @Reference
+ private BindingNormalizedNodeSerializer serializer;
+
+ @Override
+ protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+ final ContainerNode inputNode =
+ serializer.toNormalizedNodeRpcData(new GetSingletonConstantInputBuilder().build());
+ return rpcService.invokeRpc(QName.create(GetSingletonConstantInput.QNAME, "get-singleton-constant"), inputNode);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.people;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "add-person", description = " Run an add-person test")
+public class AddPersonCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+ @Argument(index = 0, name = "id", required = true)
+ private PersonId id;
+ @Argument(index = 1, name = "gender", required = true)
+ private String gender;
+ @Argument(index = 2, name = "age", required = true)
+ private long age;
+ @Argument(index = 3, name = "address", required = true)
+ private String address;
+ @Argument(index = 4, name = "contactNo", required = true)
+ private String contactNo;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(AddPerson.class).invoke(new AddPersonInputBuilder()
+ .setId(id)
+ .setGender(gender)
+ .setAge(Uint32.valueOf(age))
+ .setAddress(address)
+ .setContactNo(contactNo)
+ .build());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.rpc.test;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "global-basic", description = "Run a global-basic test")
+public class BasicGlobalCommand extends AbstractRpcAction {
+ @Reference
+ private RpcService rpcService;
+
+ @Override
+ protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+ return rpcService.getRpc(BasicGlobal.class).invoke(new BasicGlobalInputBuilder().build());
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../../parent</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
- <artifactId>rfc6991</artifactId>
+ <artifactId>rfc6991-ietf-inet-types</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal.model</groupId>
}
}
- rpc produce-transactions {
- description "Upon receiving this, the member shall make sure the outer list item
- of llt:in-ints exists for the given id, make sure a shard for
- the whole (config) id-ints is created (by creating and closing producer
- for the whole id-ints), and create a DOMDataTreeProducer for that item (using that shard).
-
- FIXME: Is the above the normal way of creating prefix-based chards?
-
- Then start creating (one by one) and submitting transactions
- to randomly add or delete items on the inner list for that id.
- To ensure balanced number of deletes, the first write can create
- a random set of random numbers. Other writes shall be one per number.
- The writes shall use DOMDataTreeProducer API, as opposed to transaction (chains)
- created directly on datastore.
- .get with a timeout on currently earliest non-complete Future (from .submit)
- shall be used as the primary wait method to throttle the submission rate.
- This RPC shall not return until all transactions are confirmed successful,
- or an exception is raised (the exception should propagate to restconf response).
- OptimisticLockException is always considered an error.
- In either case, the producer should be closed before returning,
- but the shard and the whole id item shall be kept as they are.";
- input {
- uses llc:id-grouping;
- uses transactions-params;
- leaf isolated-transactions {
- description "The value for DOMDataTreeProducer#createTransaction argument.";
- mandatory true;
- type boolean;
- }
- }
- output {
- uses transactions-result;
- }
- }
-
- rpc create-prefix-shard {
- description "Upon receiving this, the member creates a prefix shard at the instance-identifier, with replicas
- on the required members.";
- input {
-
- leaf prefix {
- mandatory true;
- type instance-identifier;
- }
- leaf-list replicas {
- min-elements 1;
- type string;
- }
- }
- }
-
- rpc remove-prefix-shard {
- description "Upon receiving this, the member removes the prefix based shard identifier by this prefix.
- This must be called from the same node that created the shard.";
-
- input {
- leaf prefix {
- mandatory true;
- type instance-identifier;
- }
- }
- }
-
-
- rpc become-prefix-leader {
- description "Upon receiving this, the member shall ask the appropriate API
- to become Leader of the given shard (presumably the llt:list-ints one,
- created by produce-transactions) and return immediatelly.";
- input {
- leaf prefix {
- mandatory true;
- type instance-identifier;
- }
- }
- // No output.
- }
-
rpc remove-shard-replica {
description "A specialised copy of cluster-admin:remove-shard-replica.
// The following calls are not required for Carbon testing.
- rpc deconfigure-id-ints-shard {
- description "Upon receiving this, the member shall ask the appropriate API
- to remove the llt:id-ints shard (presumably created by produce-transactions)
- and return immediatelly.
- It is expected the data would move to the root prefix shard seamlessly.
-
- TODO: Make shard name configurable by input?";
- // No input.
- // No output.
- }
-
rpc register-default-constant {
description "Upon receiving this, the member has to create and register
a default llt:get-contexted-constant implementation (routed RPC).
}
}
}
-
- rpc shutdown-prefix-shard-replica {
- description "Upon receiving this, the member will try to gracefully shutdown local configuration
- data store prefix-based shard replica.";
- input {
- leaf prefix {
- description "The prefix of the configuration data store prefix-based shard to be shutdown
- gracefully.";
- mandatory true;
- type instance-identifier;
- }
- }
- }
}
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>clustering-test-app</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<modules>
<module>configuration</module>
+ <module>karaf-cli</module>
<module>model</module>
<module>provider</module>
</modules>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../../parent</relativePath>
</parent>
<packaging>bundle</packaging>
<dependencies>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <optional>true</optional>
+ </dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-eos-binding-api</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-singleton-common-api</artifactId>
+ <artifactId>mdsal-singleton-api</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller.samples</groupId>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
- <artifactId>mdsal-common-api</artifactId>
+ <artifactId>mdsal-dom-api</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-util</artifactId>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-common-api</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-distributed-datastore</artifactId>
</dependency>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
+ </dependency>
</dependencies>
</project>
*/
package org.opendaylight.controller.clustering.it.listener;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.MoreExecutors;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class PeopleCarListener implements CarPurchaseListener {
- private static final Logger LOG = LoggerFactory.getLogger(PeopleCarListener.class);
+@Singleton
+@Component(service = { })
+public final class CarBoughtListener implements Listener<CarBought> {
+ private static final Logger LOG = LoggerFactory.getLogger(CarBoughtListener.class);
- private DataBroker dataProvider;
+ private final DataBroker dataProvider;
+ private final Registration reg;
- public void setDataProvider(final DataBroker salDataProvider) {
- this.dataProvider = salDataProvider;
+ @Inject
+ @Activate
+ public CarBoughtListener(@Reference final DataBroker dataProvider,
+ @Reference final NotificationService notifService) {
+ this.dataProvider = requireNonNull(dataProvider);
+ reg = notifService.registerListener(CarBought.class, this);
}
- @Override
- public void onCarBought(final CarBought notification) {
+ @PreDestroy
+ @Deactivate
+ public void close() {
+ reg.close();
+ }
- final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
- carPersonBuilder.setCarId(notification.getCarId());
- carPersonBuilder.setPersonId(notification.getPersonId());
- CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
- carPersonBuilder.withKey(key);
- final CarPerson carPerson = carPersonBuilder.build();
+ @Override
+ public void onNotification(final CarBought notification) {
+ final var carPerson = new CarPersonBuilder()
+ .withKey(new CarPersonKey(notification.getCarId(), notification.getPersonId()))
+ .build();
LOG.info("Car bought, adding car-person entry: [{}]", carPerson);
- InstanceIdentifier<CarPerson> carPersonIId = InstanceIdentifier.builder(CarPeople.class)
+ final var carPersonIId = InstanceIdentifier.builder(CarPeople.class)
.child(CarPerson.class, carPerson.key()).build();
-
- WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ final var tx = dataProvider.newWriteOnlyTransaction();
tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
tx.commit().addCallback(new FutureCallback<CommitInfo>() {
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import java.util.HashSet;
import java.util.Set;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class PeopleProvider implements PeopleService, AutoCloseable {
+@Singleton
+@Component(service = { })
+public final class AddPersonImpl implements AddPerson, AutoCloseable {
+ private static final Logger LOG = LoggerFactory.getLogger(AddPersonImpl.class);
- private static final Logger LOG = LoggerFactory.getLogger(PeopleProvider.class);
-
- private final Set<ObjectRegistration<?>> regs = new HashSet<>();
- private final DataBroker dataProvider;
+ private final Set<Registration> regs = new HashSet<>();
private final RpcProviderService rpcProviderService;
- private final CarPurchaseService rpcImplementation;
+ private final DataBroker dataProvider;
+ private final BuyCar buyCarRpc;
- public PeopleProvider(final DataBroker dataProvider, final RpcProviderService rpcProviderService,
- final CarPurchaseService rpcImplementation) {
+ @Inject
+ @Activate
+ public AddPersonImpl(@Reference final DataBroker dataProvider,
+ @Reference final NotificationPublishService notificationProvider,
+ @Reference final RpcProviderService rpcProviderService) {
this.dataProvider = requireNonNull(dataProvider);
this.rpcProviderService = requireNonNull(rpcProviderService);
- this.rpcImplementation = requireNonNull(rpcImplementation);
- // Add global registration
- regs.add(rpcProviderService.registerRpcImplementation(CarPurchaseService.class, rpcImplementation));
+ requireNonNull(notificationProvider);
+ buyCarRpc = input -> {
+ LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+ final var carBought = new CarBoughtBuilder()
+ .setCarId(input.getCarId())
+ .setPersonId(input.getPersonId())
+ .build();
+ return Futures.transform(notificationProvider.offerNotification(carBought),
+ result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
+ MoreExecutors.directExecutor());
+ };
+
+ regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc));
+ regs.add(rpcProviderService.registerRpcImplementation(this));
+ }
+
+ @PreDestroy
+ @Deactivate
+ @Override
+ public void close() {
+ regs.forEach(Registration::close);
+ regs.clear();
}
@Override
- public ListenableFuture<RpcResult<AddPersonOutput>> addPerson(final AddPersonInput input) {
+ public ListenableFuture<RpcResult<AddPersonOutput>> invoke(final AddPersonInput input) {
LOG.info("RPC addPerson : adding person [{}]", input);
PersonBuilder builder = new PersonBuilder(input);
@Override
public void onSuccess(final CommitInfo result) {
LOG.info("RPC addPerson : person added successfully [{}]", person);
- regs.add(rpcProviderService.registerRpcImplementation(CarPurchaseService.class, rpcImplementation,
- ImmutableSet.of(personId)));
+ regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc, ImmutableSet.of(personId)));
LOG.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
futureResult.set(RpcResultBuilder.success(new AddPersonOutputBuilder().build()).build());
}
public void onFailure(final Throwable ex) {
LOG.error("RPC addPerson : person addition failed [{}]", person, ex);
futureResult.set(RpcResultBuilder.<AddPersonOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, ex.getMessage()).build());
+ .withError(ErrorType.APPLICATION, ex.getMessage()).build());
}
}, MoreExecutors.directExecutor());
return futureResult;
}
-
- @Override
- public void close() {
- regs.forEach(ObjectRegistration::close);
- regs.clear();
- }
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutput;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicRpcTestService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class BasicRpcTestProvider implements ClusterSingletonService, BasicRpcTestService {
-
+@Singleton
+@Component(service = { })
+public final class BasicRpcTestProvider implements ClusterSingletonService {
private static final Logger LOG = LoggerFactory.getLogger(BasicRpcTestProvider.class);
- private static final ServiceGroupIdentifier IDENTIFIER = ServiceGroupIdentifier.create("Basic-rpc-test");
+ private static final ServiceGroupIdentifier IDENTIFIER = new ServiceGroupIdentifier("Basic-rpc-test");
private final RpcProviderService rpcProviderRegistry;
- private final ClusterSingletonServiceProvider singletonService;
+ private final Registration singletonRegistration;
- private ObjectRegistration<?> rpcRegistration;
+ private Registration rpcRegistration = null;
- public BasicRpcTestProvider(final RpcProviderService rpcProviderRegistry,
- final ClusterSingletonServiceProvider singletonService) {
+ @Inject
+ @Activate
+ public BasicRpcTestProvider(@Reference final RpcProviderService rpcProviderRegistry,
+ @Reference final ClusterSingletonServiceProvider singletonService) {
this.rpcProviderRegistry = rpcProviderRegistry;
- this.singletonService = singletonService;
+ singletonRegistration = singletonService.registerClusterSingletonService(this);
+ }
- singletonService.registerClusterSingletonService(this);
+ @PreDestroy
+ @Deactivate
+ public void close() {
+ singletonRegistration.close();
}
@Override
public void instantiateServiceInstance() {
LOG.info("Basic testing rpc registered as global");
- rpcRegistration = rpcProviderRegistry.registerRpcImplementation(BasicRpcTestService.class, this);
+ rpcRegistration = rpcProviderRegistry.registerRpcImplementation((BasicGlobal) input -> {
+ LOG.info("Basic test global rpc invoked");
+ return RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).buildFuture();
+ });
}
@Override
public ServiceGroupIdentifier getIdentifier() {
return IDENTIFIER;
}
-
- @Override
- public ListenableFuture<RpcResult<BasicGlobalOutput>> basicGlobal(final BasicGlobalInput input) {
- LOG.info("Basic test global rpc invoked");
-
- return Futures.immediateFuture(RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).build());
- }
}
*/
package org.opendaylight.controller.clustering.it.provider;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
+import java.util.List;
import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataTreeModification;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* @author Ryan Goulding (ryandgoulding@gmail.com)
*/
-public class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
+public final class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
private static final Logger LOG = LoggerFactory.getLogger(CarDataTreeChangeListener.class);
- @java.lang.Override
- public void onDataTreeChanged(final java.util.Collection<DataTreeModification<Cars>> changes) {
+ @Override
+ public void onDataTreeChanged(final List<DataTreeModification<Cars>> changes) {
if (LOG.isTraceEnabled()) {
- for (DataTreeModification<Cars> change : changes) {
+ for (var change : changes) {
outputChanges(change);
}
}
}
private static void outputChanges(final DataTreeModification<Cars> change) {
- final DataObjectModification<Cars> rootNode = change.getRootNode();
- final ModificationType modificationType = rootNode.getModificationType();
- final InstanceIdentifier<Cars> rootIdentifier = change.getRootPath().getRootIdentifier();
+ final var rootNode = change.getRootNode();
+ final var modificationType = rootNode.modificationType();
+ final var rootIdentifier = change.getRootPath().path();
switch (modificationType) {
- case WRITE:
- case SUBTREE_MODIFIED: {
+ case WRITE, SUBTREE_MODIFIED -> {
LOG.trace("onDataTreeChanged - Cars config with path {} was added or changed from {} to {}",
- rootIdentifier, rootNode.getDataBefore(), rootNode.getDataAfter());
- break;
+ rootIdentifier, rootNode.dataBefore(), rootNode.dataAfter());
}
- case DELETE: {
+ case DELETE -> {
LOG.trace("onDataTreeChanged - Cars config with path {} was deleted", rootIdentifier);
- break;
}
- default: {
+ default -> {
LOG.trace("onDataTreeChanged called with unknown modificationType: {}", modificationType);
- break;
}
}
}
import org.opendaylight.yangtools.util.concurrent.FluentFutures;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* @author Thomas Pantelis
*/
-public class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
+public final class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
private static final Logger LOG = LoggerFactory.getLogger(CarEntryDataTreeCommitCohort.class);
private static final QName YEAR_QNAME = QName.create(Cars.QNAME, "year").intern();
private static final NodeIdentifier YEAR_NODE_ID = new NodeIdentifier(YEAR_QNAME);
@Override
- public FluentFuture<PostCanCommitStep> canCommit(Object txId, SchemaContext ctx,
- Collection<DOMDataTreeCandidate> candidates) {
+ public FluentFuture<PostCanCommitStep> canCommit(final Object txId, final EffectiveModelContext ctx,
+ final Collection<DOMDataTreeCandidate> candidates) {
for (DOMDataTreeCandidate candidate : candidates) {
// Simple data validation - verify the year, if present, is >= 1990
final DataTreeCandidateNode rootNode = candidate.getRootNode();
- final Optional<NormalizedNode<?, ?>> dataAfter = rootNode.getDataAfter();
+ final NormalizedNode dataAfter = rootNode.dataAfter();
LOG.info("In canCommit: modificationType: {}, dataBefore: {}, dataAfter: {}",
- rootNode.getModificationType(), rootNode.getDataBefore(), dataAfter);
+ rootNode.modificationType(), rootNode.dataBefore(), dataAfter);
// Note: we don't want to process DELETE modifications but we don't need to explicitly check the
// ModificationType because dataAfter will not be present. Also dataAfter *should* always contain a
// MapEntryNode but we verify anyway.
- if (dataAfter.isPresent()) {
- final NormalizedNode<?, ?> normalizedNode = dataAfter.get();
- Verify.verify(normalizedNode instanceof DataContainerNode,
- "Expected type DataContainerNode, actual was %s", normalizedNode.getClass());
- DataContainerNode<?> entryNode = (DataContainerNode<?>) normalizedNode;
- final Optional<DataContainerChild<? extends PathArgument, ?>> possibleYear =
- entryNode.getChild(YEAR_NODE_ID);
+ if (dataAfter != null) {
+ Verify.verify(dataAfter instanceof DataContainerNode,
+ "Expected type DataContainerNode, actual was %s", dataAfter.getClass());
+ DataContainerNode entryNode = (DataContainerNode) dataAfter;
+ final Optional<DataContainerChild> possibleYear = entryNode.findChildByArg(YEAR_NODE_ID);
if (possibleYear.isPresent()) {
- final Number year = (Number) possibleYear.get().getValue();
+ final Number year = (Number) possibleYear.orElseThrow().body();
LOG.info("year is {}", year);
- if (!(year.longValue() >= 1990)) {
+ if (year.longValue() < 1990) {
return FluentFutures.immediateFailedFluentFuture(new DataValidationFailedException(
DOMDataTreeIdentifier.class, candidate.getRootPath(),
String.format("Invalid year %d - year must be >= 1990", year)));
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.eos.binding.api.Entity;
-import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipChange;
import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipListener;
import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService;
import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntryBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* @author Thomas Pantelis
*/
-@SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS")
-public class CarProvider implements CarService {
- private static final Logger LOG_PURCHASE_CAR = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
- private static final Logger LOG_CAR_PROVIDER = LoggerFactory.getLogger(CarProvider.class);
+@Singleton
+@Component(service = { })
+public final class CarProvider {
+ private static final Logger LOG = LoggerFactory.getLogger(CarProvider.class);
private static final String ENTITY_TYPE = "cars";
private static final InstanceIdentifier<Cars> CARS_IID = InstanceIdentifier.builder(Cars.class).build();
- private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.create(
+ private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.of(
LogicalDatastoreType.CONFIGURATION, CARS_IID);
private final DataBroker dataProvider;
private final AtomicLong succcessCounter = new AtomicLong();
private final AtomicLong failureCounter = new AtomicLong();
- private final CarEntityOwnershipListener ownershipListener = new CarEntityOwnershipListener();
- private final AtomicBoolean registeredListener = new AtomicBoolean();
+ private final EntityOwnershipListener ownershipListener = (entity, change, inJeopardy) ->
+ LOG.info("ownershipChanged: entity={} change={} inJeopardy={}", entity, change, inJeopardy);
- private final Set<ListenerRegistration<?>> carsDclRegistrations = ConcurrentHashMap.newKeySet();
- private final Set<ListenerRegistration<CarDataTreeChangeListener>> carsDtclRegistrations =
- ConcurrentHashMap.newKeySet();
+ private final AtomicBoolean registeredListener = new AtomicBoolean();
+ private final AtomicReference<Registration> commitCohortReg = new AtomicReference<>();
+ private final Set<ObjectRegistration<?>> carsDclRegistrations = ConcurrentHashMap.newKeySet();
+ private final Set<Registration> regs = new HashSet<>();
+ private final Set<Registration> carsDtclRegistrations = ConcurrentHashMap.newKeySet();
private volatile Thread testThread;
private volatile boolean stopThread;
- private final AtomicReference<DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort>> commitCohortReg =
- new AtomicReference<>();
- public CarProvider(final DataBroker dataProvider, final EntityOwnershipService ownershipService,
- final DOMDataBroker domDataBroker) {
+ @Inject
+ @Activate
+ public CarProvider(@Reference final DataBroker dataProvider,
+ @Reference final EntityOwnershipService ownershipService, @Reference final DOMDataBroker domDataBroker,
+ @Reference final RpcProviderService rpcProviderService) {
this.dataProvider = dataProvider;
this.ownershipService = ownershipService;
this.domDataBroker = domDataBroker;
+ regs.add(rpcProviderService.registerRpcImplementations(
+ (StressTest) this::stressTest,
+ (StopStressTest) this::stopStressTest,
+ (RegisterOwnership) this::registerOwnership,
+ (UnregisterOwnership) this::unregisterOwnership,
+ (RegisterLoggingDtcl) this::registerLoggingDtcl,
+ (UnregisterLoggingDtcls) this::unregisterLoggingDtcls,
+ (RegisterCommitCohort) this::registerCommitCohort,
+ (UnregisterCommitCohort) this::unregisterCommitCohort));
}
+ @PreDestroy
+ @Deactivate
public void close() {
stopThread();
closeCommitCohortRegistration();
+ regs.forEach(Registration::close);
+ regs.clear();
}
private void stopThread() {
}
}
- @Override
- public ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
+ private ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
final int inputRate;
final long inputCount;
// If rate is not provided, or given as zero, then just return.
if (input.getRate() == null || input.getRate().toJava() == 0) {
- LOG_PURCHASE_CAR.info("Exiting stress test as no rate is given.");
+ LOG.info("Exiting stress test as no rate is given.");
return Futures.immediateFuture(RpcResultBuilder.<StressTestOutput>failed()
.withError(ErrorType.PROTOCOL, "invalid rate")
.build());
inputCount = 0;
}
- LOG_PURCHASE_CAR.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
+ LOG.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
stopThread();
// clear counters
try {
tx.commit().get(5, TimeUnit.SECONDS);
} catch (TimeoutException | InterruptedException | ExecutionException e) {
- LOG_PURCHASE_CAR.error("Put Cars failed",e);
+ LOG.error("Put Cars failed",e);
return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
}
public void onFailure(final Throwable ex) {
// Transaction failed
failureCounter.getAndIncrement();
- LOG_CAR_PROVIDER.error("Put Cars failed", ex);
+ LOG.error("Put Cars failed", ex);
}
}, MoreExecutors.directExecutor());
try {
}
if (count.get() % 1000 == 0) {
- LOG_PURCHASE_CAR.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
+ LOG.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
}
// Check if a count is specified in input and we have created that many cars.
}
}
- LOG_PURCHASE_CAR.info("Stress test thread stopping after creating {} cars.", count.get());
+ LOG.info("Stress test thread stopping after creating {} cars.", count.get());
});
testThread.start();
return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
}
- @Override
- public ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
+ private ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
stopThread();
StopStressTestOutputBuilder stopStressTestOutput;
stopStressTestOutput = new StopStressTestOutputBuilder()
.setFailureCount(Uint32.valueOf(failureCounter.longValue()));
final StopStressTestOutput result = stopStressTestOutput.build();
- LOG_PURCHASE_CAR.info("Executed Stop Stress test; No. of cars created {}; "
- + "No. of cars failed {}; ", succcessCounter, failureCounter);
+ LOG.info("Executed Stop Stress test; No. of cars created {}; No. of cars failed {}; ",
+ succcessCounter, failureCounter);
// clear counters
succcessCounter.set(0);
failureCounter.set(0);
return Futures.immediateFuture(RpcResultBuilder.<StopStressTestOutput>success(result).build());
}
-
- @Override
- public ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
+ private ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
if (registeredListener.compareAndSet(false, true)) {
ownershipService.registerListener(ENTITY_TYPE, ownershipListener);
}
return RpcResultBuilder.success(new RegisterOwnershipOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
+ private ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
final UnregisterOwnershipInput input) {
return RpcResultBuilder.success(new UnregisterOwnershipOutputBuilder().build()).buildFuture();
}
- private static class CarEntityOwnershipListener implements EntityOwnershipListener {
- @Override
- public void ownershipChanged(final EntityOwnershipChange ownershipChange) {
- LOG_CAR_PROVIDER.info("ownershipChanged: {}", ownershipChange);
- }
- }
-
- @Override
- public ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
+ private ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
final RegisterLoggingDtclInput input) {
- LOG_CAR_PROVIDER.info("Registering a new CarDataTreeChangeListener");
- final ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration =
- dataProvider.registerDataTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
-
- carsDtclRegistrations.add(carsDtclRegistration);
+ LOG.info("Registering a new CarDataTreeChangeListener");
+ final var reg = dataProvider.registerTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
+ carsDtclRegistrations.add(reg);
return RpcResultBuilder.success(new RegisterLoggingDtclOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
+ private ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
final UnregisterLoggingDtclsInput input) {
- LOG_CAR_PROVIDER.info("Unregistering the CarDataTreeChangeListener(s)");
+ LOG.info("Unregistering the CarDataTreeChangeListener(s)");
synchronized (carsDtclRegistrations) {
int numListeners = 0;
- for (ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration : carsDtclRegistrations) {
+ for (var carsDtclRegistration : carsDtclRegistrations) {
carsDtclRegistration.close();
numListeners++;
}
carsDtclRegistrations.clear();
- LOG_CAR_PROVIDER.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
+ LOG.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
}
return RpcResultBuilder.success(new UnregisterLoggingDtclsOutputBuilder().build()).buildFuture();
}
- @Override
@SuppressWarnings("checkstyle:IllegalCatch")
- public ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
+ private ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
final UnregisterCommitCohortInput input) {
closeCommitCohortRegistration();
}
private void closeCommitCohortRegistration() {
- final DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort> reg = commitCohortReg.getAndSet(null);
+ final var reg = commitCohortReg.getAndSet(null);
if (reg != null) {
reg.close();
- LOG_CAR_PROVIDER.info("Unregistered commit cohort");
+ LOG.info("Unregistered commit cohort");
}
}
- @Override
- public synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
+ private synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
final RegisterCommitCohortInput input) {
if (commitCohortReg.get() != null) {
return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
}
- final DOMDataTreeCommitCohortRegistry commitCohortRegistry = domDataBroker.getExtensions().getInstance(
- DOMDataTreeCommitCohortRegistry.class);
-
+ final var commitCohortRegistry = domDataBroker.extension(CommitCohortExtension.class);
if (commitCohortRegistry == null) {
// Shouldn't happen
return RpcResultBuilder.<RegisterCommitCohortOutput>failed().withError(ErrorType.APPLICATION,
// to address all list entries, the second path argument is wild-carded by specifying just the CarEntry.QNAME.
final YangInstanceIdentifier carEntryPath = YangInstanceIdentifier.builder(
YangInstanceIdentifier.of(Cars.QNAME)).node(CarEntry.QNAME).node(CarEntry.QNAME).build();
- commitCohortReg.set(commitCohortRegistry.registerCommitCohort(new DOMDataTreeIdentifier(
+ commitCohortReg.set(commitCohortRegistry.registerCommitCohort(DOMDataTreeIdentifier.of(
LogicalDatastoreType.CONFIGURATION, carEntryPath), new CarEntryDataTreeCommitCohort()));
- LOG_CAR_PROVIDER.info("Registered commit cohort");
+ LOG.info("Registered commit cohort");
return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
}
*/
package org.opendaylight.controller.clustering.it.provider;
-import static akka.actor.ActorRef.noSender;
-
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
+import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import com.google.common.base.Strings;
-import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory;
import org.opendaylight.controller.clustering.it.provider.impl.FlappingSingletonService;
import org.opendaylight.controller.clustering.it.provider.impl.GetConstantService;
-import org.opendaylight.controller.clustering.it.provider.impl.IdIntsDOMDataTreeLIstener;
import org.opendaylight.controller.clustering.it.provider.impl.IdIntsListener;
-import org.opendaylight.controller.clustering.it.provider.impl.PrefixLeaderHandler;
-import org.opendaylight.controller.clustering.it.provider.impl.PrefixShardHandler;
-import org.opendaylight.controller.clustering.it.provider.impl.ProduceTransactionsHandler;
import org.opendaylight.controller.clustering.it.provider.impl.PublishNotificationsTask;
import org.opendaylight.controller.clustering.it.provider.impl.RoutedGetConstantService;
import org.opendaylight.controller.clustering.it.provider.impl.SingletonGetConstantService;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.DeconfigureIdIntsShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.DeconfigureIdIntsShardOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;
-public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService {
+@Singleton
+@Component(service = {})
+public final class MdsalLowLevelTestProvider {
private static final Logger LOG = LoggerFactory.getLogger(MdsalLowLevelTestProvider.class);
- private final RpcProviderService rpcRegistry;
- private final ObjectRegistration<OdlMdsalLowlevelControlService> registration;
- private final DistributedShardFactory distributedShardFactory;
+ private final Registration registration;
private final DistributedDataStoreInterface configDataStore;
- private final DOMDataTreeService domDataTreeService;
private final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer;
private final DOMDataBroker domDataBroker;
private final NotificationPublishService notificationPublishService;
private final NotificationService notificationService;
- private final DOMSchemaService schemaService;
private final ClusterSingletonServiceProvider singletonService;
private final DOMRpcProviderService domRpcService;
- private final PrefixLeaderHandler prefixLeaderHandler;
- private final PrefixShardHandler prefixShardHandler;
- private final DOMDataTreeChangeService domDataTreeChangeService;
- private final ActorSystem actorSystem;
-
- private final Map<InstanceIdentifier<?>, DOMRpcImplementationRegistration<RoutedGetConstantService>>
- routedRegistrations = new HashMap<>();
+ private final DataTreeChangeExtension dataTreeChangeExtension;
- private final Map<String, ListenerRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+ private final Map<InstanceIdentifier<?>, Registration> routedRegistrations = new HashMap<>();
+ private final Map<String, ObjectRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+ private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
- private DOMRpcImplementationRegistration<GetConstantService> globalGetConstantRegistration = null;
- private ClusterSingletonServiceRegistration getSingletonConstantRegistration;
+ private Registration globalGetConstantRegistration = null;
+ private Registration getSingletonConstantRegistration;
private FlappingSingletonService flappingSingletonService;
- private ListenerRegistration<DOMDataTreeChangeListener> dtclReg;
+ private Registration dtclReg;
private IdIntsListener idIntsListener;
- private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
- private ListenerRegistration<IdIntsDOMDataTreeLIstener> ddtlReg;
- private IdIntsDOMDataTreeLIstener idIntsDdtl;
-
-
-
- public MdsalLowLevelTestProvider(final RpcProviderService rpcRegistry,
- final DOMRpcProviderService domRpcService,
- final ClusterSingletonServiceProvider singletonService,
- final DOMSchemaService schemaService,
- final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
- final NotificationPublishService notificationPublishService,
- final NotificationService notificationService,
- final DOMDataBroker domDataBroker,
- final DOMDataTreeService domDataTreeService,
- final DistributedShardFactory distributedShardFactory,
- final DistributedDataStoreInterface configDataStore,
- final ActorSystemProvider actorSystemProvider) {
- this.rpcRegistry = rpcRegistry;
+
+ @Inject
+ @Activate
+ public MdsalLowLevelTestProvider(
+ @Reference final RpcProviderService rpcRegistry,
+ @Reference final DOMRpcProviderService domRpcService,
+ @Reference final ClusterSingletonServiceProvider singletonService,
+ @Reference final DOMSchemaService schemaService,
+ @Reference final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
+ @Reference final NotificationPublishService notificationPublishService,
+ @Reference final NotificationService notificationService,
+ @Reference final DOMDataBroker domDataBroker,
+ @Reference final DistributedDataStoreInterface configDataStore) {
this.domRpcService = domRpcService;
this.singletonService = singletonService;
- this.schemaService = schemaService;
this.bindingNormalizedNodeSerializer = bindingNormalizedNodeSerializer;
this.notificationPublishService = notificationPublishService;
this.notificationService = notificationService;
this.domDataBroker = domDataBroker;
- this.domDataTreeService = domDataTreeService;
- this.distributedShardFactory = distributedShardFactory;
this.configDataStore = configDataStore;
- this.actorSystem = actorSystemProvider.getActorSystem();
-
- this.prefixLeaderHandler = new PrefixLeaderHandler(domDataTreeService, bindingNormalizedNodeSerializer);
- domDataTreeChangeService = domDataBroker.getExtensions().getInstance(DOMDataTreeChangeService.class);
- registration = rpcRegistry.registerRpcImplementation(OdlMdsalLowlevelControlService.class, this);
+ dataTreeChangeExtension = domDataBroker.extension(DataTreeChangeExtension.class);
+
+ registration = rpcRegistry.registerRpcImplementations(
+ (UnregisterSingletonConstant) this::unregisterSingletonConstant,
+ (StartPublishNotifications) this::startPublishNotifications,
+ (SubscribeDdtl) this::subscribeDdtl,
+ (WriteTransactions) this::writeTransactions,
+ (IsClientAborted) this::isClientAborted,
+ (RemoveShardReplica) this::removeShardReplica,
+ (SubscribeYnl) this::subscribeYnl,
+ (UnregisterBoundConstant) this::unregisterBoundConstant,
+ (RegisterSingletonConstant) this::registerSingletonConstant,
+ (RegisterDefaultConstant) this::registerDefaultConstant,
+ (UnregisterConstant) this::unregisterConstant,
+ (UnregisterFlappingSingleton) this::unregisterFlappingSingleton,
+ (AddShardReplica) this::addShardReplica,
+ (RegisterBoundConstant) this::registerBoundConstant,
+ (RegisterFlappingSingleton) this::registerFlappingSingleton,
+ (UnsubscribeDdtl) this::unsubscribeDdtl,
+ (UnsubscribeYnl) this::unsubscribeYnl,
+ (CheckPublishNotifications) this::checkPublishNotifications,
+ (ShutdownShardReplica) this::shutdownShardReplica,
+ (RegisterConstant) this::registerConstant,
+ (UnregisterDefaultConstant) this::unregisterDefaultConstant,
+ (SubscribeDtcl) this::subscribeDtcl,
+ (UnsubscribeDtcl) this::unsubscribeDtcl);
+ }
- prefixShardHandler = new PrefixShardHandler(distributedShardFactory, domDataTreeService,
- bindingNormalizedNodeSerializer);
+ @PreDestroy
+ @Deactivate
+ public void close() {
+ registration.close();
}
- @Override
@SuppressWarnings("checkstyle:IllegalCatch")
- public ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
+ private ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
final UnregisterSingletonConstantInput input) {
LOG.info("In unregisterSingletonConstant");
if (getSingletonConstantRegistration == null) {
- return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed().withError(ErrorType.RPC, "data-missing",
- "No prior RPC was registered").buildFuture();
+ return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+ .buildFuture();
}
try {
}
}
- @Override
- public ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
+ private ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
final StartPublishNotificationsInput input) {
LOG.info("In startPublishNotifications - input: {}", input);
return RpcResultBuilder.success(new StartPublishNotificationsOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
+ private ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
LOG.info("In subscribeDtcl - input: {}", input);
if (dtclReg != null) {
- return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already a DataTreeChangeListener registered for id-ints").buildFuture();
+ return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+ "There is already a DataTreeChangeListener registered for id-ints")
+ .buildFuture();
}
idIntsListener = new IdIntsListener();
- dtclReg = domDataTreeChangeService.registerDataTreeChangeListener(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, WriteTransactionsHandler.ID_INT_YID),
+ dtclReg = dataTreeChangeExtension.registerTreeChangeListener(
+ DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, WriteTransactionsHandler.ID_INT_YID),
idIntsListener);
return RpcResultBuilder.success(new SubscribeDtclOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
+ private ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
return WriteTransactionsHandler.start(domDataBroker, input);
}
- @Override
- public ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
+ private ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
return null;
}
- @Override
- public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
+ private ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
final RemoveShardReplicaInput input) {
return null;
}
- @Override
- public ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
+ private ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
LOG.info("In subscribeYnl - input: {}", input);
if (ynlRegistrations.containsKey(input.getId())) {
- return RpcResultBuilder.<SubscribeYnlOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already a listener registered for id: " + input.getId()).buildFuture();
+ return RpcResultBuilder.<SubscribeYnlOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+ "There is already a listener registered for id: " + input.getId())
+ .buildFuture();
}
- ynlRegistrations.put(input.getId(),
- notificationService.registerNotificationListener(new YnlListener(input.getId())));
+ final var id = input.getId();
+ final var listener = new YnlListener(id);
+ final var reg = notificationService.registerListener(IdSequence.class, listener);
+ ynlRegistrations.put(id, new AbstractObjectRegistration<>(listener) {
+ @Override
+ protected void removeRegistration() {
+ reg.close();
+ }
+ });
return RpcResultBuilder.success(new SubscribeYnlOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<RemovePrefixShardOutput>> removePrefixShard(final RemovePrefixShardInput input) {
- LOG.info("In removePrefixShard - input: {}", input);
-
- return prefixShardHandler.onRemovePrefixShard(input);
- }
-
- @Override
- public ListenableFuture<RpcResult<BecomePrefixLeaderOutput>> becomePrefixLeader(
- final BecomePrefixLeaderInput input) {
- LOG.info("n becomePrefixLeader - input: {}", input);
-
- return prefixLeaderHandler.makeLeaderLocal(input);
- }
- @Override
- public ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
+ private ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
final UnregisterBoundConstantInput input) {
LOG.info("In unregisterBoundConstant - {}", input);
- final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
- routedRegistrations.remove(input.getContext());
-
+ final var rpcRegistration = routedRegistrations.remove(input.getContext());
if (rpcRegistration == null) {
- return RpcResultBuilder.<UnregisterBoundConstantOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior RPC was registered for " + input.getContext()).buildFuture();
+ return RpcResultBuilder.<UnregisterBoundConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+ "No prior RPC was registered for " + input.getContext())
+ .buildFuture();
}
rpcRegistration.close();
return RpcResultBuilder.success(new UnregisterBoundConstantOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
+ private ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
final RegisterSingletonConstantInput input) {
LOG.info("In registerSingletonConstant - input: {}", input);
if (input.getConstant() == null) {
- return RpcResultBuilder.<RegisterSingletonConstantOutput>failed().withError(
- ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+ return RpcResultBuilder.<RegisterSingletonConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+ .buildFuture();
}
getSingletonConstantRegistration =
return RpcResultBuilder.success(new RegisterSingletonConstantOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
+ private ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
final RegisterDefaultConstantInput input) {
return null;
}
- @Override
- public ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
+ private ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
final UnregisterConstantInput input) {
LOG.info("In unregisterConstant");
if (globalGetConstantRegistration == null) {
- return RpcResultBuilder.<UnregisterConstantOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+ return RpcResultBuilder.<UnregisterConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+ .buildFuture();
}
globalGetConstantRegistration.close();
globalGetConstantRegistration = null;
- return Futures.immediateFuture(RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).build());
+ return RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
+ private ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
final UnregisterFlappingSingletonInput input) {
LOG.info("In unregisterFlappingSingleton");
if (flappingSingletonService == null) {
- return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+ return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+ .buildFuture();
}
final long flapCount = flappingSingletonService.setInactive();
.buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
- return null;
+ private ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+ throw new UnsupportedOperationException();
}
- @Override
- public ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
- LOG.info("In subscribeDdtl");
-
- if (ddtlReg != null) {
- return RpcResultBuilder.<SubscribeDdtlOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already a listener registered for id-ints").buildFuture();
- }
-
- idIntsDdtl = new IdIntsDOMDataTreeLIstener();
-
- try {
- ddtlReg = domDataTreeService.registerListener(idIntsDdtl,
- Collections.singleton(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
- ProduceTransactionsHandler.ID_INT_YID)),
- true, Collections.emptyList());
- } catch (DOMDataTreeLoopException e) {
- LOG.error("Failed to register DOMDataTreeListener", e);
- return RpcResultBuilder.<SubscribeDdtlOutput>failed().withError(
- ErrorType.APPLICATION, "Failed to register DOMDataTreeListener", e).buildFuture();
- }
-
- return RpcResultBuilder.success(new SubscribeDdtlOutputBuilder().build()).buildFuture();
+ private ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
+ throw new UnsupportedOperationException();
}
- @Override
- public ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
+ private ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
final RegisterBoundConstantInput input) {
LOG.info("In registerBoundConstant - input: {}", input);
if (input.getContext() == null) {
return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
- ErrorType.RPC, "invalid-value", "Context value is null").buildFuture();
+ ErrorType.RPC, ErrorTag.INVALID_VALUE, "Context value is null").buildFuture();
}
if (input.getConstant() == null) {
return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
- ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+ ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null").buildFuture();
}
if (routedRegistrations.containsKey(input.getContext())) {
return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already an rpc registered for context: " + input.getContext()).buildFuture();
+ ErrorTag.DATA_EXISTS, "There is already an rpc registered for context: " + input.getContext())
+ .buildFuture();
}
- final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
- RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
- input.getConstant(), input.getContext());
+ final var rpcRegistration = RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
+ input.getConstant(), input.getContext());
routedRegistrations.put(input.getContext(), rpcRegistration);
return RpcResultBuilder.success(new RegisterBoundConstantOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
+ private ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
final RegisterFlappingSingletonInput input) {
LOG.info("In registerFlappingSingleton");
if (flappingSingletonService != null) {
- return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already an rpc registered").buildFuture();
+ return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+ .buildFuture();
}
flappingSingletonService = new FlappingSingletonService(singletonService);
return RpcResultBuilder.success(new RegisterFlappingSingletonOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
+ private ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
LOG.info("In unsubscribeDtcl");
if (idIntsListener == null || dtclReg == null) {
- return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior listener was registered").buildFuture();
+ return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior listener was registered")
+ .buildFuture();
}
long timeout = 120L;
dtclReg = null;
if (!idIntsListener.hasTriggered()) {
- return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "operation-failed",
- "id-ints listener has not received any notifications.").buildFuture();
+ return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+ .withError(ErrorType.APPLICATION, ErrorTag.OPERATION_FAILED,
+ "id-ints listener has not received any notifications.")
+ .buildFuture();
}
try (DOMDataTreeReadTransaction rTx = domDataBroker.newReadOnlyTransaction()) {
- final Optional<NormalizedNode<?, ?>> readResult = rTx.read(LogicalDatastoreType.CONFIGURATION,
+ final Optional<NormalizedNode> readResult = rTx.read(LogicalDatastoreType.CONFIGURATION,
WriteTransactionsHandler.ID_INT_YID).get();
if (!readResult.isPresent()) {
- return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "data-missing",
- "No data read from id-ints list").buildFuture();
+ return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+ .withError(ErrorType.APPLICATION, ErrorTag.DATA_MISSING, "No data read from id-ints list")
+ .buildFuture();
}
- final boolean nodesEqual = idIntsListener.checkEqual(readResult.get());
+ final boolean nodesEqual = idIntsListener.checkEqual(readResult.orElseThrow());
if (!nodesEqual) {
LOG.error("Final read of id-int does not match IdIntsListener's copy. {}",
- idIntsListener.diffWithLocalCopy(readResult.get()));
+ idIntsListener.diffWithLocalCopy(readResult.orElseThrow()));
}
- return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual))
+ return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual).build())
.buildFuture();
} catch (final InterruptedException | ExecutionException e) {
}
}
- @Override
- public ListenableFuture<RpcResult<CreatePrefixShardOutput>> createPrefixShard(final CreatePrefixShardInput input) {
- LOG.info("In createPrefixShard - input: {}", input);
-
- return prefixShardHandler.onCreatePrefixShard(input);
- }
-
- @Override
- public ListenableFuture<RpcResult<DeconfigureIdIntsShardOutput>> deconfigureIdIntsShard(
- final DeconfigureIdIntsShardInput input) {
- return null;
- }
-
- @Override
- public ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
+ private ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
LOG.info("In unsubscribeYnl - input: {}", input);
if (!ynlRegistrations.containsKey(input.getId())) {
- return RpcResultBuilder.<UnsubscribeYnlOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior listener was registered for " + input.getId()).buildFuture();
+ return RpcResultBuilder.<UnsubscribeYnlOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+ "No prior listener was registered for " + input.getId())
+ .buildFuture();
}
- final ListenerRegistration<YnlListener> reg = ynlRegistrations.remove(input.getId());
- final UnsubscribeYnlOutput output = reg.getInstance().getOutput();
-
- reg.close();
-
- return RpcResultBuilder.<UnsubscribeYnlOutput>success().withResult(output).buildFuture();
+ try (var reg = ynlRegistrations.remove(input.getId())) {
+ return RpcResultBuilder.<UnsubscribeYnlOutput>success()
+ .withResult(reg.getInstance().getOutput())
+ .buildFuture();
+ }
}
- @Override
- public ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
+ private ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
final CheckPublishNotificationsInput input) {
LOG.info("In checkPublishNotifications - input: {}", input);
final PublishNotificationsTask task = publishNotificationsTasks.get(input.getId());
if (task == null) {
- return Futures.immediateFuture(RpcResultBuilder.success(
- new CheckPublishNotificationsOutputBuilder().setActive(false)).build());
+ return RpcResultBuilder.success(new CheckPublishNotificationsOutputBuilder().setActive(false).build())
+ .buildFuture();
}
final CheckPublishNotificationsOutputBuilder checkPublishNotificationsOutputBuilder =
return RpcResultBuilder.success(output).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<ProduceTransactionsOutput>> produceTransactions(
- final ProduceTransactionsInput input) {
- LOG.info("In produceTransactions - input: {}", input);
- return ProduceTransactionsHandler.start(domDataTreeService, input);
- }
-
- @Override
- public ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
+ private ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
final ShutdownShardReplicaInput input) {
LOG.info("In shutdownShardReplica - input: {}", input);
final String shardName = input.getShardName();
if (Strings.isNullOrEmpty(shardName)) {
- return RpcResultBuilder.<ShutdownShardReplicaOutput>failed().withError(ErrorType.RPC, "bad-element",
- shardName + "is not a valid shard name").buildFuture();
+ return RpcResultBuilder.<ShutdownShardReplicaOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.BAD_ELEMENT, shardName + "is not a valid shard name")
+ .buildFuture();
}
return shutdownShardGracefully(shardName, new ShutdownShardReplicaOutputBuilder().build());
}
- @Override
- public ListenableFuture<RpcResult<ShutdownPrefixShardReplicaOutput>> shutdownPrefixShardReplica(
- final ShutdownPrefixShardReplicaInput input) {
- LOG.info("shutdownPrefixShardReplica - input: {}", input);
-
- final InstanceIdentifier<?> shardPrefix = input.getPrefix();
-
- if (shardPrefix == null) {
- return RpcResultBuilder.<ShutdownPrefixShardReplicaOutput>failed().withError(ErrorType.RPC, "bad-element",
- "A valid shard prefix must be specified").buildFuture();
- }
-
- final YangInstanceIdentifier shardPath = bindingNormalizedNodeSerializer.toYangInstanceIdentifier(shardPrefix);
- final String cleanPrefixShardName = ClusterUtils.getCleanShardName(shardPath);
-
- return shutdownShardGracefully(cleanPrefixShardName, new ShutdownPrefixShardReplicaOutputBuilder().build());
- }
-
private <T> SettableFuture<RpcResult<T>> shutdownShardGracefully(final String shardName, final T success) {
final SettableFuture<RpcResult<T>> rpcResult = SettableFuture.create();
final ActorUtils context = configDataStore.getActorUtils();
long timeoutInMS = Math.max(context.getDatastoreContext().getShardRaftConfig()
.getElectionTimeOutInterval().$times(3).toMillis(), 10000);
final FiniteDuration duration = FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS);
- final scala.concurrent.Promise<Boolean> shutdownShardAsk = akka.dispatch.Futures.promise();
+ final scala.concurrent.Promise<Boolean> shutdownShardAsk = Futures.promise();
context.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
@Override
return rpcResult;
}
- @Override
- public ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
+ private ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
LOG.info("In registerConstant - input: {}", input);
if (input.getConstant() == null) {
- return RpcResultBuilder.<RegisterConstantOutput>failed().withError(
- ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+ return RpcResultBuilder.<RegisterConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+ .buildFuture();
}
if (globalGetConstantRegistration != null) {
- return RpcResultBuilder.<RegisterConstantOutput>failed().withError(ErrorType.RPC,
- "data-exists", "There is already an rpc registered").buildFuture();
+ return RpcResultBuilder.<RegisterConstantOutput>failed()
+ .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+ .buildFuture();
}
globalGetConstantRegistration = GetConstantService.registerNew(domRpcService, input.getConstant());
return RpcResultBuilder.success(new RegisterConstantOutputBuilder().build()).buildFuture();
}
- @Override
- public ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
+ private ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
final UnregisterDefaultConstantInput input) {
- return null;
+ throw new UnsupportedOperationException();
}
- @Override
@SuppressWarnings("checkstyle:IllegalCatch")
- public ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
- LOG.info("In unsubscribeDdtl");
-
- if (idIntsDdtl == null || ddtlReg == null) {
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(
- ErrorType.RPC, "data-missing", "No prior listener was registered").buildFuture();
- }
-
- long timeout = 120L;
- try {
- idIntsDdtl.tryFinishProcessing().get(timeout, TimeUnit.SECONDS);
- } catch (InterruptedException | ExecutionException | TimeoutException e) {
- LOG.error("Unable to finish notification processing", e);
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
- "Unable to finish notification processing in " + timeout + " seconds", e).buildFuture();
- }
-
- ddtlReg.close();
- ddtlReg = null;
-
- if (!idIntsDdtl.hasTriggered()) {
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
- "No notification received.", "id-ints listener has not received any notifications").buildFuture();
- }
-
- final String shardName = ClusterUtils.getCleanShardName(ProduceTransactionsHandler.ID_INTS_YID);
- LOG.debug("Creating distributed datastore client for shard {}", shardName);
-
- final ActorUtils actorUtils = configDataStore.getActorUtils();
- final Props distributedDataStoreClientProps =
- SimpleDataStoreClientActor.props(actorUtils.getCurrentMemberName(),
- "Shard-" + shardName, actorUtils, shardName);
-
- final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
- final DataStoreClient distributedDataStoreClient;
- try {
- distributedDataStoreClient = SimpleDataStoreClientActor
- .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
- } catch (RuntimeException e) {
- LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
- clientActor.tell(PoisonPill.getInstance(), noSender());
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed()
- .withError(ErrorType.APPLICATION, "Unable to create DataStoreClient for read", e).buildFuture();
- }
-
- final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
- final ClientTransaction tx = localHistory.createTransaction();
- final ListenableFuture<java.util.Optional<NormalizedNode<?, ?>>> read =
- tx.read(YangInstanceIdentifier.of(ProduceTransactionsHandler.ID_INT));
-
- tx.abort();
- localHistory.close();
- try {
- final java.util.Optional<NormalizedNode<?, ?>> optional = read.get();
- if (!optional.isPresent()) {
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
- "data-missing", "Final read from id-ints is empty").buildFuture();
- }
-
- return RpcResultBuilder.success(new UnsubscribeDdtlOutputBuilder().setCopyMatches(
- idIntsDdtl.checkEqual(optional.get()))).buildFuture();
-
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Unable to read data to verify ddtl data", e);
- return RpcResultBuilder.<UnsubscribeDdtlOutput>failed()
- .withError(ErrorType.APPLICATION, "Final read from id-ints failed", e).buildFuture();
- } finally {
- distributedDataStoreClient.close();
- clientActor.tell(PoisonPill.getInstance(), noSender());
- }
+ private ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
+ throw new UnsupportedOperationException();
}
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.mdsal.binding.api.NotificationPublishService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
- private final NotificationPublishService notificationProvider;
-
- public PurchaseCarProvider(final NotificationPublishService notificationProvider) {
- this.notificationProvider = requireNonNull(notificationProvider);
- }
-
- @Override
- public ListenableFuture<RpcResult<BuyCarOutput>> buyCar(final BuyCarInput input) {
- LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
-
- return Futures.transform(notificationProvider.offerNotification(new CarBoughtBuilder()
- .setCarId(input.getCarId())
- .setPersonId(input.getPersonId())
- .build()),
- result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
- MoreExecutors.directExecutor());
- }
-
- @Override
- public void close() {
-
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.clustering.it.provider.impl;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class FlappingSingletonService implements ClusterSingletonService {
-
+public final class FlappingSingletonService implements ClusterSingletonService {
private static final Logger LOG = LoggerFactory.getLogger(FlappingSingletonService.class);
-
private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
- ServiceGroupIdentifier.create("flapping-singleton-service");
+ new ServiceGroupIdentifier("flapping-singleton-service");
private final ClusterSingletonServiceProvider singletonServiceProvider;
private final AtomicBoolean active = new AtomicBoolean(true);
-
private final AtomicLong flapCount = new AtomicLong();
- private volatile ClusterSingletonServiceRegistration registration;
+
+ private volatile Registration registration;
public FlappingSingletonService(final ClusterSingletonServiceProvider singletonServiceProvider) {
LOG.debug("Registering flapping-singleton-service.");
-
- this.singletonServiceProvider = singletonServiceProvider;
+ this.singletonServiceProvider = requireNonNull(singletonServiceProvider);
registration = singletonServiceProvider.registerClusterSingletonService(this);
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import java.net.URI;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
import org.opendaylight.yangtools.yang.common.YangConstants;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class GetConstantService implements DOMRpcImplementation {
private static final Logger LOG = LoggerFactory.getLogger(GetConstantService.class);
- private static final QNameModule MODULE = QNameModule.create(
- URI.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15")).intern();
+ private static final QNameModule MODULE =
+ QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
this.constant = constant;
}
- public static DOMRpcImplementationRegistration<GetConstantService> registerNew(
- final DOMRpcProviderService rpcProviderService, final String constant) {
+ public static Registration registerNew(final DOMRpcProviderService rpcProviderService, final String constant) {
LOG.debug("Registering get-constant service, constant value: {}", constant);
return rpcProviderService.registerRpcImplementation(new GetConstantService(constant),
- DOMRpcIdentifier.create(SchemaPath.create(true, GET_CONSTANT)));
+ DOMRpcIdentifier.create(GET_CONSTANT));
}
@Override
- public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
+ public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
LOG.debug("get-constant invoked, current value: {}", constant);
- return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+ return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(OUTPUT))
- .withChild(ImmutableLeafNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(CONSTANT))
- .withValue(constant)
- .build())
+ .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
.build()));
}
}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListeningException;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class IdIntsDOMDataTreeLIstener implements DOMDataTreeListener {
-
- private static final Logger LOG = LoggerFactory.getLogger(IdIntsDOMDataTreeLIstener.class);
- private static final long SECOND_AS_NANO = 1000000000;
-
- private NormalizedNode<?, ?> localCopy = null;
- private final AtomicLong lastNotifTimestamp = new AtomicLong(0);
- private ScheduledFuture<?> scheduledFuture;
- private ScheduledExecutorService executorService;
-
- @Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes,
- final Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> subtrees) {
-
- // There should only be one candidate reported
- Preconditions.checkState(changes.size() == 1);
-
- lastNotifTimestamp.set(System.nanoTime());
-
- // do not log the change into debug, only use trace since it will lead to OOM on default heap settings
- LOG.debug("Received data tree changed");
-
- changes.forEach(change -> {
- if (change.getRootNode().getDataAfter().isPresent()) {
- LOG.trace("Received change, data before: {}, data after: {}",
- change.getRootNode().getDataBefore().isPresent()
- ? change.getRootNode().getDataBefore().get() : "",
- change.getRootNode().getDataAfter().get());
-
- if (localCopy == null || checkEqual(change.getRootNode().getDataBefore().get())) {
- localCopy = change.getRootNode().getDataAfter().get();
- } else {
- LOG.warn("Ignoring notification.");
- LOG.trace("Ignored notification content: {}", change);
- }
- } else {
- LOG.warn("getDataAfter() is missing from notification. change: {}", change);
- }
- });
- }
-
- @Override
- public void onDataTreeFailed(Collection<DOMDataTreeListeningException> causes) {
-
- }
-
- public boolean hasTriggered() {
- return localCopy != null;
- }
-
- public Future<Void> tryFinishProcessing() {
- executorService = Executors.newSingleThreadScheduledExecutor();
- final SettableFuture<Void> settableFuture = SettableFuture.create();
-
- scheduledFuture = executorService.scheduleAtFixedRate(new CheckFinishedTask(settableFuture),
- 0, 1, TimeUnit.SECONDS);
- return settableFuture;
- }
-
- public boolean checkEqual(final NormalizedNode<?, ?> expected) {
- return localCopy.equals(expected);
- }
-
- private class CheckFinishedTask implements Runnable {
-
- private final SettableFuture<Void> future;
-
- CheckFinishedTask(final SettableFuture<Void> future) {
- this.future = future;
- }
-
- @Override
- public void run() {
- if (System.nanoTime() - lastNotifTimestamp.get() > SECOND_AS_NANO * 4) {
- scheduledFuture.cancel(false);
- future.set(null);
-
- executorService.shutdown();
- }
- }
- }
-}
import com.google.common.util.concurrent.SettableFuture;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
import java.util.HashMap;
-import java.util.Map;
+import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
-
+public final class IdIntsListener implements DOMDataTreeChangeListener {
private static final Logger LOG = LoggerFactory.getLogger(IdIntsListener.class);
private static final long SECOND_AS_NANO = 1000000000;
- private volatile NormalizedNode<?, ?> localCopy;
private final AtomicLong lastNotifTimestamp = new AtomicLong(0);
- private ScheduledExecutorService executorService;
- private ScheduledFuture<?> scheduledFuture;
+ private ScheduledExecutorService executorService = null;
+ private ScheduledFuture<?> scheduledFuture = null;
+
+ private volatile NormalizedNode localCopy;
@Override
- public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ public void onInitialData() {
+ // Intentional no-op
+ }
+
+ @Override
+ public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
// There should only be one candidate reported
checkState(changes.size() == 1);
LOG.debug("Received data tree changed");
changes.forEach(change -> {
- if (change.getRootNode().getDataAfter().isPresent()) {
- LOG.trace("Received change, data before: {}, data after: {}",
- change.getRootNode().getDataBefore().isPresent()
- ? change.getRootNode().getDataBefore().get() : "",
- change.getRootNode().getDataAfter().get());
-
- localCopy = change.getRootNode().getDataAfter().get();
+ final var root = change.getRootNode();
+ final var after = root.dataAfter();
+ if (after != null) {
+ final var before = root.dataBefore();
+ LOG.trace("Received change, data before: {}, data after: {}", before != null ? before : "", after);
+ localCopy = after;
} else {
LOG.warn("getDataAfter() is missing from notification. change: {}", change);
}
return localCopy != null;
}
- public boolean checkEqual(final NormalizedNode<?, ?> expected) {
+ public boolean checkEqual(final NormalizedNode expected) {
return localCopy.equals(expected);
}
@SuppressFBWarnings("BC_UNCONFIRMED_CAST")
- public String diffWithLocalCopy(final NormalizedNode<?, ?> expected) {
+ public String diffWithLocalCopy(final NormalizedNode expected) {
return diffNodes((MapNode)expected, (MapNode)localCopy);
}
public Future<Void> tryFinishProcessing() {
executorService = Executors.newSingleThreadScheduledExecutor();
- final SettableFuture<Void> settableFuture = SettableFuture.create();
+ final var settableFuture = SettableFuture.<Void>create();
scheduledFuture = executorService.scheduleAtFixedRate(new CheckFinishedTask(settableFuture),
0, 1, TimeUnit.SECONDS);
public static String diffNodes(final MapNode expected, final MapNode actual) {
StringBuilder builder = new StringBuilder("MapNodes diff:");
- final YangInstanceIdentifier.NodeIdentifier itemNodeId = new YangInstanceIdentifier.NodeIdentifier(ITEM);
+ final var itemNodeId = new NodeIdentifier(ITEM);
- Map<NodeIdentifierWithPredicates, MapEntryNode> expIdIntMap = new HashMap<>();
- expected.getValue().forEach(node -> expIdIntMap.put(node.getIdentifier(), node));
+ final var expIdIntMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+ expected.body().forEach(node -> expIdIntMap.put(node.name(), node));
- actual.getValue().forEach(actIdInt -> {
- final MapEntryNode expIdInt = expIdIntMap.remove(actIdInt.getIdentifier());
+ actual.body().forEach(actIdInt -> {
+ final var expIdInt = expIdIntMap.remove(actIdInt.name());
if (expIdInt == null) {
- builder.append('\n').append(" Unexpected id-int entry for ").append(actIdInt.getIdentifier());
+ builder.append('\n').append(" Unexpected id-int entry for ").append(actIdInt.name());
return;
}
- Map<NodeIdentifierWithPredicates, MapEntryNode> expItemMap = new HashMap<>();
- ((MapNode)expIdInt.getChild(itemNodeId).get()).getValue()
- .forEach(node -> expItemMap.put(node.getIdentifier(), node));
+ final var expItemMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+ ((MapNode)expIdInt.getChildByArg(itemNodeId)).body()
+ .forEach(node -> expItemMap.put(node.name(), node));
- ((MapNode)actIdInt.getChild(itemNodeId).get()).getValue().forEach(actItem -> {
- final MapEntryNode expItem = expItemMap.remove(actItem.getIdentifier());
+ ((MapNode)actIdInt.getChildByArg(itemNodeId)).body().forEach(actItem -> {
+ final var expItem = expItemMap.remove(actItem.name());
if (expItem == null) {
- builder.append('\n').append(" Unexpected item entry ").append(actItem.getIdentifier())
- .append(" for id-int entry ").append(actIdInt.getIdentifier());
+ builder.append('\n').append(" Unexpected item entry ").append(actItem.name())
+ .append(" for id-int entry ").append(actIdInt.name());
}
});
expItemMap.values().forEach(node -> builder.append('\n')
- .append(" Actual is missing item entry ").append(node.getIdentifier())
- .append(" for id-int entry ").append(actIdInt.getIdentifier()));
+ .append(" Actual is missing item entry ").append(node.name())
+ .append(" for id-int entry ").append(actIdInt.name()));
});
expIdIntMap.values().forEach(node -> builder.append('\n')
- .append(" Actual is missing id-int entry for ").append(node.getIdentifier()));
+ .append(" Actual is missing id-int entry for ").append(node.name()));
return builder.toString();
}
- private class CheckFinishedTask implements Runnable {
-
+ private final class CheckFinishedTask implements Runnable {
private final SettableFuture<Void> future;
CheckFinishedTask(final SettableFuture<Void> future) {
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.concurrent.CompletionStage;
-import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PrefixLeaderHandler {
-
- private static final Logger LOG = LoggerFactory.getLogger(PrefixLeaderHandler.class);
-
- private final DOMDataTreeService domDataTreeService;
- private final BindingNormalizedNodeSerializer serializer;
-
- public PrefixLeaderHandler(final DOMDataTreeService domDataTreeService,
- final BindingNormalizedNodeSerializer serializer) {
- this.domDataTreeService = domDataTreeService;
- this.serializer = serializer;
- }
-
- public ListenableFuture<RpcResult<BecomePrefixLeaderOutput>> makeLeaderLocal(final BecomePrefixLeaderInput input) {
-
- final YangInstanceIdentifier yid = serializer.toYangInstanceIdentifier(input.getPrefix());
- final DOMDataTreeIdentifier prefix = new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, yid);
-
- try (CDSDataTreeProducer producer =
- (CDSDataTreeProducer) domDataTreeService.createProducer(Collections.singleton(prefix))) {
-
- final CDSShardAccess shardAccess = producer.getShardAccess(prefix);
-
- final CompletionStage<Void> completionStage = shardAccess.makeLeaderLocal();
-
- completionStage.exceptionally(throwable -> {
- LOG.error("Leader movement failed.", throwable);
- return null;
- });
- } catch (final DOMDataTreeProducerException e) {
- LOG.warn("Error while closing producer", e);
- } catch (final TimeoutException e) {
- LOG.warn("Timeout while on producer operation", e);
- Futures.immediateFuture(RpcResultBuilder.failed().withError(RpcError.ErrorType.RPC,
- "resource-denied-transport", "Timeout while opening producer please retry.", "clustering-it",
- "clustering-it", e));
- }
-
- return Futures.immediateFuture(RpcResultBuilder.success(new BecomePrefixLeaderOutputBuilder().build()).build());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID_INT;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID_INTS;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ITEM;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.CompletionStage;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory;
-import org.opendaylight.controller.cluster.sharding.DistributedShardRegistration;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PrefixShardHandler {
-
- private static final Logger LOG = LoggerFactory.getLogger(PrefixShardHandler.class);
- private static final int MAX_PREFIX = 4;
- private static final String PREFIX_TEMPLATE = "prefix-";
-
- private final DistributedShardFactory shardFactory;
- private final DOMDataTreeService domDataTreeService;
- private final BindingNormalizedNodeSerializer serializer;
-
- private final Map<YangInstanceIdentifier, DistributedShardRegistration> registrations =
- Collections.synchronizedMap(new HashMap<>());
-
- public PrefixShardHandler(final DistributedShardFactory shardFactory,
- final DOMDataTreeService domDataTreeService,
- final BindingNormalizedNodeSerializer serializer) {
-
- this.shardFactory = shardFactory;
- this.domDataTreeService = domDataTreeService;
- this.serializer = serializer;
- }
-
- public ListenableFuture<RpcResult<CreatePrefixShardOutput>> onCreatePrefixShard(
- final CreatePrefixShardInput input) {
-
- final SettableFuture<RpcResult<CreatePrefixShardOutput>> future = SettableFuture.create();
-
- final CompletionStage<DistributedShardRegistration> completionStage;
- final YangInstanceIdentifier identifier = serializer.toYangInstanceIdentifier(input.getPrefix());
-
- try {
- completionStage = shardFactory.createDistributedShard(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, identifier),
- input.getReplicas().stream().map(MemberName::forName).collect(Collectors.toList()));
-
- completionStage.thenAccept(registration -> {
- LOG.debug("Shard[{}] created successfully.", identifier);
- registrations.put(identifier, registration);
-
- final ListenableFuture<?> ensureFuture = ensureListExists();
- Futures.addCallback(ensureFuture, new FutureCallback<Object>() {
- @Override
- public void onSuccess(final Object result) {
- LOG.debug("Initial list write successful.");
- future.set(RpcResultBuilder.success(new CreatePrefixShardOutputBuilder().build()).build());
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.warn("Shard[{}] creation failed:", identifier, throwable);
-
- final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
- "create-shard-failed", "Shard creation failed", "cluster-test-app", "", throwable);
- future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
- }
- }, MoreExecutors.directExecutor());
- });
- completionStage.exceptionally(throwable -> {
- LOG.warn("Shard[{}] creation failed:", identifier, throwable);
-
- final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "create-shard-failed",
- "Shard creation failed", "cluster-test-app", "", throwable);
- future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
- return null;
- });
- } catch (final DOMDataTreeShardingConflictException e) {
- LOG.warn("Unable to register shard for: {}.", identifier);
-
- final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "create-shard-failed",
- "Sharding conflict", "cluster-test-app", "", e);
- future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
- }
-
- return future;
- }
-
- public ListenableFuture<RpcResult<RemovePrefixShardOutput>> onRemovePrefixShard(
- final RemovePrefixShardInput input) {
-
- final YangInstanceIdentifier identifier = serializer.toYangInstanceIdentifier(input.getPrefix());
- final DistributedShardRegistration registration = registrations.get(identifier);
-
- if (registration == null) {
- final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "registration-missing",
- "No shard registered at this prefix.");
- return Futures.immediateFuture(RpcResultBuilder.<RemovePrefixShardOutput>failed().withRpcError(error)
- .build());
- }
-
- final SettableFuture<RpcResult<RemovePrefixShardOutput>> future = SettableFuture.create();
-
- final CompletionStage<Void> close = registration.close();
- close.thenRun(() -> future.set(RpcResultBuilder.success(new RemovePrefixShardOutputBuilder().build()).build()));
- close.exceptionally(throwable -> {
- LOG.warn("Shard[{}] removal failed:", identifier, throwable);
-
- final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "remove-shard-failed",
- "Shard removal failed", "cluster-test-app", "", throwable);
- future.set(RpcResultBuilder.<RemovePrefixShardOutput>failed().withRpcError(error).build());
- return null;
- });
-
- return future;
- }
-
- private ListenableFuture<?> ensureListExists() {
-
- final CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = ImmutableNodes.mapNodeBuilder(ID_INT);
-
- // hardcoded initial list population for parallel produce-transactions testing on multiple nodes
- for (int i = 1; i < MAX_PREFIX; i++) {
- mapBuilder.withChild(
- ImmutableNodes.mapEntryBuilder(ID_INT, ID, PREFIX_TEMPLATE + i)
- .withChild(ImmutableNodes.mapNodeBuilder(ITEM).build())
- .build());
- }
- final MapNode mapNode = mapBuilder.build();
-
- final ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ID_INTS))
- .withChild(mapNode)
- .build();
-
- final DOMDataTreeProducer producer = domDataTreeService.createProducer(Collections.singleton(
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty())));
-
- final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
-
- final DOMDataTreeWriteCursor cursor =
- tx.createCursor(new DOMDataTreeIdentifier(
- LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty()));
-
- cursor.merge(containerNode.getIdentifier(), containerNode);
- cursor.close();
-
- final ListenableFuture<?> future = tx.commit();
- Futures.addCallback(future, new FutureCallback<Object>() {
- @Override
- public void onSuccess(final Object result) {
- try {
- LOG.debug("Closing producer for initial list.");
- producer.close();
- } catch (DOMDataTreeProducerException e) {
- LOG.warn("Error while closing producer.", e);
- }
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- //NOOP handled by the caller of this method.
- }
- }, MoreExecutors.directExecutor());
- return future;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.SplittableRandom;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class ProduceTransactionsHandler extends AbstractTransactionHandler {
- private static final Logger LOG = LoggerFactory.getLogger(ProduceTransactionsHandler.class);
-
- private final SettableFuture<RpcResult<ProduceTransactionsOutput>> future = SettableFuture.create();
- private final SplittableRandom random = new SplittableRandom();
- private final Set<Integer> usedValues = new HashSet<>();
- private final DOMDataTreeIdentifier idListItem;
- private final DOMDataTreeProducer itemProducer;
-
- private long insertTx = 0;
- private long deleteTx = 0;
-
- private ProduceTransactionsHandler(final DOMDataTreeProducer producer, final DOMDataTreeIdentifier idListItem,
- final ProduceTransactionsInput input) {
- super(input);
- this.itemProducer = requireNonNull(producer);
- this.idListItem = requireNonNull(idListItem);
- }
-
- public static ListenableFuture<RpcResult<ProduceTransactionsOutput>> start(
- final DOMDataTreeService domDataTreeService, final ProduceTransactionsInput input) {
- final String id = input.getId();
- LOG.debug("Filling the item list {} with initial values.", id);
-
- final YangInstanceIdentifier idListWithKey = ID_INT_YID.node(NodeIdentifierWithPredicates.of(ID_INT, ID, id));
-
- final DOMDataTreeProducer itemProducer = domDataTreeService.createProducer(
- Collections.singleton(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey)));
-
- final DOMDataTreeCursorAwareTransaction tx = itemProducer.createTransaction(false);
- final DOMDataTreeWriteCursor cursor =
- tx.createCursor(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey));
-
- final MapNode list = ImmutableNodes.mapNodeBuilder(ITEM).build();
- cursor.write(list.getIdentifier(), list);
- cursor.close();
-
- try {
- tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
- } catch (InterruptedException | ExecutionException | TimeoutException e) {
- LOG.warn("Unable to fill the initial item list.", e);
- closeProducer(itemProducer);
-
- return Futures.immediateFuture(RpcResultBuilder.<ProduceTransactionsOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "Unexpected-exception", e).build());
- }
-
- final ProduceTransactionsHandler handler = new ProduceTransactionsHandler(itemProducer,
- new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey.node(list.getIdentifier())
- .toOptimized()), input);
- // It is handler's responsibility to close itemProducer when the work is finished.
- handler.doStart();
- return handler.future;
- }
-
- private static void closeProducer(final DOMDataTreeProducer producer) {
- try {
- producer.close();
- } catch (final DOMDataTreeProducerException exception) {
- LOG.warn("Failure while closing producer.", exception);
- }
- }
-
- @Override
- FluentFuture<? extends @NonNull CommitInfo> execWrite(final long txId) {
- final int i = random.nextInt(MAX_ITEM + 1);
- final DOMDataTreeCursorAwareTransaction tx = itemProducer.createTransaction(false);
- final DOMDataTreeWriteCursor cursor = tx.createCursor(idListItem);
-
- final NodeIdentifierWithPredicates entryId = NodeIdentifierWithPredicates.of(ITEM, NUMBER, i);
- if (usedValues.contains(i)) {
- LOG.debug("Deleting item: {}", i);
- deleteTx++;
- cursor.delete(entryId);
- usedValues.remove(i);
-
- } else {
- LOG.debug("Inserting item: {}", i);
- insertTx++;
-
- final MapEntryNode entry = ImmutableNodes.mapEntryBuilder().withNodeIdentifier(entryId)
- .withChild(ImmutableNodes.leafNode(NUMBER, i)).build();
- cursor.write(entryId, entry);
- usedValues.add(i);
- }
-
- cursor.close();
-
- return tx.commit();
- }
-
- @Override
- void runFailed(final Throwable cause, final long txId) {
- closeProducer(itemProducer);
- future.set(RpcResultBuilder.<ProduceTransactionsOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
- }
-
- @Override
- void runSuccessful(final long allTx) {
- closeProducer(itemProducer);
- final ProduceTransactionsOutput output = new ProduceTransactionsOutputBuilder()
- .setAllTx(allTx)
- .setInsertTx(insertTx)
- .setDeleteTx(deleteTx)
- .build();
- future.set(RpcResultBuilder.<ProduceTransactionsOutput>success()
- .withResult(output).build());
- }
-
- @Override
- void runTimedOut(final String cause) {
- closeProducer(itemProducer);
- future.set(RpcResultBuilder.<ProduceTransactionsOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, cause).build());
- }
-}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class PublishNotificationsTask implements Runnable {
-
+public final class PublishNotificationsTask implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(PublishNotificationsTask.class);
- private static final int SECOND_AS_NANO = 1000000000;
+ private static final int SECOND_AS_NANO = 1_000_000_000;
private final NotificationPublishService notificationPublishService;
private final String notificationId;
this.notificationPublishService = requireNonNull(notificationPublishService);
this.notificationId = requireNonNull(notificationId);
checkArgument(secondsToTake > 0);
- this.timeToTake = secondsToTake * SECOND_AS_NANO;
+ timeToTake = secondsToTake * SECOND_AS_NANO;
checkArgument(maxPerSecond > 0);
- this.delay = SECOND_AS_NANO / maxPerSecond;
+ delay = SECOND_AS_NANO / maxPerSecond;
LOG.debug("Delay : {}", delay);
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import java.net.URI;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
import org.opendaylight.yangtools.yang.common.YangConstants;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class RoutedGetConstantService implements DOMRpcImplementation {
private static final Logger LOG = LoggerFactory.getLogger(RoutedGetConstantService.class);
- private static final QNameModule MODULE = QNameModule.create(
- URI.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15")).intern();
+ private static final QNameModule MODULE =
+ QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
private static final QName GET_CONTEXTED_CONSTANT = QName.create(MODULE, "get-contexted-constant").intern();
this.constant = constant;
}
- public static DOMRpcImplementationRegistration<RoutedGetConstantService> registerNew(
- final BindingNormalizedNodeSerializer codec, final DOMRpcProviderService rpcProviderService,
- final String constant, final InstanceIdentifier<?> context) {
+ public static Registration registerNew(final BindingNormalizedNodeSerializer codec,
+ final DOMRpcProviderService rpcProviderService, final String constant,
+ final InstanceIdentifier<?> context) {
LOG.debug("Registering get-contexted-constant on context: {}, with value: {}", context, constant);
- final YangInstanceIdentifier yid = codec.toYangInstanceIdentifier(context);
- final DOMRpcIdentifier id = DOMRpcIdentifier.create(SchemaPath.create(true, GET_CONTEXTED_CONSTANT), yid);
+ final var yid = codec.toYangInstanceIdentifier(context);
+ final var id = DOMRpcIdentifier.create(GET_CONTEXTED_CONSTANT, yid);
return rpcProviderService.registerRpcImplementation(new RoutedGetConstantService(constant), id);
}
@Override
- public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
+ public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
LOG.debug("get-contexted-constant invoked, current value: {}", constant);
- return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+ return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(OUTPUT))
- .withChild(ImmutableLeafNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(CONSTANT))
- .withValue(constant)
- .build())
+ .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
.build()));
}
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import java.net.URI;
import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
import org.opendaylight.yangtools.yang.common.YangConstants;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class SingletonGetConstantService implements DOMRpcImplementation, ClusterSingletonService {
-
private static final Logger LOG = LoggerFactory.getLogger(SingletonGetConstantService.class);
- private static final QNameModule MODULE = QNameModule.create(
- URI.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15")).intern();
+ private static final QNameModule MODULE =
+ QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
private static final QName CONTEXT = QName.create(MODULE, "context").intern();
private static final QName GET_SINGLETON_CONSTANT = QName.create(MODULE, "get-singleton-constant").intern();
private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
- ServiceGroupIdentifier.create("get-singleton-constant-service");
+ new ServiceGroupIdentifier("get-singleton-constant-service");
private final DOMRpcProviderService rpcProviderService;
private final String constant;
- private DOMRpcImplementationRegistration<SingletonGetConstantService> rpcRegistration;
+
+ private Registration rpcRegistration = null;
private SingletonGetConstantService(final DOMRpcProviderService rpcProviderService, final String constant) {
this.rpcProviderService = rpcProviderService;
this.constant = constant;
}
- public static ClusterSingletonServiceRegistration registerNew(
- final ClusterSingletonServiceProvider singletonService, final DOMRpcProviderService rpcProviderService,
- final String constant) {
+ public static Registration registerNew(final ClusterSingletonServiceProvider singletonService,
+ final DOMRpcProviderService rpcProviderService, final String constant) {
LOG.debug("Registering get-singleton-constant into ClusterSingletonService, value {}", constant);
return singletonService.registerClusterSingletonService(
}
@Override
- public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
+ public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
LOG.debug("get-singleton-constant invoked, current value: {}", constant);
- return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+ return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(OUTPUT))
- .withChild(ImmutableLeafNodeBuilder.create()
- .withNodeIdentifier(new NodeIdentifier(CONSTANT))
- .withValue(constant)
- .build())
+ .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
.build()));
}
@Override
public void instantiateServiceInstance() {
LOG.debug("Gained ownership of get-singleton-constant, registering service into rpcService");
- final DOMRpcIdentifier id = DOMRpcIdentifier.create(SchemaPath.create(true, GET_SINGLETON_CONSTANT));
+ final DOMRpcIdentifier id = DOMRpcIdentifier.create(GET_SINGLETON_CONSTANT);
rpcRegistration = rpcProviderService.registerRpcImplementation(this, id);
}
package org.opendaylight.controller.clustering.it.provider.impl;
import static java.util.Objects.requireNonNull;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.LinkedHashSet;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class WriteTransactionsHandler extends AbstractTransactionHandler {
- private static final class Chained extends WriteTransactionsHandler implements DOMTransactionChainListener {
+ private static final class Chained extends WriteTransactionsHandler implements FutureCallback<Empty> {
private final SplittableRandom random = new SplittableRandom();
private final DOMTransactionChain transactionChain;
Chained(final DOMDataBroker dataBroker, final YangInstanceIdentifier idListItem,
final WriteTransactionsInput input) {
super(idListItem, input);
- transactionChain = dataBroker.createTransactionChain(this);
+ transactionChain = dataBroker.createTransactionChain();
+ transactionChain.addCallback(this);
}
@Override
}
@Override
- public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
- final Throwable cause) {
+ public void onFailure(final Throwable cause) {
// This is expected to happen frequently in isolation testing.
LOG.debug("Transaction chain failed.", cause);
// Do not return RPC here, rely on transaction failure to call runFailed.
}
@Override
- public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
+ public void onSuccess(final Empty result) {
LOG.debug("Transaction chain closed successfully.");
}
}
LOG.info("Starting write transactions with input {}", input);
final String id = input.getId();
- final MapEntryNode entry = ImmutableNodes.mapEntryBuilder(ID_INT, ID, id)
- .withChild(ImmutableNodes.mapNodeBuilder(ITEM).build())
+ final MapEntryNode entry = mapEntryBuilder(ID_INT, ID, id)
+ .withChild(ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(ITEM))
+ .build())
.build();
- final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.getIdentifier());
+ final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.name());
- final ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
+ final ContainerNode containerNode = ImmutableNodes.newContainerBuilder()
.withNodeIdentifier(new NodeIdentifier(ID_INTS))
- .withChild(ImmutableNodes.mapNodeBuilder(ID_INT).build())
+ .withChild(ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(ID_INT))
+ .build())
.build();
DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException | TimeoutException e) {
LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
- return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+ return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
String.format("Could not start write transactions - error writing top-level path %s: %s",
ID_INTS_YID, containerNode), e).buildFuture();
} catch (ExecutionException e) {
LOG.debug("Got an optimistic lock when writing initial top level list element.", e);
} else {
LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
- return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+ return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
String.format("Could not start write transactions - error writing top-level path %s: %s",
ID_INTS_YID, containerNode), e).buildFuture();
}
tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
LOG.error("Error writing top-level path {}: {}", idListItem, entry, e);
- return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+ return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
String.format("Could not start write transactions - error writing list entry path %s: %s",
idListItem, entry), e).buildFuture();
}
LOG.debug("Filling the item list with initial values.");
- final CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = ImmutableNodes.mapNodeBuilder(ITEM);
-
final YangInstanceIdentifier itemListId = idListItem.node(ITEM);
tx = domDataBroker.newWriteOnlyTransaction();
- final MapNode itemListNode = mapBuilder.build();
+ final MapNode itemListNode = ImmutableNodes.newSystemMapBuilder()
+ .withNodeIdentifier(new NodeIdentifier(ITEM))
+ .build();
tx.put(LogicalDatastoreType.CONFIGURATION, itemListId, itemListNode);
try {
tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
LOG.error("Error filling initial item list path {}: {}", itemListId, itemListNode, e);
- return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+ return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
String.format("Could not start write transactions - error filling initial item list path %s: %s",
itemListId, itemListNode), e).buildFuture();
}
final WriteTransactionsHandler handler;
- if (input.isChainedTransactions()) {
+ if (input.getChainedTransactions()) {
handler = new Chained(domDataBroker, idListItem, input);
} else {
handler = new Simple(domDataBroker, idListItem, input);
} else {
LOG.debug("Inserting item: {}", i);
insertTx.incrementAndGet();
- final MapEntryNode entry = ImmutableNodes.mapEntry(ITEM, NUMBER, i);
- tx.put(LogicalDatastoreType.CONFIGURATION, entryId, entry);
+ tx.put(LogicalDatastoreType.CONFIGURATION, entryId, mapEntryBuilder(ITEM, NUMBER, i).build());
usedValues.add(i);
}
@Override
void runFailed(final Throwable cause, final long txId) {
completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
+ .withError(ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
}
@Override
@Override
void runTimedOut(final String cause) {
completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
- .withError(RpcError.ErrorType.APPLICATION, cause).build());
+ .withError(ErrorType.APPLICATION, cause).build());
}
abstract DOMDataTreeWriteTransaction createTransaction();
import static java.util.Objects.requireNonNull;
import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutputBuilder;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.OdlMdsalLowlevelTargetListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class YnlListener implements OdlMdsalLowlevelTargetListener {
+public class YnlListener implements Listener<IdSequence> {
private static final Logger LOG = LoggerFactory.getLogger(YnlListener.class);
private final String id;
}
@Override
- public void onIdSequence(final IdSequence notification) {
+ public void onNotification(final IdSequence notification) {
LOG.debug("Received id-sequence notification, : {}", notification);
allNot.incrementAndGet();
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
- odl:use-default-for-reference-types="true">
-
- <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
- <reference id="entityOwnershipService" interface="org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService"/>
- <reference id="bindingRpcRegistry" interface="org.opendaylight.mdsal.binding.api.RpcProviderService"/>
- <reference id="domRpcProviderService" interface="org.opendaylight.mdsal.dom.api.DOMRpcProviderService"/>
- <reference id="clusterSingletonService" interface="org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider"/>
- <reference id="domDataBroker" interface="org.opendaylight.mdsal.dom.api.DOMDataBroker"/>
- <reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService"/>
- <reference id="normalizedNodeSerializer" interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"/>
- <reference id="notificationPublishService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService" />
- <reference id="notificationListenerService" interface="org.opendaylight.mdsal.binding.api.NotificationService" />
- <reference id="domDataTreeService" interface="org.opendaylight.mdsal.dom.api.DOMDataTreeService"/>
- <reference id="distributedShardFactory" interface="org.opendaylight.controller.cluster.sharding.DistributedShardFactory"/>
- <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
- odl:type="distributed-config"/>
- <reference id="actorSystemProvider" interface="org.opendaylight.controller.cluster.ActorSystemProvider"/>
-
-
- <bean id="purchaseCarProvider" class="org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider" >
- <argument ref="notificationPublishService"/>
- </bean>
-
- <bean id="peopleProvider" class="org.opendaylight.controller.clustering.it.provider.PeopleProvider"
- destroy-method="close">
- <argument ref="dataBroker"/>
- <argument ref="bindingRpcRegistry"/>
- <argument ref="purchaseCarProvider"/>
- </bean>
-
- <bean id="carProvider" class="org.opendaylight.controller.clustering.it.provider.CarProvider"
- destroy-method="close">
- <argument ref="dataBroker"/>
- <argument ref="entityOwnershipService"/>
- <argument ref="domDataBroker"/>
- </bean>
-
- <odl:rpc-implementation ref="carProvider"/>
- <odl:rpc-implementation ref="peopleProvider"/>
-
- <bean id="peopleCarListener" class="org.opendaylight.controller.clustering.it.listener.PeopleCarListener" >
- <property name="dataProvider" ref="dataBroker"/>
- </bean>
-
- <odl:notification-listener ref="peopleCarListener"/>
-
- <bean id="basicTestProvider" class="org.opendaylight.controller.clustering.it.provider.BasicRpcTestProvider">
- <argument ref="bindingRpcRegistry"/>
- <argument ref="clusterSingletonService"/>
- </bean>
-
- <bean id="lowLevelTestProvider" class="org.opendaylight.controller.clustering.it.provider.MdsalLowLevelTestProvider">
- <argument ref="bindingRpcRegistry"/>
- <argument ref="domRpcProviderService"/>
- <argument ref="clusterSingletonService"/>
- <argument ref="schemaService"/>
- <argument ref="normalizedNodeSerializer"/>
- <argument ref="notificationPublishService"/>
- <argument ref="notificationListenerService"/>
- <argument ref="domDataBroker"/>
- <argument ref="domDataTreeService"/>
- <argument ref="distributedShardFactory"/>
- <argument ref="configDatastore"/>
- <argument ref="actorSystemProvider"/>
- </bean>
-
-</blueprint>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>samples-aggregator</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../parent</relativePath>
</parent>
<dependencies>
<dependency>
- <groupId>${project.groupId}</groupId>
+ <groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sample-toaster</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>yang-binding</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
+ </dependency>
<dependency>
<groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
</dependency>
</dependencies>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.sample.kitchen.api;
-import java.util.concurrent.Future;
+import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
import org.opendaylight.yangtools.yang.common.RpcResult;
public interface KitchenService {
- Future<RpcResult<Void>> makeBreakfast(EggsType eggs, Class<? extends ToastType> toast, int toastDoneness);
+
+ ListenableFuture<RpcResult<Void>> makeBreakfast(EggsType eggs, ToastType toast, int toastDoneness);
}
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.sample.kitchen.api.EggsType;
import org.opendaylight.controller.sample.kitchen.api.KitchenService;
import org.opendaylight.controller.sample.kitchen.api.KitchenServiceRuntimeMXBean;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.CompositeListener;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInputBuilder;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterListener;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBread;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WheatBread;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class KitchenServiceImpl extends AbstractMXBean
- implements KitchenService, KitchenServiceRuntimeMXBean, ToasterListener {
-
+@Singleton
+@Component(service = KitchenService.class, immediate = true)
+public final class KitchenServiceImpl extends AbstractMXBean implements KitchenService, KitchenServiceRuntimeMXBean {
private static final Logger LOG = LoggerFactory.getLogger(KitchenServiceImpl.class);
private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
- private final ToasterService toaster;
-
- private final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
+ private final ExecutorService executor = Executors.newCachedThreadPool();
+ private final MakeToast makeToast;
+ private final Registration reg;
private volatile boolean toasterOutOfBread;
- public KitchenServiceImpl(final ToasterService toaster) {
+ @Inject
+ @Activate
+ public KitchenServiceImpl(@Reference final RpcService rpcService,
+ @Reference final NotificationService notifService) {
super("KitchenService", "toaster-consumer", null);
- this.toaster = toaster;
+ makeToast = rpcService.getRpc(MakeToast.class);
+ reg = notifService.registerCompositeListener(new CompositeListener(Set.of(
+ new CompositeListener.Component<>(ToasterOutOfBread.class, notification -> {
+ LOG.info("ToasterOutOfBread notification");
+ toasterOutOfBread = true;
+ }),
+ new CompositeListener.Component<>(ToasterRestocked.class, notification -> {
+ LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
+ toasterOutOfBread = false;
+ }))));
+ register();
+ }
+
+ @PreDestroy
+ @Deactivate
+ public void close() {
+ unregister();
+ reg.close();
}
@Override
- public Future<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final Class<? extends ToastType> toastType,
+ public ListenableFuture<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final ToastType toastType,
final int toastDoneness) {
// Call makeToast, The OpendaylightToaster impl already returns a ListenableFuture so the conversion is
// actually a no-op.
}
}
- return Futures.immediateFuture(RpcResultBuilder.<Void>status(atLeastOneSucceeded)
- .withRpcErrors(errorList.build()).build());
+ return RpcResultBuilder.<Void>status(atLeastOneSucceeded).withRpcErrors(errorList.build()).buildFuture();
}, MoreExecutors.directExecutor());
}
private ListenableFuture<RpcResult<Void>> makeEggs(final EggsType eggsType) {
- return executor.submit(() -> RpcResultBuilder.<Void>success().build());
+ return Futures.submit(() -> RpcResultBuilder.<Void>success().build(), executor);
}
- private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final Class<? extends ToastType> toastType,
- final int toastDoneness) {
-
+ private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final ToastType toastType, final int toastDoneness) {
if (toasterOutOfBread) {
LOG.info("We're out of toast but we can make eggs");
- return Futures.immediateFuture(RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
- .withWarning(ErrorType.APPLICATION, "partial-operation",
- "Toaster is out of bread but we can make you eggs").build());
+ return RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
+ .withWarning(ErrorType.APPLICATION, ErrorTag.PARTIAL_OPERATION,
+ "Toaster is out of bread but we can make you eggs")
+ .buildFuture();
}
// Access the ToasterService to make the toast.
-
- MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(Uint32.valueOf(toastDoneness))
- .setToasterToastType(toastType).build();
-
- return toaster.makeToast(toastInput);
+ return makeToast.invoke(new MakeToastInputBuilder()
+ .setToasterDoneness(Uint32.valueOf(toastDoneness))
+ .setToasterToastType(toastType)
+ .build());
}
@Override
public Boolean makeScrambledWithWheat() {
try {
// This call has to block since we must return a result to the JMX client.
- RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.class, 2).get();
+ RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.VALUE, 2).get();
if (result.isSuccessful()) {
LOG.info("makeBreakfast succeeded");
} else {
return Boolean.FALSE;
}
-
- /**
- * Implemented from the ToasterListener interface.
- */
- @Override
- public void onToasterOutOfBread(final ToasterOutOfBread notification) {
- LOG.info("ToasterOutOfBread notification");
- toasterOutOfBread = true;
- }
-
- /**
- * Implemented from the ToasterListener interface.
- */
- @Override
- public void onToasterRestocked(final ToasterRestocked notification) {
- LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
- toasterOutOfBread = false;
- }
}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- odl:use-default-for-reference-types="true">
-
- <!-- Retrieves the RPC service for the ToasterService interface -->
- <odl:rpc-service id="toasterService" interface="org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService"/>
-
- <!-- Create the KitchenServiceImpl instance and inject the RPC service identified by "toasterService" -->
- <bean id="kitchenService" class="org.opendaylight.controller.sample.kitchen.impl.KitchenServiceImpl"
- init-method="register" destroy-method="unregister">
- <argument ref="toasterService"/>
- </bean>
-
- <!-- Register the KitchenServiceImpl to receive yang notifications -->
- <odl:notification-listener ref="kitchenService"/>
-
- <!-- Advertise the KitchenServiceImpl with the OSGi registry with the type property set to "default" . The
- type property is optional but can be used to distinguish this implementation from any other potential
- KitchenService implementations (if there were any). Clients consuming the KitchenService can pick the
- desired implementation via the particular type.
- -->
- <service ref="kitchenService" interface="org.opendaylight.controller.sample.kitchen.api.KitchenService"
- odl:type="default"/>
-</blueprint>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-it-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../mdsal-it-parent</relativePath>
</parent>
<artifactId>sample-toaster-it</artifactId>
import org.opendaylight.controller.sample.kitchen.api.EggsType;
import org.opendaylight.controller.sample.kitchen.api.KitchenService;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.HashBrown;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WhiteBread;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.exam.options.MavenUrlReference;
@Inject
@Filter(timeout = 60 * 1000)
KitchenService kitchenService;
+ @Inject
+ @Filter(timeout = 60 * 1000)
+ // proxy for the entire toaster, nothing else
+ MakeToast makeToast;
@Override
public MavenUrlReference getFeatureRepo() {
boolean success = true;
// Make toasts using OSGi service
- success &= kitchenService.makeBreakfast(EggsType.SCRAMBLED, HashBrown.class, 4).get().isSuccessful();
- success &= kitchenService.makeBreakfast(EggsType.POACHED, WhiteBread.class, 8).get().isSuccessful();
+ success &= kitchenService.makeBreakfast(EggsType.SCRAMBLED, HashBrown.VALUE, 4).get().isSuccessful();
+ success &= kitchenService.makeBreakfast(EggsType.POACHED, WhiteBread.VALUE, 8).get().isSuccessful();
assertTrue("Not all breakfasts succeeded", success);
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../parent</relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-common-util</artifactId>
</dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- </dependency>
<!-- dependencies to use AbstractDataBrokerTest -->
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-binding-test-utils</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.component.annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.guicedee.services</groupId>
+ <artifactId>javax.inject</artifactId>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.service.metatype.annotations</artifactId>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
<scm>
package org.opendaylight.controller.sample.toaster.provider;
import static java.util.Objects.requireNonNull;
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.DELETE;
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.WRITE;
import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL;
-import static org.opendaylight.yangtools.yang.common.RpcError.ErrorType.APPLICATION;
+import static org.opendaylight.yangtools.yang.common.ErrorType.APPLICATION;
import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
+import java.util.List;
import java.util.Optional;
import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
import org.opendaylight.mdsal.binding.api.DataTreeModification;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.ReadWriteTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToast;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastInput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutputBuilder;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToaster;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterInput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutput;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutputBuilder;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBreadBuilder;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestockedBuilder;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfigBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.common.Uint16;
-import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class OpendaylightToaster extends AbstractMXBean
- implements ToasterService, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+@Singleton
+@Component(service = MakeToast.class, immediate = true)
+@Designate(ocd = OpendaylightToaster.Configuration.class)
+public final class OpendaylightToaster extends AbstractMXBean
+ implements MakeToast, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+ @ObjectClassDefinition
+ public @interface Configuration {
+ @AttributeDefinition(description = "The name of the toaster's manufacturer", max = "255")
+ String manufacturer() default TOASTER_MANUFACTURER;
+ @AttributeDefinition(description = "The name of the toaster's model", max = "255")
+ String modelNumber() default TOASTER_MODEL_NUMBER;
+ @AttributeDefinition(description = "How many times we attempt to make toast before failing ",
+ min = "0", max = "65535")
+ int maxMakeToastTries() default 2;
+ }
private static final CancelToastOutput EMPTY_CANCEL_OUTPUT = new CancelToastOutputBuilder().build();
private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
private static final Logger LOG = LoggerFactory.getLogger(OpendaylightToaster.class);
private static final InstanceIdentifier<Toaster> TOASTER_IID = InstanceIdentifier.builder(Toaster.class).build();
- private static final DisplayString TOASTER_MANUFACTURER = new DisplayString("Opendaylight");
- private static final DisplayString TOASTER_MODEL_NUMBER = new DisplayString("Model 1 - Binding Aware");
+ private static final String TOASTER_MANUFACTURER = "Opendaylight";
+ private static final String TOASTER_MODEL_NUMBER = "Model 1 - Binding Aware";
- private DataBroker dataBroker;
- private NotificationPublishService notificationProvider;
- private ListenerRegistration<OpendaylightToaster> dataTreeChangeListenerRegistration;
+ private final DataBroker dataBroker;
+ private final NotificationPublishService notificationProvider;
+ private final Registration dataTreeChangeListenerRegistration;
+ private final Registration reg;
private final ExecutorService executor;
private final AtomicLong toastsMade = new AtomicLong(0);
private final AtomicLong darknessFactor = new AtomicLong(1000);
- private final ToasterAppConfig toasterAppConfig;
-
- public OpendaylightToaster() {
- this(new ToasterAppConfigBuilder().setManufacturer(TOASTER_MANUFACTURER).setModelNumber(TOASTER_MODEL_NUMBER)
- .setMaxMakeToastTries(Uint16.valueOf(2)).build());
- }
+ private final @NonNull DisplayString manufacturer;
+ private final @NonNull DisplayString modelNumber;
+ private final int maxMakeToastTries;
- public OpendaylightToaster(final ToasterAppConfig toasterAppConfig) {
+ public OpendaylightToaster(final DataBroker dataProvider,
+ final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService,
+ final String manufacturer, final String modelNumber, final int maxMakeToastTries) {
super("OpendaylightToaster", "toaster-provider", null);
- executor = Executors.newFixedThreadPool(1);
- this.toasterAppConfig = toasterAppConfig;
- }
+ notificationProvider = requireNonNull(notificationPublishService);
+ dataBroker = requireNonNull(dataProvider);
- public void setNotificationProvider(final NotificationPublishService notificationPublishService) {
- this.notificationProvider = notificationPublishService;
- }
+ this.manufacturer = new DisplayString(manufacturer);
+ this.modelNumber = new DisplayString(modelNumber);
+ this.maxMakeToastTries = maxMakeToastTries;
- public void setDataBroker(final DataBroker dataBroker) {
- this.dataBroker = dataBroker;
- }
+ executor = Executors.newFixedThreadPool(1);
+ reg = rpcProviderService.registerRpcImplementations(
+ (CancelToast) this::cancelToast,
+ this,
+ (RestockToaster) this::restockToaster);
- public void init() {
LOG.info("Initializing...");
dataTreeChangeListenerRegistration = requireNonNull(dataBroker, "dataBroker must be set")
- .registerDataTreeChangeListener(DataTreeIdentifier.create(CONFIGURATION, TOASTER_IID), this);
- setToasterStatusUp(null);
+ .registerTreeChangeListener(DataTreeIdentifier.of(CONFIGURATION, TOASTER_IID), this);
+ try {
+ setToasterStatusUp(null).get();
+ } catch (InterruptedException | ExecutionException e) {
+ throw new IllegalStateException("Failed to commit initial data", e);
+ }
// Register our MXBean.
register();
}
+ @Inject
+ public OpendaylightToaster(final DataBroker dataProvider,
+ final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService) {
+ this(dataProvider, notificationPublishService, rpcProviderService, TOASTER_MANUFACTURER, TOASTER_MODEL_NUMBER,
+ 2);
+ }
+
+ @Activate
+ public OpendaylightToaster(@Reference final DataBroker dataProvider,
+ @Reference final NotificationPublishService notificationPublishService,
+ @Reference final RpcProviderService rpcProviderService, final @NonNull Configuration configuration) {
+ this(dataProvider, notificationPublishService, rpcProviderService, configuration.manufacturer(),
+ configuration.modelNumber(), configuration.maxMakeToastTries());
+ }
+
/**
* Implemented from the AutoCloseable interface.
*/
@Override
+ @PreDestroy
+ @Deactivate
public void close() {
LOG.info("Closing...");
// Unregister our MXBean.
unregister();
+ reg.close();
// When we close this service we need to shutdown our executor!
executor.shutdown();
// note - we are simulating a device whose manufacture and model are
// fixed (embedded) into the hardware.
// This is why the manufacture and model number are hardcoded.
- return new ToasterBuilder().setToasterManufacturer(toasterAppConfig.getManufacturer())
- .setToasterModelNumber(toasterAppConfig.getModelNumber()).setToasterStatus(status).build();
+ return new ToasterBuilder()
+ .setToasterManufacturer(manufacturer)
+ .setToasterModelNumber(modelNumber)
+ .setToasterStatus(status)
+ .build();
}
/**
* Implemented from the DataTreeChangeListener interface.
*/
@Override
- public void onDataTreeChanged(final Collection<DataTreeModification<Toaster>> changes) {
- for (DataTreeModification<Toaster> change: changes) {
- DataObjectModification<Toaster> rootNode = change.getRootNode();
- if (rootNode.getModificationType() == WRITE) {
- Toaster oldToaster = rootNode.getDataBefore();
- Toaster newToaster = rootNode.getDataAfter();
- LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: "
- + "old Toaster: {}, new Toaster: {}", change.getRootPath().getRootIdentifier(),
- oldToaster, newToaster);
-
- Uint32 darkness = newToaster.getDarknessFactor();
- if (darkness != null) {
- darknessFactor.set(darkness.toJava());
+ public void onDataTreeChanged(final List<DataTreeModification<Toaster>> changes) {
+ for (var change: changes) {
+ final var rootNode = change.getRootNode();
+ switch (rootNode.modificationType()) {
+ case WRITE -> {
+ final var oldToaster = rootNode.dataBefore();
+ final var newToaster = rootNode.dataAfter();
+ LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: old Toaster: {}, "
+ + "new Toaster: {}", change.getRootPath().path(), oldToaster, newToaster);
+
+ final var darkness = newToaster.getDarknessFactor();
+ if (darkness != null) {
+ darknessFactor.set(darkness.toJava());
+ }
+ }
+ case DELETE -> LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
+ change.getRootPath().path(), rootNode.dataBefore());
+ default -> {
+ // No-op
}
- } else if (rootNode.getModificationType() == DELETE) {
- LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
- change.getRootPath().getRootIdentifier(), rootNode.getDataBefore());
}
}
}
/**
* RPC call implemented from the ToasterService interface that cancels the current toast, if any.
*/
- @Override
- public ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
- Future<?> current = currentMakeToastTask.getAndSet(null);
+ private ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
+ final var current = currentMakeToastTask.getAndSet(null);
if (current != null) {
current.cancel(true);
}
* RPC call implemented from the ToasterService interface that attempts to make toast.
*/
@Override
- public ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final MakeToastInput input) {
+ public ListenableFuture<RpcResult<MakeToastOutput>> invoke(final MakeToastInput input) {
LOG.info("makeToast: {}", input);
-
- final SettableFuture<RpcResult<MakeToastOutput>> futureResult = SettableFuture.create();
-
- checkStatusAndMakeToast(input, futureResult, toasterAppConfig.getMaxMakeToastTries().toJava());
-
+ final var futureResult = SettableFuture.<RpcResult<MakeToastOutput>>create();
+ checkStatusAndMakeToast(input, futureResult, maxMakeToastTries);
return futureResult;
}
private static RpcError makeToasterOutOfBreadError() {
- return RpcResultBuilder.newError(APPLICATION, "resource-denied", "Toaster is out of bread", "out-of-stock",
- null, null);
+ return RpcResultBuilder.newError(APPLICATION, ErrorTag.RESOURCE_DENIED, "Toaster is out of bread",
+ "out-of-stock", null, null);
}
private static RpcError makeToasterInUseError() {
- return RpcResultBuilder.newWarning(APPLICATION, "in-use", "Toaster is busy", null, null, null);
+ return RpcResultBuilder.newWarning(APPLICATION, ErrorTag.IN_USE, "Toaster is busy", null, null, null);
}
private void checkStatusAndMakeToast(final MakeToastInput input,
Futures.transformAsync(readFuture, toasterData -> {
ToasterStatus toasterStatus = ToasterStatus.Up;
if (toasterData.isPresent()) {
- toasterStatus = toasterData.get().getToasterStatus();
+ toasterStatus = toasterData.orElseThrow().getToasterStatus();
}
LOG.debug("Read toaster status: {}", toasterStatus);
* Restocks the bread for the toaster, resets the toastsMade counter to 0, and sends a
* ToasterRestocked notification.
*/
- @Override
- public ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
+ private ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
LOG.info("restockToaster: {}", input);
amountOfBreadInStock.set(input.getAmountOfBreadToStock().toJava());
return toastsMade.get();
}
- private void setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
+ private ListenableFuture<?> setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
tx.put(OPERATIONAL,TOASTER_IID, buildToaster(ToasterStatus.Up));
- Futures.addCallback(tx.commit(), new FutureCallback<CommitInfo>() {
+ final var future = tx.commit();
+ Futures.addCallback(future, new FutureCallback<CommitInfo>() {
@Override
public void onSuccess(final CommitInfo result) {
LOG.info("Successfully set ToasterStatus to Up");
}
}
}, MoreExecutors.directExecutor());
+
+ return future;
}
private boolean outOfBread() {
public Void call() {
try {
// make toast just sleeps for n seconds per doneness level.
- Thread.sleep(OpendaylightToaster.this.darknessFactor.get()
+ Thread.sleep(darknessFactor.get()
* toastRequest.getToasterDoneness().toJava());
} catch (InterruptedException e) {
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
- xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
- xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
- odl:restart-dependents-on-updates="true" odl:use-default-for-reference-types="true">
-
- <!-- "restart-dependents-on-updates" is an ODL extension attribute that processes any "property-placeholder"
- elements and reacts to updates to the corresponding cfg file by restarting this blueprint container any
- dependent containers that consume OSGi services provided by this container in an atomic and orderly
- manner.
-
- "use-default-for-reference-types" is an ODL extension attribute that adds a filter to all services
- imported via "reference" elements where the "type" property is either not set or set to "default" if
- the odl:type attribute isn't explicitly specified. This ensures the default implementation is imported
- if there are other implementations advertised with other types.
- -->
-
- <!-- Accesses properties via the etc/org.opendaylight.toaster.cfg file. The properties are made available
- as variables that can be referenced. The variables are substituted with the actual values read from
- the cfg file, if present, or the default-properties.
- -->
- <cm:property-placeholder persistent-id="org.opendaylight.toaster" update-strategy="none">
- <cm:default-properties>
- <cm:property name="databroker-type" value="default"/>
- </cm:default-properties>
- </cm:property-placeholder>
-
- <!-- "clustered-app-config" is an ODL extension that obtains an application configuration yang container
- from the MD-SAL data store and makes the binding DataObject available as a bean that can be injected
- into other beans. Here we obtain the ToasterAppConfig container DataObject. This also shows how to
- specify default data via the "default-config" child element. While default leaf values defined in the
- yang are returned, one may have more complex data, eg lists, that require default data. The
- "default-config" must contain the XML representation of the yang data, including namespace, wrapped
- in a CDATA section to prevent the blueprint container from treating it as markup.
- -->
- <odl:clustered-app-config id="toasterAppConfig"
- binding-class="org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig">
- <odl:default-config><![CDATA[
- <toaster-app-config xmlns="urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config">
- <max-make-toast-tries>3</max-make-toast-tries>
- </toaster-app-config>
- ]]></odl:default-config>
- </odl:clustered-app-config>
-
- <!-- Import MD-SAL services. For the DataBroker, we explicitly specify the odl:type which is configurable
- via the cfg file. In this manner the toaster can be configured to use the default clustered DataBroker
- or the specialized "pingpong" DataBroker (or any other DataBroker implementation).
- -->
- <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker" odl:type="${databroker-type}" />
- <reference id="notificationService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService"/>
-
- <!-- Create the OpendaylightToaster instance and inject its dependencies -->
- <bean id="toaster" class="org.opendaylight.controller.sample.toaster.provider.OpendaylightToaster"
- init-method="init" destroy-method="close">
- <argument ref="toasterAppConfig"/>
- <property name="dataBroker" ref="dataBroker"/>
- <property name="notificationProvider" ref="notificationService"/>
- </bean>
-
- <!-- Register the OpendaylightToaster instance as an RPC implementation provider. The "rpc-implementation"
- element automatically figures out the RpcService interface although it can be explicitly specified.
- -->
- <odl:rpc-implementation ref="toaster"/>
-</blueprint>
+++ /dev/null
-module toaster-app-config {
- yang-version 1;
-
- namespace "urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config";
- prefix toaster-app-config;
-
- import toaster { prefix toaster; revision-date 2009-11-20; }
-
- description
- "Configuration for the Opendaylight toaster application.";
-
- revision "2016-05-03" {
- description
- "Initial revision.";
- }
-
- container toaster-app-config {
- leaf manufacturer {
- type toaster:DisplayString;
- default "Opendaylight";
- }
-
- leaf model-number {
- type toaster:DisplayString;
- default "Model 1 - Binding Aware";
- }
-
- leaf max-make-toast-tries {
- type uint16;
- default 2;
- }
- }
-}
\ No newline at end of file
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.ReadTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.dom.adapter.test.AbstractConcurrentDataBrokerTest;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
@Before
public void setupToaster() {
- toaster = new OpendaylightToaster();
- toaster.setDataBroker(getDataBroker());
- toaster.init();
-
- // We'll mock the NotificationProviderService.
- NotificationPublishService mockNotification = mock(NotificationPublishService.class);
- toaster.setNotificationProvider(mockNotification);
+ toaster = new OpendaylightToaster(getDataBroker(), mock(NotificationPublishService.class),
+ mock(RpcProviderService.class));
}
@Test
public void testToasterInitOnStartUp() throws Exception {
DataBroker broker = getDataBroker();
- ReadTransaction readTx = broker.newReadOnlyTransaction();
- Optional<Toaster> optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+ Optional<Toaster> optional;
+ try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+ optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+ }
assertNotNull(optional);
assertTrue("Operational toaster not present", optional.isPresent());
- Toaster toasterData = optional.get();
+ Toaster toasterData = optional.orElseThrow();
assertEquals(Toaster.ToasterStatus.Up, toasterData.getToasterStatus());
assertEquals(new DisplayString("Opendaylight"), toasterData.getToasterManufacturer());
assertEquals(new DisplayString("Model 1 - Binding Aware"), toasterData.getToasterModelNumber());
- Optional<Toaster> configToaster = readTx.read(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
- assertFalse("Didn't expect config data for toaster.", configToaster.isPresent());
+ try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+ Boolean configToaster = readTx.exists(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
+ assertFalse("Didn't expect config data for toaster.", configToaster);
+ }
}
@Test
@Ignore //ignored because it is not a test right now. Illustrative purposes only.
public void testSomething() throws Exception {
MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(Uint32.valueOf(1))
- .setToasterToastType(WheatBread.class).build();
+ .setToasterToastType(WheatBread.VALUE).build();
// NOTE: In a real test we would want to override the Thread.sleep() to
// prevent our junit test
// for sleeping for a second...
- Future<RpcResult<MakeToastOutput>> makeToast = toaster.makeToast(toastInput);
+ Future<RpcResult<MakeToastOutput>> makeToast = toaster.invoke(toastInput);
RpcResult<MakeToastOutput> rpcResult = makeToast.get();
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-parent</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<relativePath>../../parent</relativePath>
</parent>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.mdsal</groupId>
- <artifactId>binding-parent</artifactId>
- <version>6.0.4</version>
- <relativePath/>
- </parent>
-
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- <packaging>bundle</packaging>
- <version>2.0.4-SNAPSHOT</version>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
- <artifactId>rfc6991-ietf-inet-types</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.mdsal.model</groupId>
- <artifactId>yang-ext</artifactId>
- </dependency>
- </dependencies>
-
- <scm>
- <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
- </scm>
-</project>
+++ /dev/null
-module opendaylight-inventory {
- namespace "urn:opendaylight:inventory";
- prefix inv;
-
- import yang-ext { prefix ext; revision-date "2013-07-09"; }
- import ietf-inet-types { prefix inet; revision-date "2013-07-15"; }
-
- revision "2013-08-19" {
- description "Initial revision of Inventory model";
- }
-
- typedef support-type {
- type enumeration {
- enum native;
- enum emulated;
- enum not-supported;
- }
- }
-
- typedef node-id {
- type inet:uri;
- description "Identifier for a particular node. For example:
-
- myprotocol:<unique_node_id>
-
- myprotocol:12
-
- It is a good practice to always lead with a scoping
- identifier. In the example above the scoping was
- 'myprotocol'. In your app you could use 'myapp' etc.";
- }
-
- typedef node-connector-id {
- type inet:uri;
- description "Identifier for a particular node-connector. For example:
-
- myprotocol:<unique_node_connector_id>
- myprotocol:3
-
- It is a good practice to always lead with a scoping
- identifier. In the example above the scoping was
- 'myprotocol'. In your app you could use 'myapp' etc.";
- }
-
- // YANG does not have a statement which limits the scope of an
- // instance-identifier to a particular subtree, which is why we are using
- // a type capture and not an instance-identifier to define a node-ref and
- // a node-connector-ref.
- typedef node-ref {
- type instance-identifier;
- description "A reference that points to an
- opendaylight-light:nodes/node in the data tree.";
- }
-
- typedef node-connector-ref {
- type instance-identifier;
- description "A reference that points to an
- opendaylight-list:nodes/node/{node-id}/node-connector in
- the data tree.";
- }
-
- identity node-context {
- description "A node-context is a classifier for node elements which
- allows an RPC to provide a service on behalf of a
- particular element in the data tree.";
- }
-
- identity node-connector-context {
- description "A node-connector-context is a classifier for
- node-connector elements which allows an RPC to provide
- a service on behalf of a particular element in the data
- tree.";
- }
-
- // We are defining a base identity here because there are limitations with
- // YANG enums. YANG does not allow you to extend enumeratations, therefore
- // by defining a base identity we allow other yang files to extend this
- // identity to define additional "enumerations". By using node-type as
- // their base they are able to pass their object to fields that accept
- // "node-types" while uniquely describing their type of node, such as
- // "router-node" or "switch-node" etc.
- // See https://wiki.opendaylight.org/view/YANG_Tools:YANG_to_Java_Mapping#Identity
- // for more information.
- identity node-type {
- description "A base identity definition which represents a generic
- node type and can be extended in other yang files.";
- }
-
- identity node-connector-type {
- description "A base identity definition which represents a generic
- node connector type and can be extended in other YANG
- files.";
- }
-
- grouping node {
- description "Describes the contents of a generic node -
- essentially an ID and a list of node-connectors.
- Acts as an augmentation point where other YANG files
- can add additional information.";
-
- leaf id {
- type node-id;
- description "The unique identifier for the node.";
- }
-
- list "node-connector" {
- key "id";
-
- description "A list of node connectors that belong this node.";
- ext:context-instance "node-connector-context";
-
- uses node-connector;
- }
- }
-
- grouping node-connector {
- description "Describes a generic node connector which consists of an ID.
- Acts as an augmentation point where other YANG files can
- add additional information.";
-
- leaf id {
- type node-connector-id;
- description "The unique identifier for the node-connector.";
- }
- }
-
- grouping node-context-ref {
- description "A helper grouping which contains a reference to a node
- classified with a node-context. This allows RPCs in other
- YANG files to refine their input to a particular node
- instance.";
-
- leaf node {
- ext:context-reference "node-context";
- type node-ref;
- description "A reference to a particular node.";
- }
- }
-
- // Base structure
- container nodes {
- description "The root container of all nodes.";
-
- list node {
- key "id";
- ext:context-instance "node-context";
- description "A list of nodes (as defined by the 'grouping node').";
- uses node; //this refers to the 'grouping node' defined above.
- }
- }
-
- // The following notifications should really be replaced by direct writes
- // to the data tree with data change listeners listening to those changes.
- // Notifications should be reserved for one time events which do not
- // require persistence to the data tree.
- notification node-updated {
- status deprecated;
-
- description "A notification sent by someone who realized there was
- a modification to a node, but did not modify the data
- tree.
-
- Describes that something on the node has been updated
- (including addition of a new node), but is for whatever
- reason is not modifying the data tree.
-
- Deprecated: If a process determines that a node was
- updated, then that logic should update the node using
- the DataBroker directly. Listeners interested update
- changes should register a data change listener for
- notifications on removals.";
-
- leaf node-ref {
- ext:context-reference "node-context";
- description "A reference to the node which changed.";
-
- type node-ref;
- }
- uses node;
- }
-
- notification node-connector-updated {
- status deprecated;
-
- description "A notification sent by someone who realized there was
- a modification to a node-connector, but did not modify
- the data tree.
-
- Describes that something on the node-connector has been
- updated (including addition of a new node-connector), but
- is for whatever reason is not modifying the data tree.
-
- Deprecated: If a process determines that a node-connector
- was updated, then that logic should update the
- node-connector using the DataBroker directly. Listeners
- interested update changes should register a data change
- listener for notifications on removals.";
-
- leaf node-connector-ref {
- ext:context-reference "node-connector-context";
- type node-connector-ref;
- description "A reference to the node-connector which changed.";
- }
- uses node-connector;
- }
-
- notification node-removed {
- status deprecated;
-
- description "A notification sent by someone who realized there was
- a node was removed, but did not modify the data tree.
-
- Describes that a node has been removed but is for whatever
- reason is not modifying the data tree.
-
- Deprecated: If a process determines that a node was
- removed, then that logic should remove the node from
- the DataBroker directly. Listeners interested in changes
- should register a data change listener for notifications
- on removals.";
-
- leaf node-ref {
- description "A reference to the node that was removed.";
- ext:context-reference "node-context";
- type node-ref;
- }
- }
-
- notification node-connector-removed {
- status deprecated;
-
- description "A notification sent by someone who realized there was
- a node-connector was removed, but did not modify the data
- tree.
-
- Describes that a node-connector has been removed but is
- for whatever reason is not modifying the data tree.
-
- Deprecated: If a process determines that a node-connector
- was removed, then that logic should remove the
- node-connector from the DataBroker directly. Listeners
- interested in changes should register a data change
- listener for notifications on removals.";
-
- leaf node-connector-ref {
- description "A reference to the node-connector that was removed.";
- ext:context-reference "node-connector-context";
- type node-connector-ref;
- }
- }
-}
+++ /dev/null
-module opendaylight-topology-inventory {
- yang-version 1;
- namespace "urn:opendaylight:model:topology:inventory";
- // replace with IANA namespace when assigned
- prefix "nt";
-
- import yang-ext { prefix "ext"; }
- import opendaylight-inventory {prefix "inv";}
- import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
- organization "TBD";
-
- contact "WILL-BE-DEFINED-LATER";
-
- revision 2013-10-30 {
- description
- "Initial revision.";
- }
-
- augment "/topo:network-topology/topo:topology/topo:node" {
- ext:augment-identifier "inventory-node";
- leaf inventory-node-ref {
- type inv:node-ref;
- }
- }
-
- augment "/topo:network-topology/topo:topology/topo:node/topo:termination-point" {
- ext:augment-identifier "inventory-node-connector";
- leaf inventory-node-connector-ref {
- ext:context-reference "inv:node-connector-context";
- type inv:node-connector-ref;
- }
- }
-}
+++ /dev/null
-module opendaylight-topology-view {
- yang-version 1;
- namespace "urn:opendaylight:model:topology:view";
- // replace with IANA namespace when assigned
- prefix "nt";
-
- import yang-ext { prefix "ext"; }
- import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
- organization "TBD";
-
- contact "WILL-BE-DEFINED-LATER";
-
- revision 2013-10-30 {
- description
- "Initial revision.";
- }
-
-
- grouping aggregate-topology {
- leaf-list original-topology {
- type topo:topology-ref;
- }
- }
-
- grouping aggregate-node {
- list original-node {
- leaf topology {
- type topo:topology-ref;
- }
- leaf node {
- type topo:node-ref;
- }
- }
- }
-
- augment "/topo:network-topology/topo:topology" {
- ext:augment-identifier "aggregated-topology";
- uses aggregate-topology;
- }
-
- augment "/topo:network-topology/topo:topology/topo:node" {
- ext:augment-identifier "aggregated-node";
- uses aggregate-node;
- }
-}
+++ /dev/null
-module opendaylight-topology {
- yang-version 1;
- namespace "urn:opendaylight:model:topology:general";
- // replace with IANA namespace when assigned
- prefix "nt";
-
- import yang-ext { prefix "ext"; }
- import ietf-inet-types { prefix "inet"; }
- import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
- organization "TBD";
-
- contact "WILL-BE-DEFINED-LATER";
-
- revision 2013-10-30 {
- description
- "Initial revision.";
- }
-
- identity node-type {
-
- }
-
- typedef node-type-ref {
- type identityref {
- base node-type;
- }
- }
-
- identity topology-context {
-
- }
-
- identity topology-node-context {
-
- }
-
- grouping node-identifiers {
- list node-identifier {
- key "type identifier";
- leaf type {
- type node-type-ref;
- }
- leaf identifier {
- type inet:uri;
- }
- }
- }
-
- augment "/topo:network-topology/topo:topology" {
- ext:context-instance "topology-context";
- }
-
- /* Inventory Augmentations */
- augment "/topo:network-topology/topo:topology/topo:node" {
- ext:context-instance "topology-node-context";
- }
-
- augment "/topo:network-topology/topo:topology/topo:node" {
- ext:augment-identifier "identifiable-node";
- uses node-identifiers;
- }
-}
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>7.0.5</version>
+ <version>13.0.11</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>releasepom</artifactId>
- <version>2.0.4-SNAPSHOT</version>
+ <version>9.0.3-SNAPSHOT</version>
<packaging>pom</packaging>
<name>controller</name>
<!-- Used by Sonar to set project name -->
<modules>
<module>artifacts</module>
+ <module>docs</module>
+ <module>features</module>
+ <module>karaf</module>
- <!-- md-sal -->
- <module>opendaylight/md-sal</module>
- <!-- config -->
- <module>opendaylight/config</module>
-
- <module>opendaylight/model</module>
-
- <module>opendaylight/blueprint</module>
-
- <!-- Parents -->
+ <module>akka</module>
+ <module>atomix-storage</module>
+ <module>bundle-parent</module>
<module>benchmark</module>
- <module>opendaylight/commons/jolokia</module>
-
- <!-- Karaf Distribution -->
- <module>karaf</module>
- <module>features</module>
+ <module>jolokia</module>
- <!-- documentation -->
- <module>docs</module>
+ <module>opendaylight/blueprint</module>
+ <module>opendaylight/md-sal</module>
</modules>
<profiles>