From: Ruslan Kashapov Date: Wed, 27 Mar 2024 08:52:41 +0000 (+0200) Subject: Improve segmented journal actor metrics X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=commitdiff_plain;h=HEAD;hp=7ce039b3e55d153fc75bc88198c49536ab83befc Improve segmented journal actor metrics Update write time marked on actual flush not on flush request. JIRA: CONTROLLER-2108 Change-Id: I92a66ae775cbae6aeea69bddf654df741f473dbd Signed-off-by: Ruslan Kashapov Signed-off-by: Robert Varga --- diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..48b1206393 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,21 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" + jobs: + post_checkout: + - git fetch --unshallow || true + +sphinx: + configuration: docs/conf.py + +python: + install: + - requirements: docs/requirements.txt diff --git a/akka/pom.xml b/akka/pom.xml index 29d8db8217..afd11d7410 100644 --- a/akka/pom.xml +++ b/akka/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller akka-aggregator - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom diff --git a/akka/repackaged-akka-jar/pom.xml b/akka/repackaged-akka-jar/pom.xml index d525bc0aa6..6c62c5d247 100644 --- a/akka/repackaged-akka-jar/pom.xml +++ b/akka/repackaged-akka-jar/pom.xml @@ -13,14 +13,14 @@ org.opendaylight.odlparent odlparent - 9.0.12 + 13.0.11 org.opendaylight.controller repackaged-akka-jar jar - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ${project.artifactId} @@ -33,52 +33,52 @@ com.typesafe.akka akka-actor_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-actor-typed_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-cluster_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-cluster-typed_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-osgi_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-persistence_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-protobuf_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-remote_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-slf4j_2.13 - 2.6.18 + 2.6.21 com.typesafe.akka akka-stream_2.13 - 2.6.18 + 2.6.21 @@ -127,6 +127,13 @@ + + maven-source-plugin + + + true + + diff --git a/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf b/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf index d0569ef3a6..d41cb39ae4 100644 --- a/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf +++ b/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf @@ -1307,4 +1307,45 @@ akka { #//#coordinated-shutdown-phases } + #//#circuit-breaker-default + # Configuration for circuit breakers created with the APIs accepting an id to + # identify or look up the circuit breaker. + # Note: Circuit breakers created without ids are not affected by this configuration. + # A child configuration section with the same name as the circuit breaker identifier + # will be used, with fallback to the `akka.circuit-breaker.default` section. + circuit-breaker { + + # Default configuration that is used if a configuration section + # with the circuit breaker identifier is not defined. + default { + # Number of failures before opening the circuit. + max-failures = 10 + + # Duration of time after which to consider a call a failure. + call-timeout = 10s + + # Duration of time in open state after which to attempt to close + # the circuit, by first entering the half-open state. 
+ reset-timeout = 15s + + # The upper bound of reset-timeout + max-reset-timeout = 36500d + + # Exponential backoff + # For details see https://en.wikipedia.org/wiki/Exponential_backoff + exponential-backoff = 1.0 + + # Additional random delay based on this factor is added to backoff + # For example 0.2 adds up to 20% delay + # In order to skip this additional delay set as 0 + random-factor = 0.0 + + # A allowlist of fqcn of Exceptions that the CircuitBreaker + # should not consider failures. By default all exceptions are + # considered failures. + exception-allowlist = [] + } + } + #//#circuit-breaker-default + } diff --git a/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf b/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf index 5f569d5d98..a30bce7190 100644 --- a/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf +++ b/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf @@ -268,465 +268,467 @@ akka { akka { - remote.classic { - #//#classic + remote { + #//#classic + classic { + + ### Configuration for classic remoting. Classic remoting is deprecated, use artery. + + + # If set to a nonempty string remoting will use the given dispatcher for + # its internal actors otherwise the default dispatcher is used. Please note + # that since remoting can load arbitrary 3rd party drivers (see + # "enabled-transport" and "adapters" entries) it is not guaranteed that + # every module will respect this setting. + use-dispatcher = "akka.remote.default-remote-dispatcher" + + # Settings for the failure detector to monitor connections. + # For TCP it is not important to have fast failure detection, since + # most connection failures are captured by TCP itself. + # The default DeadlineFailureDetector will trigger if there are no heartbeats within + # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds + # with the default settings. + transport-failure-detector { + + # FQCN of the failure detector implementation. + # It must implement akka.remote.FailureDetector and have + # a public constructor with a com.typesafe.config.Config and + # akka.actor.EventStream parameter. + implementation-class = "akka.remote.DeadlineFailureDetector" + + # How often keep-alive heartbeat messages should be sent to each connection. + heartbeat-interval = 4 s + + # Number of potentially lost/delayed heartbeats that will be + # accepted before considering it to be an anomaly. + # A margin to the `heartbeat-interval` is important to be able to survive sudden, + # occasional, pauses in heartbeat arrivals, due to for example garbage collect or + # network drop. + acceptable-heartbeat-pause = 120 s + } - ### Configuration for classic remoting. Classic remoting is deprecated, use artery. + # Timeout after which the startup of the remoting subsystem is considered + # to be failed. Increase this value if your transport drivers (see the + # enabled-transports section) need longer time to be loaded. + startup-timeout = 10 s - # If set to a nonempty string remoting will use the given dispatcher for - # its internal actors otherwise the default dispatcher is used. Please note - # that since remoting can load arbitrary 3rd party drivers (see - # "enabled-transport" and "adapters" entries) it is not guaranteed that - # every module will respect this setting. - use-dispatcher = "akka.remote.default-remote-dispatcher" + # Timout after which the graceful shutdown of the remoting subsystem is + # considered to be failed. 
After the timeout the remoting system is + # forcefully shut down. Increase this value if your transport drivers + # (see the enabled-transports section) need longer time to stop properly. + shutdown-timeout = 10 s - # Settings for the failure detector to monitor connections. - # For TCP it is not important to have fast failure detection, since - # most connection failures are captured by TCP itself. - # The default DeadlineFailureDetector will trigger if there are no heartbeats within - # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds - # with the default settings. - transport-failure-detector { + # Before shutting down the drivers, the remoting subsystem attempts to flush + # all pending writes. This setting controls the maximum time the remoting is + # willing to wait before moving on to shut down the drivers. + flush-wait-on-shutdown = 2 s - # FQCN of the failure detector implementation. - # It must implement akka.remote.FailureDetector and have - # a public constructor with a com.typesafe.config.Config and - # akka.actor.EventStream parameter. - implementation-class = "akka.remote.DeadlineFailureDetector" + # Reuse inbound connections for outbound messages + use-passive-connections = on - # How often keep-alive heartbeat messages should be sent to each connection. - heartbeat-interval = 4 s + # Controls the backoff interval after a refused write is reattempted. + # (Transports may refuse writes if their internal buffer is full) + backoff-interval = 5 ms - # Number of potentially lost/delayed heartbeats that will be - # accepted before considering it to be an anomaly. - # A margin to the `heartbeat-interval` is important to be able to survive sudden, - # occasional, pauses in heartbeat arrivals, due to for example garbage collect or - # network drop. - acceptable-heartbeat-pause = 120 s - } + # Acknowledgment timeout of management commands sent to the transport stack. + command-ack-timeout = 30 s + # The timeout for outbound associations to perform the handshake. + # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl + # the configured connection-timeout for the transport will be used instead. + handshake-timeout = 15 s - # Timeout after which the startup of the remoting subsystem is considered - # to be failed. Increase this value if your transport drivers (see the - # enabled-transports section) need longer time to be loaded. - startup-timeout = 10 s - - # Timout after which the graceful shutdown of the remoting subsystem is - # considered to be failed. After the timeout the remoting system is - # forcefully shut down. Increase this value if your transport drivers - # (see the enabled-transports section) need longer time to stop properly. - shutdown-timeout = 10 s - - # Before shutting down the drivers, the remoting subsystem attempts to flush - # all pending writes. This setting controls the maximum time the remoting is - # willing to wait before moving on to shut down the drivers. - flush-wait-on-shutdown = 2 s - - # Reuse inbound connections for outbound messages - use-passive-connections = on - - # Controls the backoff interval after a refused write is reattempted. - # (Transports may refuse writes if their internal buffer is full) - backoff-interval = 5 ms - - # Acknowledgment timeout of management commands sent to the transport stack. - command-ack-timeout = 30 s - - # The timeout for outbound associations to perform the handshake. 
- # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl - # the configured connection-timeout for the transport will be used instead. - handshake-timeout = 15 s - - ### Security settings - - # Enable untrusted mode for full security of server managed actors, prevents - # system messages to be send by clients, e.g. messages like 'Create', - # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc. - untrusted-mode = off - - # When 'untrusted-mode=on' inbound actor selections are by default discarded. - # Actors with paths defined in this list are granted permission to receive actor - # selections messages. - # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"] - trusted-selection-paths = [] - - ### Logging - - # If this is "on", Akka will log all inbound messages at DEBUG level, - # if off then they are not logged - log-received-messages = off - - # If this is "on", Akka will log all outbound messages at DEBUG level, - # if off then they are not logged - log-sent-messages = off - - # Sets the log granularity level at which Akka logs remoting events. This setting - # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility - # reasons the setting "on" will default to "debug" level. Please note that the effective - # logging level is still determined by the global logging level of the actor system: - # for example debug level remoting events will be only logged if the system - # is running with debug level logging. - # Failures to deserialize received messages also fall under this flag. - log-remote-lifecycle-events = on - - # Logging of message types with payload size in bytes larger than - # this value. Maximum detected size per message type is logged once, - # with an increase threshold of 10%. - # By default this feature is turned off. Activate it by setting the property to - # a value in bytes, such as 1000b. Note that for all messages larger than this - # limit there will be extra performance and scalability cost. - log-frame-size-exceeding = off - - # Log warning if the number of messages in the backoff buffer in the endpoint - # writer exceeds this limit. It can be disabled by setting the value to off. - log-buffer-size-exceeding = 50000 - - # After failed to establish an outbound connection, the remoting will mark the - # address as failed. This configuration option controls how much time should - # be elapsed before reattempting a new connection. While the address is - # gated, all messages sent to the address are delivered to dead-letters. - # Since this setting limits the rate of reconnects setting it to a - # very short interval (i.e. less than a second) may result in a storm of - # reconnect attempts. - retry-gate-closed-for = 5 s - - # After catastrophic communication failures that result in the loss of system - # messages or after the remote DeathWatch triggers the remote system gets - # quarantined to prevent inconsistent behavior. - # This setting controls how long the Quarantine marker will be kept around - # before being removed to avoid long-term memory leaks. - # WARNING: DO NOT change this to a small value to re-enable communication with - # quarantined nodes. Such feature is not supported and any behavior between - # the affected systems after lifting the quarantine is undefined. - prune-quarantine-marker-after = 5 d - - # If system messages have been exchanged between two systems (i.e. 
remote death - # watch or remote deployment has been used) a remote system will be marked as - # quarantined after the two system has no active association, and no - # communication happens during the time configured here. - # The only purpose of this setting is to avoid storing system message redelivery - # data (sequence number state, etc.) for an undefined amount of time leading to long - # term memory leak. Instead, if a system has been gone for this period, - # or more exactly - # - there is no association between the two systems (TCP connection, if TCP transport is used) - # - neither side has been attempting to communicate with the other - # - there are no pending system messages to deliver - # for the amount of time configured here, the remote system will be quarantined and all state - # associated with it will be dropped. - # - # Maximum value depends on the scheduler's max limit (default 248 days) and if configured - # to a longer duration this feature will effectively be disabled. Setting the value to - # 'off' will also disable the feature. Note that if disabled there is a risk of a long - # term memory leak. - quarantine-after-silence = 2 d - - # This setting defines the maximum number of unacknowledged system messages - # allowed for a remote system. If this limit is reached the remote system is - # declared to be dead and its UID marked as tainted. - system-message-buffer-size = 20000 - - # This setting defines the maximum idle time after an individual - # acknowledgement for system messages is sent. System message delivery - # is guaranteed by explicit acknowledgement messages. These acks are - # piggybacked on ordinary traffic messages. If no traffic is detected - # during the time period configured here, the remoting will send out - # an individual ack. - system-message-ack-piggyback-timeout = 0.3 s - - # This setting defines the time after internal management signals - # between actors (used for DeathWatch and supervision) that have not been - # explicitly acknowledged or negatively acknowledged are resent. - # Messages that were negatively acknowledged are always immediately - # resent. - resend-interval = 2 s - - # Maximum number of unacknowledged system messages that will be resent - # each 'resend-interval'. If you watch many (> 1000) remote actors you can - # increase this value to for example 600, but a too large limit (e.g. 10000) - # may flood the connection and might cause false failure detection to trigger. - # Test such a configuration by watching all actors at the same time and stop - # all watched actors at the same time. - resend-limit = 200 - - # WARNING: this setting should not be not changed unless all of its consequences - # are properly understood which assumes experience with remoting internals - # or expert advice. - # This setting defines the time after redelivery attempts of internal management - # signals are stopped to a remote system that has been not confirmed to be alive by - # this system before. - initial-system-message-delivery-timeout = 3 m - - ### Transports and adapters - - # List of the transport drivers that will be loaded by the remoting. - # A list of fully qualified config paths must be provided where - # the given configuration path contains a transport-class key - # pointing to an implementation class of the Transport interface. - # If multiple transports are provided, the address of the first - # one will be used as a default address. 
- enabled-transports = ["akka.remote.classic.netty.tcp"] - - # Transport drivers can be augmented with adapters by adding their - # name to the applied-adapters setting in the configuration of a - # transport. The available adapters should be configured in this - # section by providing a name, and the fully qualified name of - # their corresponding implementation. The class given here - # must implement akka.akka.remote.transport.TransportAdapterProvider - # and have public constructor without parameters. - adapters { - gremlin = "akka.remote.transport.FailureInjectorProvider" - trttl = "akka.remote.transport.ThrottlerProvider" - } + ### Security settings - ### Default configuration for the Netty based transport drivers + # Enable untrusted mode for full security of server managed actors, prevents + # system messages to be send by clients, e.g. messages like 'Create', + # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc. + untrusted-mode = off - netty.tcp { - # The class given here must implement the akka.remote.transport.Transport - # interface and offer a public constructor which takes two arguments: - # 1) akka.actor.ExtendedActorSystem - # 2) com.typesafe.config.Config - transport-class = "akka.remote.transport.netty.NettyTransport" + # When 'untrusted-mode=on' inbound actor selections are by default discarded. + # Actors with paths defined in this list are granted permission to receive actor + # selections messages. + # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"] + trusted-selection-paths = [] - # Transport drivers can be augmented with adapters by adding their - # name to the applied-adapters list. The last adapter in the - # list is the adapter immediately above the driver, while - # the first one is the top of the stack below the standard - # Akka protocol - applied-adapters = [] - - # The default remote server port clients should connect to. - # Default is 2552 (AKKA), use 0 if you want a random available port - # This port needs to be unique for each actor system on the same machine. - port = 2552 - - # The hostname or ip clients should connect to. - # InetAddress.getLocalHost.getHostAddress is used if empty - hostname = "" - - # Use this setting to bind a network interface to a different port - # than remoting protocol expects messages at. This may be used - # when running akka nodes in a separated networks (under NATs or docker containers). - # Use 0 if you want a random available port. Examples: - # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = 2553 - # Network interface will be bound to the 2553 port, but remoting protocol will - # expect messages sent to port 2552. - # - # akka.remote.classic.netty.tcp.port = 0 - # akka.remote.classic.netty.tcp.bind-port = 0 - # Network interface will be bound to a random port, and remoting protocol will - # expect messages sent to the bound port. - # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = 0 - # Network interface will be bound to a random port, but remoting protocol will - # expect messages sent to port 2552. - # - # akka.remote.classic.netty.tcp.port = 0 - # akka.remote.classic.netty.tcp.bind-port = 2553 - # Network interface will be bound to the 2553 port, and remoting protocol will - # expect messages sent to the bound port. 
- # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = "" - # Network interface will be bound to the 2552 port, and remoting protocol will - # expect messages sent to the bound port. + ### Logging + + # If this is "on", Akka will log all inbound messages at DEBUG level, + # if off then they are not logged + log-received-messages = off + + # If this is "on", Akka will log all outbound messages at DEBUG level, + # if off then they are not logged + log-sent-messages = off + + # Sets the log granularity level at which Akka logs remoting events. This setting + # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility + # reasons the setting "on" will default to "debug" level. Please note that the effective + # logging level is still determined by the global logging level of the actor system: + # for example debug level remoting events will be only logged if the system + # is running with debug level logging. + # Failures to deserialize received messages also fall under this flag. + log-remote-lifecycle-events = on + + # Logging of message types with payload size in bytes larger than + # this value. Maximum detected size per message type is logged once, + # with an increase threshold of 10%. + # By default this feature is turned off. Activate it by setting the property to + # a value in bytes, such as 1000b. Note that for all messages larger than this + # limit there will be extra performance and scalability cost. + log-frame-size-exceeding = off + + # Log warning if the number of messages in the backoff buffer in the endpoint + # writer exceeds this limit. It can be disabled by setting the value to off. + log-buffer-size-exceeding = 50000 + + # After failed to establish an outbound connection, the remoting will mark the + # address as failed. This configuration option controls how much time should + # be elapsed before reattempting a new connection. While the address is + # gated, all messages sent to the address are delivered to dead-letters. + # Since this setting limits the rate of reconnects setting it to a + # very short interval (i.e. less than a second) may result in a storm of + # reconnect attempts. + retry-gate-closed-for = 5 s + + # After catastrophic communication failures that result in the loss of system + # messages or after the remote DeathWatch triggers the remote system gets + # quarantined to prevent inconsistent behavior. + # This setting controls how long the Quarantine marker will be kept around + # before being removed to avoid long-term memory leaks. + # WARNING: DO NOT change this to a small value to re-enable communication with + # quarantined nodes. Such feature is not supported and any behavior between + # the affected systems after lifting the quarantine is undefined. + prune-quarantine-marker-after = 5 d + + # If system messages have been exchanged between two systems (i.e. remote death + # watch or remote deployment has been used) a remote system will be marked as + # quarantined after the two system has no active association, and no + # communication happens during the time configured here. + # The only purpose of this setting is to avoid storing system message redelivery + # data (sequence number state, etc.) for an undefined amount of time leading to long + # term memory leak. 
Instead, if a system has been gone for this period, + # or more exactly + # - there is no association between the two systems (TCP connection, if TCP transport is used) + # - neither side has been attempting to communicate with the other + # - there are no pending system messages to deliver + # for the amount of time configured here, the remote system will be quarantined and all state + # associated with it will be dropped. # - # akka.remote.classic.netty.tcp.port if empty - bind-port = "" - - # Use this setting to bind a network interface to a different hostname or ip - # than remoting protocol expects messages at. - # Use "0.0.0.0" to bind to all interfaces. - # akka.remote.classic.netty.tcp.hostname if empty - bind-hostname = "" - - # Enables SSL support on this transport - enable-ssl = false - - # Sets the connectTimeoutMillis of all outbound connections, - # i.e. how long a connect may take until it is timed out - connection-timeout = 15 s - - # If set to "" then the specified dispatcher - # will be used to accept inbound connections, and perform IO. If "" then - # dedicated threads will be used. - # Please note that the Netty driver only uses this configuration and does - # not read the "akka.remote.use-dispatcher" entry. Instead it has to be - # configured manually to point to the same dispatcher if needed. - use-dispatcher-for-io = "" - - # Sets the high water mark for the in and outbound sockets, - # set to 0b for platform default - write-buffer-high-water-mark = 0b - - # Sets the low water mark for the in and outbound sockets, - # set to 0b for platform default - write-buffer-low-water-mark = 0b - - # Sets the send buffer size of the Sockets, - # set to 0b for platform default - send-buffer-size = 256000b - - # Sets the receive buffer size of the Sockets, - # set to 0b for platform default - receive-buffer-size = 256000b - - # Maximum message size the transport will accept, but at least - # 32000 bytes. - # Please note that UDP does not support arbitrary large datagrams, - # so this setting has to be chosen carefully when using UDP. - # Both send-buffer-size and receive-buffer-size settings has to - # be adjusted to be able to buffer messages of maximum size. - maximum-frame-size = 128000b - - # Sets the size of the connection backlog - backlog = 4096 - - # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm - tcp-nodelay = on - - # Enables TCP Keepalive, subject to the O/S kernel’s configuration - tcp-keepalive = on - - # Enables SO_REUSEADDR, which determines when an ActorSystem can open - # the specified listen port (the meaning differs between *nix and Windows) - # Valid values are "on", "off" and "off-for-windows" - # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378 - # "off-for-windows" of course means that it's "on" for all other platforms - tcp-reuse-addr = off-for-windows - - # Used to configure the number of I/O worker threads on server sockets - server-socket-worker-pool { - # Min number of threads to cap factor-based number to - pool-size-min = 2 - - # The pool size factor is used to determine thread pool size - # using the following formula: ceil(available processors * factor). - # Resulting size is then bounded by the pool-size-min and - # pool-size-max values. 
- pool-size-factor = 1.0 - - # Max number of threads to cap factor-based number to - pool-size-max = 2 + # Maximum value depends on the scheduler's max limit (default 248 days) and if configured + # to a longer duration this feature will effectively be disabled. Setting the value to + # 'off' will also disable the feature. Note that if disabled there is a risk of a long + # term memory leak. + quarantine-after-silence = 2 d + + # This setting defines the maximum number of unacknowledged system messages + # allowed for a remote system. If this limit is reached the remote system is + # declared to be dead and its UID marked as tainted. + system-message-buffer-size = 20000 + + # This setting defines the maximum idle time after an individual + # acknowledgement for system messages is sent. System message delivery + # is guaranteed by explicit acknowledgement messages. These acks are + # piggybacked on ordinary traffic messages. If no traffic is detected + # during the time period configured here, the remoting will send out + # an individual ack. + system-message-ack-piggyback-timeout = 0.3 s + + # This setting defines the time after internal management signals + # between actors (used for DeathWatch and supervision) that have not been + # explicitly acknowledged or negatively acknowledged are resent. + # Messages that were negatively acknowledged are always immediately + # resent. + resend-interval = 2 s + + # Maximum number of unacknowledged system messages that will be resent + # each 'resend-interval'. If you watch many (> 1000) remote actors you can + # increase this value to for example 600, but a too large limit (e.g. 10000) + # may flood the connection and might cause false failure detection to trigger. + # Test such a configuration by watching all actors at the same time and stop + # all watched actors at the same time. + resend-limit = 200 + + # WARNING: this setting should not be not changed unless all of its consequences + # are properly understood which assumes experience with remoting internals + # or expert advice. + # This setting defines the time after redelivery attempts of internal management + # signals are stopped to a remote system that has been not confirmed to be alive by + # this system before. + initial-system-message-delivery-timeout = 3 m + + ### Transports and adapters + + # List of the transport drivers that will be loaded by the remoting. + # A list of fully qualified config paths must be provided where + # the given configuration path contains a transport-class key + # pointing to an implementation class of the Transport interface. + # If multiple transports are provided, the address of the first + # one will be used as a default address. + enabled-transports = ["akka.remote.classic.netty.tcp"] + + # Transport drivers can be augmented with adapters by adding their + # name to the applied-adapters setting in the configuration of a + # transport. The available adapters should be configured in this + # section by providing a name, and the fully qualified name of + # their corresponding implementation. The class given here + # must implement akka.akka.remote.transport.TransportAdapterProvider + # and have public constructor without parameters. 
+ adapters { + gremlin = "akka.remote.transport.FailureInjectorProvider" + trttl = "akka.remote.transport.ThrottlerProvider" } - # Used to configure the number of I/O worker threads on client sockets - client-socket-worker-pool { - # Min number of threads to cap factor-based number to - pool-size-min = 2 + ### Default configuration for the Netty based transport drivers - # The pool size factor is used to determine thread pool size - # using the following formula: ceil(available processors * factor). - # Resulting size is then bounded by the pool-size-min and - # pool-size-max values. - pool-size-factor = 1.0 + netty.tcp { + # The class given here must implement the akka.remote.transport.Transport + # interface and offer a public constructor which takes two arguments: + # 1) akka.actor.ExtendedActorSystem + # 2) com.typesafe.config.Config + transport-class = "akka.remote.transport.netty.NettyTransport" - # Max number of threads to cap factor-based number to - pool-size-max = 2 - } + # Transport drivers can be augmented with adapters by adding their + # name to the applied-adapters list. The last adapter in the + # list is the adapter immediately above the driver, while + # the first one is the top of the stack below the standard + # Akka protocol + applied-adapters = [] + # The default remote server port clients should connect to. + # Default is 2552 (AKKA), use 0 if you want a random available port + # This port needs to be unique for each actor system on the same machine. + port = 2552 - } + # The hostname or ip clients should connect to. + # InetAddress.getLocalHost.getHostAddress is used if empty + hostname = "" - netty.ssl = ${akka.remote.classic.netty.tcp} - netty.ssl = { - # Enable SSL/TLS encryption. - # This must be enabled on both the client and server to work. - enable-ssl = true - - # Factory of SSLEngine. - # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public - # constructor with an ActorSystem parameter. - # The default ConfigSSLEngineProvider is configured by properties in section - # akka.remote.classic.netty.ssl.security - # - # The SSLEngineProvider can also be defined via ActorSystemSetup with - # SSLEngineProviderSetup when starting the ActorSystem. That is useful when - # the SSLEngineProvider implementation requires other external constructor - # parameters or is created before the ActorSystem is created. - # If such SSLEngineProviderSetup is defined this config property is not used. - ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider - - security { - # This is the Java Key Store used by the server connection - key-store = "keystore" - - # This password is used for decrypting the key store - key-store-password = "changeme" - - # This password is used for decrypting the key - key-password = "changeme" - - # This is the Java Key Store used by the client connection - trust-store = "truststore" - - # This password is used for decrypting the trust store - trust-store-password = "changeme" - - # Protocol to use for SSL encryption. - protocol = "TLSv1.2" - - # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", - # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"] - # When doing rolling upgrades, make sure to include both the algorithm used - # by old nodes and the preferred algorithm. - # If you use a JDK 8 prior to 8u161 you need to install - # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256. 
- # More info here: - # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html - enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_128_CBC_SHA"] - - # There are two options, and the default SecureRandom is recommended: - # "" or "SecureRandom" => (default) - # "SHA1PRNG" => Can be slow because of blocking issues on Linux + # Use this setting to bind a network interface to a different port + # than remoting protocol expects messages at. This may be used + # when running akka nodes in a separated networks (under NATs or docker containers). + # Use 0 if you want a random available port. Examples: # - # Setting a value here may require you to supply the appropriate cipher - # suite (see enabled-algorithms section above) - random-number-generator = "" - - # Require mutual authentication between TLS peers + # akka.remote.classic.netty.tcp.port = 2552 + # akka.remote.classic.netty.tcp.bind-port = 2553 + # Network interface will be bound to the 2553 port, but remoting protocol will + # expect messages sent to port 2552. + # + # akka.remote.classic.netty.tcp.port = 0 + # akka.remote.classic.netty.tcp.bind-port = 0 + # Network interface will be bound to a random port, and remoting protocol will + # expect messages sent to the bound port. # - # Without mutual authentication only the peer that actively establishes a connection (TLS client side) - # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on, - # the passive side will also request and verify a certificate from the connecting peer. + # akka.remote.classic.netty.tcp.port = 2552 + # akka.remote.classic.netty.tcp.bind-port = 0 + # Network interface will be bound to a random port, but remoting protocol will + # expect messages sent to port 2552. # - # To prevent man-in-the-middle attacks this setting is enabled by default. + # akka.remote.classic.netty.tcp.port = 0 + # akka.remote.classic.netty.tcp.bind-port = 2553 + # Network interface will be bound to the 2553 port, and remoting protocol will + # expect messages sent to the bound port. # - # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that - # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting - # connection will not send over certificates even if asked. + # akka.remote.classic.netty.tcp.port = 2552 + # akka.remote.classic.netty.tcp.bind-port = "" + # Network interface will be bound to the 2552 port, and remoting protocol will + # expect messages sent to the bound port. # - # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side) - # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of - # Akka < 2.4.12 can therefore work like this: - # - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off" - # - then switch this flag to "on" and do again a rolling upgrade of all nodes - # The first step ensures that all nodes will send over a certificate when asked to. The second - # step will ensure that all nodes finally enforce the secure checking of client certificates. - require-mutual-authentication = on + # akka.remote.classic.netty.tcp.port if empty + bind-port = "" + + # Use this setting to bind a network interface to a different hostname or ip + # than remoting protocol expects messages at. 
+ # Use "0.0.0.0" to bind to all interfaces. + # akka.remote.classic.netty.tcp.hostname if empty + bind-hostname = "" + + # Enables SSL support on this transport + enable-ssl = false + + # Sets the connectTimeoutMillis of all outbound connections, + # i.e. how long a connect may take until it is timed out + connection-timeout = 15 s + + # If set to "" then the specified dispatcher + # will be used to accept inbound connections, and perform IO. If "" then + # dedicated threads will be used. + # Please note that the Netty driver only uses this configuration and does + # not read the "akka.remote.use-dispatcher" entry. Instead it has to be + # configured manually to point to the same dispatcher if needed. + use-dispatcher-for-io = "" + + # Sets the high water mark for the in and outbound sockets, + # set to 0b for platform default + write-buffer-high-water-mark = 0b + + # Sets the low water mark for the in and outbound sockets, + # set to 0b for platform default + write-buffer-low-water-mark = 0b + + # Sets the send buffer size of the Sockets, + # set to 0b for platform default + send-buffer-size = 256000b + + # Sets the receive buffer size of the Sockets, + # set to 0b for platform default + receive-buffer-size = 256000b + + # Maximum message size the transport will accept, but at least + # 32000 bytes. + # Please note that UDP does not support arbitrary large datagrams, + # so this setting has to be chosen carefully when using UDP. + # Both send-buffer-size and receive-buffer-size settings has to + # be adjusted to be able to buffer messages of maximum size. + maximum-frame-size = 128000b + + # Sets the size of the connection backlog + backlog = 4096 + + # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm + tcp-nodelay = on + + # Enables TCP Keepalive, subject to the O/S kernel’s configuration + tcp-keepalive = on + + # Enables SO_REUSEADDR, which determines when an ActorSystem can open + # the specified listen port (the meaning differs between *nix and Windows) + # Valid values are "on", "off" and "off-for-windows" + # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378 + # "off-for-windows" of course means that it's "on" for all other platforms + tcp-reuse-addr = off-for-windows + + # Used to configure the number of I/O worker threads on server sockets + server-socket-worker-pool { + # Min number of threads to cap factor-based number to + pool-size-min = 2 + + # The pool size factor is used to determine thread pool size + # using the following formula: ceil(available processors * factor). + # Resulting size is then bounded by the pool-size-min and + # pool-size-max values. + pool-size-factor = 1.0 + + # Max number of threads to cap factor-based number to + pool-size-max = 2 + } + + # Used to configure the number of I/O worker threads on client sockets + client-socket-worker-pool { + # Min number of threads to cap factor-based number to + pool-size-min = 2 + + # The pool size factor is used to determine thread pool size + # using the following formula: ceil(available processors * factor). + # Resulting size is then bounded by the pool-size-min and + # pool-size-max values. + pool-size-factor = 1.0 + + # Max number of threads to cap factor-based number to + pool-size-max = 2 + } + + } - } - ### Default configuration for the failure injector transport adapter + netty.ssl = ${akka.remote.classic.netty.tcp} + netty.ssl = { + # Enable SSL/TLS encryption. + # This must be enabled on both the client and server to work. 
+ enable-ssl = true - gremlin { - # Enable debug logging of the failure injector transport adapter - debug = off - } + # Factory of SSLEngine. + # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public + # constructor with an ActorSystem parameter. + # The default ConfigSSLEngineProvider is configured by properties in section + # akka.remote.classic.netty.ssl.security + # + # The SSLEngineProvider can also be defined via ActorSystemSetup with + # SSLEngineProviderSetup when starting the ActorSystem. That is useful when + # the SSLEngineProvider implementation requires other external constructor + # parameters or is created before the ActorSystem is created. + # If such SSLEngineProviderSetup is defined this config property is not used. + ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider + + security { + # This is the Java Key Store used by the server connection + key-store = "keystore" - backoff-remote-dispatcher { - type = Dispatcher - executor = "fork-join-executor" - fork-join-executor { - # Min number of threads to cap factor-based parallelism number to - parallelism-min = 2 - parallelism-max = 2 + # This password is used for decrypting the key store + key-store-password = "changeme" + + # This password is used for decrypting the key + key-password = "changeme" + + # This is the Java Key Store used by the client connection + trust-store = "truststore" + + # This password is used for decrypting the trust store + trust-store-password = "changeme" + + # Protocol to use for SSL encryption. + protocol = "TLSv1.2" + + # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"] + # When doing rolling upgrades, make sure to include both the algorithm used + # by old nodes and the preferred algorithm. + # If you use a JDK 8 prior to 8u161 you need to install + # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256. + # More info here: + # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html + enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_128_CBC_SHA"] + + # There are two options, and the default SecureRandom is recommended: + # "" or "SecureRandom" => (default) + # "SHA1PRNG" => Can be slow because of blocking issues on Linux + # + # Setting a value here may require you to supply the appropriate cipher + # suite (see enabled-algorithms section above) + random-number-generator = "" + + # Require mutual authentication between TLS peers + # + # Without mutual authentication only the peer that actively establishes a connection (TLS client side) + # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on, + # the passive side will also request and verify a certificate from the connecting peer. + # + # To prevent man-in-the-middle attacks this setting is enabled by default. + # + # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that + # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting + # connection will not send over certificates even if asked. + # + # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side) + # will use the given key-store to send over a certificate if asked. 
A rolling upgrade from versions of + # Akka < 2.4.12 can therefore work like this: + # - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off" + # - then switch this flag to "on" and do again a rolling upgrade of all nodes + # The first step ensures that all nodes will send over a certificate when asked to. The second + # step will ensure that all nodes finally enforce the secure checking of client certificates. + require-mutual-authentication = on + } + } + + ### Default configuration for the failure injector transport adapter + + gremlin { + # Enable debug logging of the failure injector transport adapter + debug = off + } + + backoff-remote-dispatcher { + type = Dispatcher + executor = "fork-join-executor" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 2 + parallelism-max = 2 + } } } } diff --git a/akka/repackaged-akka/pom.xml b/akka/repackaged-akka/pom.xml index 50109a1630..cc222188b5 100644 --- a/akka/repackaged-akka/pom.xml +++ b/akka/repackaged-akka/pom.xml @@ -13,7 +13,7 @@ org.opendaylight.controller bundle-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../../bundle-parent @@ -211,6 +211,7 @@ true + org.opendaylight.controller.repackaged.akka akka.*, com.typesafe.sslconfig.akka.*, diff --git a/artifacts/pom.xml b/artifacts/pom.xml index 65d5b0d3ce..79e8d88f08 100644 --- a/artifacts/pom.xml +++ b/artifacts/pom.xml @@ -14,13 +14,13 @@ org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller controller-artifacts - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -32,6 +32,13 @@ ${project.version} + + + ${project.groupId} + atomix-storage + ${project.version} + + ${project.groupId} @@ -390,40 +397,6 @@ features xml - - - - ${project.groupId} - netty-event-executor-config - 0.16.0-SNAPSHOT - - - ${project.groupId} - netty-threadgroup-config - 0.16.0-SNAPSHOT - - - ${project.groupId} - netty-timer-config - 0.16.0-SNAPSHOT - - - ${project.groupId} - threadpool-config-api - 0.16.0-SNAPSHOT - - - ${project.groupId} - threadpool-config-impl - 0.16.0-SNAPSHOT - - - ${project.groupId} - odl-controller-exp-netty-config - ${project.version} - features - xml - diff --git a/atomix-storage/LICENSE b/atomix-storage/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/atomix-storage/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/atomix-storage/pom.xml b/atomix-storage/pom.xml new file mode 100644 index 0000000000..bb07137137 --- /dev/null +++ b/atomix-storage/pom.xml @@ -0,0 +1,157 @@ + + + 4.0.0 + + + org.opendaylight.controller + bundle-parent + 9.0.3-SNAPSHOT + ../bundle-parent + + + atomix-storage + Atomix Storage + bundle + + + true + false + + + + + com.google.guava + guava + + + io.netty + netty-buffer + + + io.netty + netty-common + + + org.eclipse.jdt + org.eclipse.jdt.annotation + + + com.esotericsoftware + kryo + 4.0.3 + provided + + + com.esotericsoftware + minlog + 1.3.1 + provided + + + com.esotericsoftware + reflectasm + 1.11.9 + provided + + + org.objenesis + objenesis + 2.6 + provided + + + + com.google.guava + guava-testlib + + + + + + + + maven-dependency-plugin + + + unpack-license + + true + + + + + + maven-antrun-plugin + + + copy-license + prepare-package + + run + + + + + + + + + + + maven-checkstyle-plugin + + + check-license + + check + + + true + + + + + + + org.apache.felix + maven-bundle-plugin + true + + + + io.atomix.storage.journal + + + sun.nio.ch;resolution:=optional, + sun.misc;resolution:=optional, + !COM.newmonics.*, + !android.os, + * + + + + + *;inline=true;groupId=com.esotericsoftware, + *;inline=true;groupId=org.objenesis, + + + + + + + diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java new file mode 100644 index 0000000000..767e67fa46 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import org.eclipse.jdt.annotation.NonNullByDefault; + +/** + * A {@link JournalReader} traversing only committed entries. + */ +@NonNullByDefault +final class CommitsSegmentJournalReader extends SegmentedJournalReader { + CommitsSegmentJournalReader(final SegmentedJournal journal, final JournalSegment segment) { + super(journal, segment); + } + + @Override + public T tryNext(final EntryMapper mapper) { + return getNextIndex() <= journal.getCommitIndex() ? super.tryNext(mapper) : null; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java new file mode 100644 index 0000000000..311d16b150 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static com.google.common.base.Verify.verify; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import org.eclipse.jdt.annotation.NonNull; + +/** + * A {@link StorageLevel#DISK} implementation of {@link FileReader}. Maintains an internal buffer. + */ +final class DiskFileReader extends FileReader { + /** + * Just do not bother with IO smaller than this many bytes. + */ + private static final int MIN_IO_SIZE = 8192; + + private final FileChannel channel; + private final ByteBuffer buffer; + + // tracks where memory's first available byte maps to in terms of FileChannel.position() + private int bufferPosition; + + DiskFileReader(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) { + this(path, channel, allocateBuffer(maxSegmentSize, maxEntrySize)); + } + + // Note: take ownership of the buffer + DiskFileReader(final Path path, final FileChannel channel, final ByteBuffer buffer) { + super(path); + this.channel = requireNonNull(channel); + this.buffer = buffer.flip(); + bufferPosition = 0; + } + + static ByteBuffer allocateBuffer(final int maxSegmentSize, final int maxEntrySize) { + return ByteBuffer.allocate(chooseBufferSize(maxSegmentSize, maxEntrySize)); + } + + private static int chooseBufferSize(final int maxSegmentSize, final int maxEntrySize) { + if (maxSegmentSize <= MIN_IO_SIZE) { + // just buffer the entire segment + return maxSegmentSize; + } + + // one full entry plus its header, or MIN_IO_SIZE, which benefits the read of many small entries + final int minBufferSize = maxEntrySize + SegmentEntry.HEADER_BYTES; + return minBufferSize <= MIN_IO_SIZE ? MIN_IO_SIZE : minBufferSize; + } + + @Override + void invalidateCache() { + buffer.clear().flip(); + bufferPosition = 0; + } + + @Override + ByteBuffer read(final int position, final int size) { + // calculate logical seek distance between buffer's first byte and position and split flow between + // forward-moving and backwards-moving code paths. + final int seek = bufferPosition - position; + return seek >= 0 ? 
forwardAndRead(seek, position, size) : rewindAndRead(-seek, position, size); + } + + private @NonNull ByteBuffer forwardAndRead(final int seek, final int position, final int size) { + final int missing = buffer.limit() - seek - size; + if (missing <= 0) { + // fast path: we have the requested region + return buffer.slice(seek, size).asReadOnlyBuffer(); + } + + // We need to read more data, but let's salvage what we can: + // - set buffer position to seek, which means it points to the same as position + // - run compact, which moves everything between position and limit onto the beginning of buffer and + // sets it up to receive more bytes + // - start the read accounting for the seek + buffer.position(seek).compact(); + readAtLeast(position + seek, missing); + return setAndSlice(position, size); + } + + private @NonNull ByteBuffer rewindAndRead(final int rewindBy, final int position, final int size) { + // TODO: Lazy solution. To be super crisp, we want to find out how much of the buffer we can salvage and + // do all the limit/position fiddling before and after read. Right now let's just flow the buffer up and + // read it. + buffer.clear(); + readAtLeast(position, size); + return setAndSlice(position, size); + } + + private void readAtLeast(final int readPosition, final int readAtLeast) { + final int bytesRead; + try { + bytesRead = channel.read(buffer, readPosition); + } catch (IOException e) { + throw new StorageException(e); + } + verify(bytesRead >= readAtLeast, "Short read %s, expected %s", bytesRead, readAtLeast); + buffer.flip(); + } + + private @NonNull ByteBuffer setAndSlice(final int position, final int size) { + bufferPosition = position; + return buffer.slice(0, size).asReadOnlyBuffer(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileWriter.java new file mode 100644 index 0000000000..5f468d46a1 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileWriter.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; + +/** + * A {@link StorageLevel#DISK} {@link FileWriter}. 
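For reference, the buffer sizing rule used by DiskFileReader above (buffer the whole segment when it is no larger than MIN_IO_SIZE, otherwise reserve room for one maximum-sized entry plus its header, but never less than MIN_IO_SIZE) can be exercised in isolation. This is a minimal sketch, not project code; the 8-byte header constant and the segment/entry sizes are assumptions made for illustration.

// Minimal sketch of the buffer sizing rule described above; not project code.
// MIN_IO_SIZE mirrors DiskFileReader; HEADER_BYTES = 8 is an assumed 32-bit length plus 32-bit CRC32.
final class BufferSizeSketch {
    private static final int MIN_IO_SIZE = 8192;
    private static final int HEADER_BYTES = Integer.BYTES + Integer.BYTES;

    static int chooseBufferSize(final int maxSegmentSize, final int maxEntrySize) {
        if (maxSegmentSize <= MIN_IO_SIZE) {
            // small segment: just buffer it whole
            return maxSegmentSize;
        }
        // otherwise one full entry plus its header, but never less than MIN_IO_SIZE
        final int minBufferSize = maxEntrySize + HEADER_BYTES;
        return minBufferSize <= MIN_IO_SIZE ? MIN_IO_SIZE : minBufferSize;
    }

    public static void main(final String[] args) {
        System.out.println(chooseBufferSize(4_096, 1_024));    // 4096: the whole segment fits in one buffer
        System.out.println(chooseBufferSize(131_072, 1_024));  // 8192: many small entries per read
        System.out.println(chooseBufferSize(131_072, 65_536)); // 65544: room for one large entry plus its header
    }
}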
+ */ +final class DiskFileWriter extends FileWriter { + private static final ByteBuffer ZERO_ENTRY_HEADER = ByteBuffer.wrap(new byte[HEADER_BYTES]); + + private final DiskFileReader reader; + private final ByteBuffer buffer; + + DiskFileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) { + super(path, channel, maxSegmentSize, maxEntrySize); + buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize); + reader = new DiskFileReader(path, channel, buffer); + } + + @Override + DiskFileReader reader() { + return reader; + } + + @Override + MappedByteBuffer buffer() { + return null; + } + + @Override + MappedFileWriter toMapped() { + flush(); + return new MappedFileWriter(path, channel, maxSegmentSize, maxEntrySize); + } + + @Override + DiskFileWriter toDisk() { + return null; + } + + @Override + void writeEmptyHeader(final int position) { + try { + channel.write(ZERO_ENTRY_HEADER.asReadOnlyBuffer(), position); + } catch (IOException e) { + throw new StorageException(e); + } + } + + @Override + ByteBuffer startWrite(final int position, final int size) { + return buffer.clear().slice(0, size); + } + + @Override + void commitWrite(final int position, final ByteBuffer entry) { + try { + channel.write(entry, position); + } catch (IOException e) { + throw new StorageException(e); + } + } + + @Override + void flush() { + if (channel.isOpen()) { + try { + channel.force(true); + } catch (IOException e) { + throw new StorageException(e); + } + } + } + + @Override + void close() { + flush(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java new file mode 100644 index 0000000000..fdc0597d36 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static java.util.Objects.requireNonNull; + +import com.google.common.base.MoreObjects; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import org.eclipse.jdt.annotation.NonNull; + +/** + * An abstraction over how to read a {@link JournalSegmentFile}. + */ +abstract sealed class FileReader permits DiskFileReader, MappedFileReader { + private final Path path; + + FileReader(final Path path) { + this.path = requireNonNull(path); + } + + /** + * Invalidate any cache that is present, so that the next read is coherent with the backing file. + */ + abstract void invalidateCache(); + + /** + * Read the some bytes as specified position. The sum of position and size is guaranteed not to exceed the maximum + * segment size nor maximum entry size. 
+ * + * @param position position to the entry header + * @param size to read + * @return resulting buffer + */ + abstract @NonNull ByteBuffer read(int position, int size); + + @Override + public final String toString() { + return MoreObjects.toStringHelper(this).add("path", path).toString(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/FileWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/FileWriter.java new file mode 100644 index 0000000000..4ead89bfb3 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/FileWriter.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static java.util.Objects.requireNonNull; + +import com.google.common.base.MoreObjects; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import org.eclipse.jdt.annotation.Nullable; + +/** + * An abstraction over how to write a {@link JournalSegmentFile}. + */ +abstract sealed class FileWriter permits DiskFileWriter, MappedFileWriter { + final Path path; + final FileChannel channel; + final int maxSegmentSize; + final int maxEntrySize; + + FileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) { + this.path = requireNonNull(path); + this.channel = requireNonNull(channel); + this.maxSegmentSize = maxSegmentSize; + this.maxEntrySize = maxEntrySize; + } + + /** + * Return the internal {@link FileReader}. + * + * @return the internal FileReader + */ + abstract FileReader reader(); + + /** + * Write {@link SegmentEntry#HEADER_BYTES} worth of zeroes at specified position. + * + * @param position position to write to + */ + abstract void writeEmptyHeader(int position); + + abstract ByteBuffer startWrite(int position, int size); + + abstract void commitWrite(int position, ByteBuffer entry); + + /** + * Flushes written entries to disk. + */ + abstract void flush(); + + /** + * Closes this writer. + */ + abstract void close(); + + @Override + public final String toString() { + return MoreObjects.toStringHelper(this).add("path", path).toString(); + } + + abstract @Nullable MappedByteBuffer buffer(); + + abstract @Nullable MappedFileWriter toMapped(); + + abstract @Nullable DiskFileWriter toDisk(); +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java new file mode 100644 index 0000000000..5bf7e6f454 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java @@ -0,0 +1,44 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * Copyright (c) 2024 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static java.util.Objects.requireNonNull; + +import com.google.common.base.MoreObjects; +import org.eclipse.jdt.annotation.NonNullByDefault; + +/** + * Indexed journal entry. + * + * @param entry type + * @param index the entry index + * @param entry the indexed entry + * @param size the serialized entry size + */ +// FIXME: it seems 'index' has to be non-zero, we should enforce that if that really is the case +// FIXME: it seems 'size' has not be non-zero, we should enforce that if that really is the case +@NonNullByDefault +public record Indexed(long index, E entry, int size) { + public Indexed { + requireNonNull(entry); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("index", index).add("entry", entry).toString(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java new file mode 100644 index 0000000000..5e37c12222 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java @@ -0,0 +1,60 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.io.Closeable; + +/** + * Journal. + * + * @author Jordan Halterman + */ +public interface Journal extends Closeable { + + /** + * Returns the journal writer. + * + * @return The journal writer. + */ + JournalWriter writer(); + + /** + * Opens a new journal reader. + * + * @param index The index at which to start the reader. + * @return A new journal reader. + */ + JournalReader openReader(long index); + + /** + * Opens a new journal reader. + * + * @param index The index at which to start the reader. + * @param mode the reader mode + * @return A new journal reader. + */ + JournalReader openReader(long index, JournalReader.Mode mode); + + /** + * Returns a boolean indicating whether the journal is open. + * + * @return Indicates whether the journal is open. + */ + boolean isOpen(); + + @Override + void close(); +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java new file mode 100644 index 0000000000..a3c6ea5366 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java @@ -0,0 +1,97 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; + +/** + * Log reader. + * + * @author Jordan Halterman + */ +@NonNullByDefault +public interface JournalReader extends AutoCloseable { + /** + * Raft log reader mode. + */ + enum Mode { + /** + * Reads all entries from the log. + */ + ALL, + /** + * Reads committed entries from the log. + */ + COMMITS, + } + + /** + * A journal entry processor. Responsible for transforming entries into their internal representation. + * + * @param Entry type + * @param Internal representation type + */ + @FunctionalInterface + interface EntryMapper { + /** + * Process an entry. + * + * @param index entry index + * @param entry entry itself + * @param size entry size + * @return resulting internal representation + */ + T mapEntry(long index, E entry, int size); + } + + /** + * Returns the first index in the journal. + * + * @return the first index in the journal + */ + long getFirstIndex(); + + /** + * Returns the next reader index. + * + * @return The next reader index. + */ + long getNextIndex(); + + /** + * Try to move to the next entry. + * + * @param mapper callback to be invoked for the entry + * @return processed entry, or {@code null} + */ + @Nullable T tryNext(EntryMapper mapper); + + /** + * Resets the reader to the start. + */ + void reset(); + + /** + * Resets the reader to the given index. + * + * @param index The index to which to reset the reader. + */ + void reset(long index); + + @Override + void close(); +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java new file mode 100644 index 0000000000..02921bed2b --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java @@ -0,0 +1,270 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * Copyright (c) 2024 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
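As a usage sketch for the JournalReader contract above: a reader opened in COMMITS mode is typically drained by calling tryNext() with an EntryMapper until it returns null. The journal instance and the String entry type below are assumptions for illustration, not part of this change.

import io.atomix.storage.journal.Journal;
import io.atomix.storage.journal.JournalReader;

// Hypothetical drain of committed entries; 'journal' and the String entry type are assumptions.
final class ReaderSketch {
    static void dumpCommitted(final Journal<String> journal) {
        try (var reader = journal.openReader(1, JournalReader.Mode.COMMITS)) {
            String line;
            // tryNext() maps the entry through the EntryMapper callback and returns null once exhausted
            while ((line = reader.tryNext((index, entry, size) -> index + ": " + entry)) != null) {
                System.out.println(line);
            }
        }
    }
}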
+ */ +package io.atomix.storage.journal; + +import com.google.common.base.MoreObjects; +import io.atomix.storage.journal.index.JournalIndex; +import io.atomix.storage.journal.index.Position; +import io.atomix.storage.journal.index.SparseJournalIndex; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import org.eclipse.jdt.annotation.Nullable; + +/** + * Log segment. + * + * @author Jordan Halterman + */ +final class JournalSegment implements AutoCloseable { + private final JournalSegmentFile file; + private final JournalSegmentDescriptor descriptor; + private final StorageLevel storageLevel; + private final int maxEntrySize; + private final JournalIndex journalIndex; + private final Set readers = ConcurrentHashMap.newKeySet(); + private final AtomicInteger references = new AtomicInteger(); + private final FileChannel channel; + + private JournalSegmentWriter writer; + private boolean open = true; + + JournalSegment( + final JournalSegmentFile file, + final JournalSegmentDescriptor descriptor, + final StorageLevel storageLevel, + final int maxEntrySize, + final double indexDensity) { + this.file = file; + this.descriptor = descriptor; + this.storageLevel = storageLevel; + this.maxEntrySize = maxEntrySize; + journalIndex = new SparseJournalIndex(indexDensity); + try { + channel = FileChannel.open(file.file().toPath(), + StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE); + } catch (IOException e) { + throw new StorageException(e); + } + + final var fileWriter = switch (storageLevel) { + case DISK -> new DiskFileWriter(file.file().toPath(), channel, descriptor.maxSegmentSize(), maxEntrySize); + case MAPPED -> new MappedFileWriter(file.file().toPath(), channel, descriptor.maxSegmentSize(), maxEntrySize); + }; + writer = new JournalSegmentWriter(fileWriter, this, maxEntrySize, journalIndex) + // relinquish mapped memory + .toFileChannel(); + } + + /** + * Returns the segment's starting index. + * + * @return The segment's starting index. + */ + long firstIndex() { + return descriptor.index(); + } + + /** + * Returns the last index in the segment. + * + * @return The last index in the segment. + */ + long lastIndex() { + return writer.getLastIndex(); + } + + /** + * Returns the size of the segment. + * + * @return the size of the segment + */ + int size() { + try { + return (int) channel.size(); + } catch (IOException e) { + throw new StorageException(e); + } + } + + /** + * Returns the segment file. + * + * @return The segment file. + */ + JournalSegmentFile file() { + return file; + } + + /** + * Returns the segment descriptor. + * + * @return The segment descriptor. + */ + JournalSegmentDescriptor descriptor() { + return descriptor; + } + + /** + * Looks up the position of the given index. + * + * @param index the index to lookup + * @return the position of the given index or a lesser index, or {@code null} + */ + @Nullable Position lookup(final long index) { + return journalIndex.lookup(index); + } + + /** + * Acquires a reference to the log segment. + */ + private void acquire() { + if (references.getAndIncrement() == 0 && storageLevel == StorageLevel.MAPPED) { + writer = writer.toMapped(); + } + } + + /** + * Releases a reference to the log segment. 
+ */ + private void release() { + if (references.decrementAndGet() == 0) { + if (storageLevel == StorageLevel.MAPPED) { + writer = writer.toFileChannel(); + } + if (!open) { + finishClose(); + } + } + } + + /** + * Acquires a reference to the segment writer. + * + * @return The segment writer. + */ + JournalSegmentWriter acquireWriter() { + checkOpen(); + acquire(); + + return writer; + } + + /** + * Releases the reference to the segment writer. + */ + void releaseWriter() { + release(); + } + + /** + * Creates a new segment reader. + * + * @return A new segment reader. + */ + JournalSegmentReader createReader() { + checkOpen(); + acquire(); + + final var buffer = writer.buffer(); + final var path = file.file().toPath(); + final var fileReader = buffer != null ? new MappedFileReader(path, buffer) + : new DiskFileReader(path, channel, descriptor.maxSegmentSize(), maxEntrySize); + final var reader = new JournalSegmentReader(this, fileReader, maxEntrySize); + reader.setPosition(JournalSegmentDescriptor.BYTES); + readers.add(reader); + return reader; + } + + /** + * Closes a segment reader. + * + * @param reader the closed segment reader + */ + void closeReader(JournalSegmentReader reader) { + if (readers.remove(reader)) { + release(); + } + } + + /** + * Checks whether the segment is open. + */ + private void checkOpen() { + if (!open) { + throw new IllegalStateException("Segment not open"); + } + } + + /** + * Returns a boolean indicating whether the segment is open. + * + * @return indicates whether the segment is open + */ + public boolean isOpen() { + return open; + } + + /** + * Closes the segment. + */ + @Override + public void close() { + if (!open) { + return; + } + + open = false; + readers.forEach(JournalSegmentReader::close); + if (references.get() == 0) { + finishClose(); + } + } + + private void finishClose() { + writer.close(); + try { + channel.close(); + } catch (IOException e) { + throw new StorageException(e); + } + } + + /** + * Deletes the segment. + */ + void delete() { + try { + Files.deleteIfExists(file.file().toPath()); + } catch (IOException e) { + throw new StorageException(e); + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("id", descriptor.id()) + .add("version", descriptor.version()) + .add("index", firstIndex()) + .toString(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java new file mode 100644 index 0000000000..757ca3a078 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java @@ -0,0 +1,289 @@ +/* + * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
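Since acquire() and release() drive both the MAPPED/DISK writer switching and the deferred close, callers are expected to pair acquireWriter() with releaseWriter(). A minimal sketch of that pairing follows; it assumes a helper living in the io.atomix.storage.journal package, and the segment and payload arguments are hypothetical.

import io.netty.buffer.ByteBuf;

// Hypothetical helper; it would have to live in the io.atomix.storage.journal package,
// and the segment/payload arguments are assumptions made for illustration.
final class SegmentWriteSketch {
    static void appendAndFlush(final JournalSegment segment, final ByteBuf payload) {
        final var writer = segment.acquireWriter();
        try {
            writer.append(payload); // returns null when the segment has no room left
            writer.flush();
        } finally {
            // Drops the reference; if close() raced with this writer, release completes it.
            segment.releaseWriter();
        }
    }
}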
+ */ +package io.atomix.storage.journal; + +import com.google.common.annotations.VisibleForTesting; + +import java.nio.ByteBuffer; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +/** + * Stores information about a {@link JournalSegment} of the log. + *

+ * The segment descriptor manages metadata related to a single segment of the log. Descriptors are stored within the
+ * first {@code 64} bytes of each segment in the following order:
+ * <ul>
+ * <li>{@code id} (64-bit signed integer) - A unique segment identifier. This is a monotonically increasing number within
+ * each log. Segments with in-sequence identifiers should contain in-sequence indexes.</li>
+ * <li>{@code index} (64-bit signed integer) - The effective first index of the segment. This indicates the index at which
+ * the first entry should be written to the segment. Indexes are monotonically increasing thereafter.</li>
+ * <li>{@code version} (64-bit signed integer) - The version of the segment. Versions are monotonically increasing
+ * starting at {@code 1}. Versions will only be incremented whenever the segment is rewritten to another memory/disk
+ * space, e.g. after log compaction.</li>
+ * <li>{@code maxSegmentSize} (32-bit unsigned integer) - The maximum number of bytes allowed in the segment.</li>
+ * <li>{@code maxEntries} (32-bit signed integer) - The total number of expected entries in the segment. This is the final
+ * number of entries allowed within the segment both before and after compaction. This entry count is used to determine
+ * the count of internal indexing and deduplication facilities.</li>
+ * <li>{@code updated} (64-bit signed integer) - The last update to the segment in terms of milliseconds since the epoch.
+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+ * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+ * change to {@code updated}.</li>
+ * <li>{@code locked} (8-bit boolean) - A boolean indicating whether the segment is locked. Segments will be locked once
+ * all entries have been committed to the segment. The lock state of each segment is used to determine log compaction
+ * and recovery behavior.</li>
+ * </ul>
+ * The remainder of the 64 segment header bytes are reserved for future metadata. + * + * @author Jordan Halterman + */ +public final class JournalSegmentDescriptor { + public static final int BYTES = 64; + + // Current segment version. + @VisibleForTesting + static final int VERSION = 1; + + // The lengths of each field in the header. + private static final int VERSION_LENGTH = Integer.BYTES; // 32-bit signed integer + private static final int ID_LENGTH = Long.BYTES; // 64-bit signed integer + private static final int INDEX_LENGTH = Long.BYTES; // 64-bit signed integer + private static final int MAX_SIZE_LENGTH = Integer.BYTES; // 32-bit signed integer + private static final int MAX_ENTRIES_LENGTH = Integer.BYTES; // 32-bit signed integer + private static final int UPDATED_LENGTH = Long.BYTES; // 64-bit signed integer + + // The positions of each field in the header. + private static final int VERSION_POSITION = 0; // 0 + private static final int ID_POSITION = VERSION_POSITION + VERSION_LENGTH; // 4 + private static final int INDEX_POSITION = ID_POSITION + ID_LENGTH; // 12 + private static final int MAX_SIZE_POSITION = INDEX_POSITION + INDEX_LENGTH; // 20 + private static final int MAX_ENTRIES_POSITION = MAX_SIZE_POSITION + MAX_SIZE_LENGTH; // 24 + private static final int UPDATED_POSITION = MAX_ENTRIES_POSITION + MAX_ENTRIES_LENGTH; // 28 + + /** + * Returns a descriptor builder. + *

+ * The descriptor builder will write segment metadata to a {@code 48} byte in-memory buffer. + * + * @return The descriptor builder. + */ + public static Builder builder() { + return new Builder(ByteBuffer.allocate(BYTES)); + } + + /** + * Returns a descriptor builder for the given descriptor buffer. + * + * @param buffer The descriptor buffer. + * @return The descriptor builder. + * @throws NullPointerException if {@code buffer} is null + */ + public static Builder builder(ByteBuffer buffer) { + return new Builder(buffer); + } + + private final ByteBuffer buffer; + private final int version; + private final long id; + private final long index; + private final int maxSegmentSize; + private final int maxEntries; + private volatile long updated; + private volatile boolean locked; + + /** + * @throws NullPointerException if {@code buffer} is null + */ + public JournalSegmentDescriptor(ByteBuffer buffer) { + this.buffer = buffer; + this.version = buffer.getInt(); + this.id = buffer.getLong(); + this.index = buffer.getLong(); + this.maxSegmentSize = buffer.getInt(); + this.maxEntries = buffer.getInt(); + this.updated = buffer.getLong(); + this.locked = buffer.get() == 1; + } + + /** + * Returns the segment version. + *

+ * Versions are monotonically increasing starting at {@code 1}. + * + * @return The segment version. + */ + public int version() { + return version; + } + + /** + * Returns the segment identifier. + *

+ * The segment ID is a monotonically increasing number within each log. Segments with in-sequence identifiers should + * contain in-sequence indexes. + * + * @return The segment identifier. + */ + public long id() { + return id; + } + + /** + * Returns the segment index. + *

+ * The index indicates the index at which the first entry should be written to the segment. Indexes are monotonically + * increasing thereafter. + * + * @return The segment index. + */ + public long index() { + return index; + } + + /** + * Returns the maximum count of the segment. + * + * @return The maximum allowed count of the segment. + */ + public int maxSegmentSize() { + return maxSegmentSize; + } + + /** + * Returns the maximum number of entries allowed in the segment. + * + * @return The maximum number of entries allowed in the segment. + */ + public int maxEntries() { + return maxEntries; + } + + /** + * Returns last time the segment was updated. + *

+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have + * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a + * change to {@code updated}. + * + * @return The last time the segment was updated in terms of milliseconds since the epoch. + */ + public long updated() { + return updated; + } + + /** + * Writes an update to the descriptor. + */ + public void update(long timestamp) { + if (!locked) { + buffer.putLong(UPDATED_POSITION, timestamp); + this.updated = timestamp; + } + } + + /** + * Copies the segment to a new buffer. + */ + JournalSegmentDescriptor copyTo(ByteBuffer buffer) { + buffer.putInt(version); + buffer.putLong(id); + buffer.putLong(index); + buffer.putInt(maxSegmentSize); + buffer.putInt(maxEntries); + buffer.putLong(updated); + buffer.put(locked ? (byte) 1 : (byte) 0); + return this; + } + + @Override + public String toString() { + return toStringHelper(this) + .add("version", version) + .add("id", id) + .add("index", index) + .add("updated", updated) + .toString(); + } + + /** + * Segment descriptor builder. + */ + public static class Builder { + private final ByteBuffer buffer; + + private Builder(ByteBuffer buffer) { + this.buffer = requireNonNull(buffer, "buffer cannot be null"); + buffer.putInt(VERSION_POSITION, VERSION); + } + + /** + * Sets the segment identifier. + * + * @param id The segment identifier. + * @return The segment descriptor builder. + */ + public Builder withId(long id) { + buffer.putLong(ID_POSITION, id); + return this; + } + + /** + * Sets the segment index. + * + * @param index The segment starting index. + * @return The segment descriptor builder. + */ + public Builder withIndex(long index) { + buffer.putLong(INDEX_POSITION, index); + return this; + } + + /** + * Sets maximum count of the segment. + * + * @param maxSegmentSize The maximum count of the segment. + * @return The segment descriptor builder. + */ + public Builder withMaxSegmentSize(int maxSegmentSize) { + buffer.putInt(MAX_SIZE_POSITION, maxSegmentSize); + return this; + } + + /** + * Sets the maximum number of entries in the segment. + * + * @param maxEntries The maximum number of entries in the segment. + * @return The segment descriptor builder. + * @deprecated since 3.0.2 + */ + @Deprecated + public Builder withMaxEntries(int maxEntries) { + buffer.putInt(MAX_ENTRIES_POSITION, maxEntries); + return this; + } + + /** + * Builds the segment descriptor. + * + * @return The built segment descriptor. + */ + public JournalSegmentDescriptor build() { + buffer.rewind(); + return new JournalSegmentDescriptor(buffer); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java new file mode 100644 index 0000000000..2190dee5a7 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java @@ -0,0 +1,94 @@ +/* + * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
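As a small illustration of the descriptor layout documented above, a descriptor is normally produced through the public Builder and occupies the first 64 bytes of a segment file. The identifiers and segment size below are arbitrary example values.

import io.atomix.storage.journal.JournalSegmentDescriptor;

// Hypothetical descriptor built through the public Builder above; ids and sizes are arbitrary examples.
final class DescriptorSketch {
    public static void main(final String[] args) {
        final var descriptor = JournalSegmentDescriptor.builder()
            .withId(1)
            .withIndex(1)
            .withMaxSegmentSize(32 * 1024 * 1024) // 32 MiB per segment, an arbitrary choice
            .build();

        // The resulting metadata occupies the first JournalSegmentDescriptor.BYTES (64) bytes of a segment file.
        System.out.println(descriptor);
    }
}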
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.io.File; + +import static java.util.Objects.requireNonNull; + +/** + * Segment file utility. + * + * @author Jordan Halterman + */ +public final class JournalSegmentFile { + private static final char PART_SEPARATOR = '-'; + private static final char EXTENSION_SEPARATOR = '.'; + private static final String EXTENSION = "log"; + private final File file; + + /** + * Returns a boolean value indicating whether the given file appears to be a parsable segment file. + * + * @throws NullPointerException if {@code file} is null + */ + public static boolean isSegmentFile(String name, File file) { + return isSegmentFile(name, file.getName()); + } + + /** + * Returns a boolean value indicating whether the given file appears to be a parsable segment file. + * + * @param journalName the name of the journal + * @param fileName the name of the file to check + * @throws NullPointerException if {@code file} is null + */ + public static boolean isSegmentFile(String journalName, String fileName) { + requireNonNull(journalName, "journalName cannot be null"); + requireNonNull(fileName, "fileName cannot be null"); + + int partSeparator = fileName.lastIndexOf(PART_SEPARATOR); + int extensionSeparator = fileName.lastIndexOf(EXTENSION_SEPARATOR); + + if (extensionSeparator == -1 + || partSeparator == -1 + || extensionSeparator < partSeparator + || !fileName.endsWith(EXTENSION)) { + return false; + } + + for (int i = partSeparator + 1; i < extensionSeparator; i++) { + if (!Character.isDigit(fileName.charAt(i))) { + return false; + } + } + + return fileName.startsWith(journalName); + } + + /** + * Creates a segment file for the given directory, log name, segment ID, and segment version. + */ + static File createSegmentFile(String name, File directory, long id) { + return new File(directory, String.format("%s-%d.log", requireNonNull(name, "name cannot be null"), id)); + } + + /** + * @throws IllegalArgumentException if {@code file} is not a valid segment file + */ + JournalSegmentFile(File file) { + this.file = file; + } + + /** + * Returns the segment file. + * + * @return The segment file. + */ + public File file() { + return file; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java new file mode 100644 index 0000000000..d89c720c67 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
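The naming convention implemented above means a segment of journal "foo" with id 3 is stored as "foo-3.log", and isSegmentFile() accepts exactly that shape. A quick sketch with hypothetical names:

import io.atomix.storage.journal.JournalSegmentFile;

// Hypothetical names exercising the "<journal>-<id>.log" convention implemented above.
final class SegmentFileNameSketch {
    public static void main(final String[] args) {
        System.out.println(JournalSegmentFile.isSegmentFile("foo", "foo-3.log"));      // true
        System.out.println(JournalSegmentFile.isSegmentFile("foo", "foo-3.snapshot")); // false: wrong extension
        System.out.println(JournalSegmentFile.isSegmentFile("foo", "bar-3.log"));      // false: different journal name
    }
}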
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static com.google.common.base.Verify.verify; +import static java.util.Objects.requireNonNull; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import java.util.zip.CRC32; +import org.eclipse.jdt.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class JournalSegmentReader { + private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentReader.class); + + private final JournalSegment segment; + private final FileReader fileReader; + private final int maxSegmentSize; + private final int maxEntrySize; + + private int position; + + JournalSegmentReader(final JournalSegment segment, final FileReader fileReader, final int maxEntrySize) { + this.segment = requireNonNull(segment); + this.fileReader = requireNonNull(fileReader); + maxSegmentSize = segment.descriptor().maxSegmentSize(); + this.maxEntrySize = maxEntrySize; + } + + /** + * Return the current position. + * + * @return current position. + */ + int position() { + return position; + } + + /** + * Set the file position. + * + * @param position new position + */ + void setPosition(final int position) { + verify(position >= JournalSegmentDescriptor.BYTES && position < maxSegmentSize, + "Invalid position %s", position); + this.position = position; + fileReader.invalidateCache(); + } + + /** + * Invalidate any cache that is present, so that the next read is coherent with the backing file. + */ + void invalidateCache() { + fileReader.invalidateCache(); + } + + /** + * Reads the next binary data block + * + * @param index entry index + * @return The binary data, or {@code null} + */ + @Nullable ByteBuf readBytes(final long index) { + // Check if there is enough in the buffer remaining + final int remaining = maxSegmentSize - position - SegmentEntry.HEADER_BYTES; + if (remaining < 0) { + // Not enough space in the segment, there can never be another entry + return null; + } + + // Calculate maximum entry length not exceeding file size nor maxEntrySize + final var maxLength = Math.min(remaining, maxEntrySize); + final var buffer = fileReader.read(position, maxLength + SegmentEntry.HEADER_BYTES); + + // Read the entry length + final var length = buffer.getInt(0); + if (length < 1 || length > maxLength) { + // Invalid length, make sure next read re-tries + invalidateCache(); + return null; + } + + // Read the entry checksum + final int checksum = buffer.getInt(Integer.BYTES); + + // Slice off the entry's bytes + final var entryBuffer = buffer.slice(SegmentEntry.HEADER_BYTES, length); + // Compute the checksum for the entry bytes. + final var crc32 = new CRC32(); + crc32.update(entryBuffer); + + // If the stored checksum does not equal the computed checksum, do not proceed further + final var computed = (int) crc32.getValue(); + if (checksum != computed) { + LOG.warn("Expected checksum {}, computed {}", Integer.toHexString(checksum), Integer.toHexString(computed)); + invalidateCache(); + return null; + } + + // update position + position += SegmentEntry.HEADER_BYTES + length; + + // return bytes + entryBuffer.rewind(); + return Unpooled.buffer(length).writeBytes(entryBuffer); + } + + /** + * Close this reader. 
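The verification above implies the on-disk entry framing: a 32-bit length, a 32-bit CRC32 of the payload, then the payload itself. The following standalone sketch produces a header the reader would accept; the payload content is a made-up example.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Standalone sketch of the [length:int32][crc32:int32][payload] framing checked above; the payload is made up.
final class EntryFramingSketch {
    public static void main(final String[] args) {
        final byte[] payload = "hello journal".getBytes(StandardCharsets.UTF_8);

        final var crc32 = new CRC32();
        crc32.update(payload); // the checksum covers the payload bytes only, as readBytes() recomputes it

        final var framed = ByteBuffer.allocate(Integer.BYTES + Integer.BYTES + payload.length)
            .putInt(payload.length)         // length, validated by the reader against maxEntrySize
            .putInt((int) crc32.getValue()) // stored checksum, compared against the recomputed value
            .put(payload)
            .flip();

        System.out.println(framed.remaining()); // 8-byte header + 13-byte payload = 21
    }
}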
+ */ + void close() { + segment.closeReader(this); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java new file mode 100644 index 0000000000..e381bc25a7 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES; +import static java.util.Objects.requireNonNull; + +import io.atomix.storage.journal.index.JournalIndex; +import io.netty.buffer.ByteBuf; +import java.nio.MappedByteBuffer; +import java.util.zip.CRC32; +import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.jdt.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class JournalSegmentWriter { + private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentWriter.class); + + private final FileWriter fileWriter; + final @NonNull JournalSegment segment; + private final @NonNull JournalIndex index; + final int maxSegmentSize; + final int maxEntrySize; + + private int currentPosition; + private Long lastIndex; + + JournalSegmentWriter(final FileWriter fileWriter, final JournalSegment segment, final int maxEntrySize, + final JournalIndex index) { + this.fileWriter = requireNonNull(fileWriter); + this.segment = requireNonNull(segment); + this.index = requireNonNull(index); + maxSegmentSize = segment.descriptor().maxSegmentSize(); + this.maxEntrySize = maxEntrySize; + // adjust lastEntry value + reset(0); + } + + JournalSegmentWriter(final JournalSegmentWriter previous, final FileWriter fileWriter) { + segment = previous.segment; + index = previous.index; + maxSegmentSize = previous.maxSegmentSize; + maxEntrySize = previous.maxEntrySize; + lastIndex = previous.lastIndex; + currentPosition = previous.currentPosition; + this.fileWriter = requireNonNull(fileWriter); + } + + /** + * Returns the last written index. + * + * @return The last written index. + */ + long getLastIndex() { + return lastIndex != null ? lastIndex : segment.firstIndex() - 1; + } + + /** + * Returns the next index to be written. + * + * @return The next index to be written. + */ + long getNextIndex() { + return lastIndex != null ? lastIndex + 1 : segment.firstIndex(); + } + + /** + * Tries to append a binary data to the journal. + * + * @param buf binary data to append + * @return The index of appended data, or {@code null} if segment has no space + */ + Long append(final ByteBuf buf) { + final var length = buf.readableBytes(); + if (length > maxEntrySize) { + throw new StorageException.TooLarge("Serialized entry size exceeds maximum allowed bytes (" + + maxEntrySize + ")"); + } + + // Store the entry index. 
+ final long index = getNextIndex(); + final int position = currentPosition; + + // check space available + final int nextPosition = position + HEADER_BYTES + length; + if (nextPosition >= maxSegmentSize) { + LOG.trace("Not enough space for {} at {}", index, position); + return null; + } + + // allocate buffer and write data + final var writeBuffer = fileWriter.startWrite(position, length + HEADER_BYTES).position(HEADER_BYTES); + writeBuffer.put(buf.nioBuffer()); + + // Compute the checksum for the entry. + final var crc32 = new CRC32(); + crc32.update(writeBuffer.flip().position(HEADER_BYTES)); + + // Create a single byte[] in memory for the entire entry and write it as a batch to the underlying buffer. + writeBuffer.putInt(0, length).putInt(Integer.BYTES, (int) crc32.getValue()); + fileWriter.commitWrite(position, writeBuffer.rewind()); + + // Update the last entry with the correct index/term/length. + currentPosition = nextPosition; + lastIndex = index; + this.index.index(index, position); + + return index; + } + + /** + * Resets the head of the segment to the given index. + * + * @param index the index to which to reset the head of the segment + */ + void reset(final long index) { + // acquire ownership of cache and make sure reader does not see anything we've done once we're done + final var fileReader = fileWriter.reader(); + try { + resetWithBuffer(fileReader, index); + } finally { + // Make sure reader does not see anything we've done + fileReader.invalidateCache(); + } + } + + private void resetWithBuffer(final FileReader fileReader, final long index) { + long nextIndex = segment.firstIndex(); + + // Clear the buffer indexes and acquire ownership of the buffer + currentPosition = JournalSegmentDescriptor.BYTES; + final var reader = new JournalSegmentReader(segment, fileReader, maxEntrySize); + reader.setPosition(JournalSegmentDescriptor.BYTES); + + while (index == 0 || nextIndex <= index) { + final var buf = reader.readBytes(nextIndex); + if (buf == null) { + break; + } + + lastIndex = nextIndex; + this.index.index(nextIndex, currentPosition); + nextIndex++; + + // Update the current position for indexing. + currentPosition += HEADER_BYTES + buf.readableBytes(); + } + } + + /** + * Truncates the log to the given index. + * + * @param index The index to which to truncate the log. + */ + void truncate(final long index) { + // If the index is greater than or equal to the last index, skip the truncate. + if (index >= getLastIndex()) { + return; + } + + // Reset the last written + lastIndex = null; + + // Truncate the index. + this.index.truncate(index); + + if (index < segment.firstIndex()) { + // Reset the writer to the first entry. + currentPosition = JournalSegmentDescriptor.BYTES; + } else { + // Reset the writer to the given index. + reset(index); + } + + // Zero the entry header at current channel position. + fileWriter.writeEmptyHeader(currentPosition); + } + + /** + * Flushes written entries to disk. + */ + void flush() { + fileWriter.flush(); + } + + /** + * Closes this writer. + */ + void close() { + fileWriter.close(); + } + + /** + * Returns the mapped buffer underlying the segment writer, or {@code null} if the writer does not have such a + * buffer. + * + * @return the mapped buffer underlying the segment writer, or {@code null}. + */ + @Nullable MappedByteBuffer buffer() { + return fileWriter.buffer(); + } + + @NonNull JournalSegmentWriter toMapped() { + final var newWriter = fileWriter.toMapped(); + return newWriter == null ? 
this : new JournalSegmentWriter(this, newWriter); + } + + @NonNull JournalSegmentWriter toFileChannel() { + final var newWriter = fileWriter.toDisk(); + return newWriter == null ? this : new JournalSegmentWriter(this, newWriter); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java new file mode 100644 index 0000000000..a970882edf --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java @@ -0,0 +1,209 @@ +/* + * Copyright 2014-2021 Open Networking Foundation + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import com.google.common.annotations.Beta; +import com.google.common.annotations.VisibleForTesting; +import io.atomix.utils.serializer.KryoJournalSerdesBuilder; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +/** + * Support for serialization of {@link Journal} entries. + * + * @deprecated due to dependency on outdated Kryo library, {@link JournalSerializer} to be used instead. + */ +@Deprecated(forRemoval = true, since="9.0.3") +public interface JournalSerdes { + /** + * Serializes given object to byte array. + * + * @param obj Object to serialize + * @return serialized bytes + */ + byte[] serialize(Object obj); + + /** + * Serializes given object to byte array. + * + * @param obj Object to serialize + * @param bufferSize maximum size of serialized bytes + * @return serialized bytes + */ + byte[] serialize(Object obj, int bufferSize); + + /** + * Serializes given object to byte buffer. + * + * @param obj Object to serialize + * @param buffer to write to + */ + void serialize(Object obj, ByteBuffer buffer); + + /** + * Serializes given object to OutputStream. + * + * @param obj Object to serialize + * @param stream to write to + */ + void serialize(Object obj, OutputStream stream); + + /** + * Serializes given object to OutputStream. + * + * @param obj Object to serialize + * @param stream to write to + * @param bufferSize size of the buffer in front of the stream + */ + void serialize(Object obj, OutputStream stream, int bufferSize); + + /** + * Deserializes given byte array to Object. + * + * @param bytes serialized bytes + * @param deserialized Object type + * @return deserialized Object + */ + T deserialize(byte[] bytes); + + /** + * Deserializes given byte buffer to Object. + * + * @param buffer input with serialized bytes + * @param deserialized Object type + * @return deserialized Object + */ + T deserialize(final ByteBuffer buffer); + + /** + * Deserializes given InputStream to an Object. + * + * @param stream input stream + * @param deserialized Object type + * @return deserialized Object + */ + T deserialize(InputStream stream); + + /** + * Deserializes given InputStream to an Object. 
+ * + * @param stream input stream + * @param deserialized Object type + * @param bufferSize size of the buffer in front of the stream + * @return deserialized Object + */ + T deserialize(final InputStream stream, final int bufferSize); + + /** + * Creates a new {@link JournalSerdes} builder. + * + * @return builder + */ + static Builder builder() { + return new KryoJournalSerdesBuilder(); + } + + /** + * Builder for {@link JournalSerdes}. + */ + interface Builder { + /** + * Builds a {@link JournalSerdes} instance. + * + * @return A {@link JournalSerdes} implementation. + */ + JournalSerdes build(); + + /** + * Builds a {@link JournalSerdes} instance. + * + * @param friendlyName friendly name for the namespace + * @return A {@link JournalSerdes} implementation. + */ + JournalSerdes build(String friendlyName); + + /** + * Registers serializer for the given set of classes. + *
+ * When multiple classes are registered with an explicitly provided serializer, the namespace guarantees + * all instances will be serialized with the same type ID. + * + * @param classes list of classes to register + * @param serdes serializer to use for the class + * @return this builder + */ + Builder register(EntrySerdes serdes, Class... classes); + + /** + * Sets the namespace class loader. + * + * @param classLoader the namespace class loader + * @return this builder + */ + Builder setClassLoader(ClassLoader classLoader); + } + + /** + * Input data stream exposed to {@link EntrySerdes#read(EntryInput)}. + */ + @Beta + interface EntryInput { + + byte[] readBytes(int length) throws IOException; + + long readLong() throws IOException; + + String readString() throws IOException; + + Object readObject() throws IOException; + + @VisibleForTesting + int readVarInt() throws IOException; + } + + /** + * Output data stream exposed to {@link EntrySerdes#write(EntryOutput, Object)}. + */ + @Beta + interface EntryOutput { + + void writeBytes(byte[] bytes) throws IOException; + + void writeLong(long value) throws IOException; + + void writeObject(Object value) throws IOException; + + void writeString(String value) throws IOException; + + @VisibleForTesting + void writeVarInt(int value) throws IOException; + } + + /** + * A serializer/deserializer for an entry. + * + * @param Entry type + */ + interface EntrySerdes { + + T read(EntryInput input) throws IOException; + + void write(EntryOutput output, T entry) throws IOException; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java new file mode 100644 index 0000000000..eff9af8559 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024 PANTHEON.tech s.r.o. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package io.atomix.storage.journal; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; + +/** + * Support for serialization of {@link Journal} entries. + */ +public interface JournalSerializer { + + /** + * Serializes given object to byte array. + * + * @param obj Object to serialize + * @return serialized bytes as {@link ByteBuf} + */ + ByteBuf serialize(T obj) ; + + /** + * Deserializes given byte array to Object. + * + * @param buf serialized bytes as {@link ByteBuf} + * @return deserialized Object + */ + T deserialize(final ByteBuf buf); + + static JournalSerializer wrap(final JournalSerdes serdes) { + return new JournalSerializer<>() { + @Override + public ByteBuf serialize(final E obj) { + return Unpooled.wrappedBuffer(serdes.serialize(obj)); + } + + @Override + public E deserialize(final ByteBuf buf) { + return serdes.deserialize(ByteBufUtil.getBytes(buf)); + } + }; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java new file mode 100644 index 0000000000..064fd019ec --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java @@ -0,0 +1,73 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import org.eclipse.jdt.annotation.NonNull; + +/** + * Log writer. + * + * @author Jordan Halterman + */ +public interface JournalWriter { + /** + * Returns the last written index. + * + * @return The last written index. + */ + long getLastIndex(); + + /** + * Returns the next index to be written. + * + * @return The next index to be written. + */ + long getNextIndex(); + + /** + * Appends an entry to the journal. + * + * @param entry The entry to append. + * @return The appended indexed entry. + */ + @NonNull Indexed append(T entry); + + /** + * Commits entries up to the given index. + * + * @param index The index up to which to commit entries. + */ + void commit(long index); + + /** + * Resets the head of the journal to the given index. + * + * @param index the index to which to reset the head of the journal + */ + void reset(long index); + + /** + * Truncates the log to the given index. + * + * @param index The index to which to truncate the log. + */ + void truncate(long index); + + /** + * Flushes written entries to disk. + */ + void flush(); +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java new file mode 100644 index 0000000000..204fd72550 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.nio.ByteBuffer; +import java.nio.file.Path; + +/** + * A {@link StorageLevel#MAPPED} implementation of {@link FileReader}. Operates on direct mapping of the entire file. 
+ */ +final class MappedFileReader extends FileReader { + private final ByteBuffer buffer; + + MappedFileReader(final Path path, final ByteBuffer buffer) { + super(path); + this.buffer = buffer.slice().asReadOnlyBuffer(); + } + + @Override + void invalidateCache() { + // No-op: the mapping is guaranteed to be coherent + } + + @Override + ByteBuffer read(final int position, final int size) { + return buffer.slice(position, size); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileWriter.java new file mode 100644 index 0000000000..47f26ba151 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileWriter.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import io.netty.util.internal.PlatformDependent; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import org.eclipse.jdt.annotation.NonNull; + +/** + * A {@link StorageLevel#MAPPED} {@link FileWriter}. + */ +final class MappedFileWriter extends FileWriter { + private final @NonNull MappedByteBuffer mappedBuffer; + private final MappedFileReader reader; + private final ByteBuffer buffer; + + MappedFileWriter(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) { + super(path, channel, maxSegmentSize, maxEntrySize); + + mappedBuffer = mapBuffer(channel, maxSegmentSize); + buffer = mappedBuffer.slice(); + reader = new MappedFileReader(path, mappedBuffer); + } + + private static @NonNull MappedByteBuffer mapBuffer(final FileChannel channel, final int maxSegmentSize) { + try { + return channel.map(FileChannel.MapMode.READ_WRITE, 0, maxSegmentSize); + } catch (IOException e) { + throw new StorageException(e); + } + } + + @Override + MappedFileReader reader() { + return reader; + } + + @Override + MappedByteBuffer buffer() { + return mappedBuffer; + } + + @Override + MappedFileWriter toMapped() { + return null; + } + + @Override + DiskFileWriter toDisk() { + close(); + return new DiskFileWriter(path, channel, maxSegmentSize, maxEntrySize); + } + + @Override + void writeEmptyHeader(final int position) { + // Note: we issue a single putLong() instead of two putInt()s. 
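+ // The 8-byte zero clears both 32-bit header fields defined by SegmentEntry.HEADER_BYTES
+ // (entry length followed by CRC32 checksum), marking the slot at the given position as empty.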
+ buffer.putLong(position, 0L); + } + + @Override + ByteBuffer startWrite(final int position, final int size) { + return buffer.slice(position, size); + } + + @Override + void commitWrite(final int position, final ByteBuffer entry) { + // No-op, buffer is write-through + } + + @Override + void flush() { + mappedBuffer.force(); + } + + @Override + void close() { + flush(); + PlatformDependent.freeDirectBuffer(mappedBuffer); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java new file mode 100644 index 0000000000..be6c6ba831 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.nio.ByteBuffer; + +/** + * An {@link Indexed} entry read from {@link JournalSegment}. + * + * @param checksum The CRC32 checksum of data + * @param bytes Entry bytes + */ +record SegmentEntry(int checksum, ByteBuffer bytes) { + /** + * The size of the header, comprising of: + *
+ * <ul>
+ *   <li>32-bit signed entry length</li>
+ *   <li>32-bit unsigned CRC32 checksum</li>
+ * </ul>
  • + * + */ + static final int HEADER_BYTES = Integer.BYTES + Integer.BYTES; + + SegmentEntry { + if (bytes.remaining() < 1) { + throw new IllegalArgumentException("Invalid entry bytes " + bytes); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java new file mode 100644 index 0000000000..4b2f0334aa --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java @@ -0,0 +1,877 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.StandardOpenOption; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Segmented journal. + */ +public final class SegmentedJournal implements Journal { + /** + * Returns a new Raft log builder. + * + * @return A new Raft log builder. 
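+ * <p>
+ * A minimal construction sketch; the entry type {@code MyEntry}, the directory path and the
+ * {@code mySerdes} instance are illustrative placeholders:
+ * <pre>{@code
+ * SegmentedJournal<MyEntry> journal = SegmentedJournal.<MyEntry>builder()
+ *     .withName("example")
+ *     .withDirectory("/var/lib/example-journal")
+ *     .withNamespace(mySerdes)
+ *     .withStorageLevel(StorageLevel.DISK)
+ *     .build();
+ * }</pre>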
+ */ + public static Builder builder() { + return new Builder<>(); + } + + private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournal.class); + private static final int SEGMENT_BUFFER_FACTOR = 3; + + private final String name; + private final StorageLevel storageLevel; + private final File directory; + private final JournalSerializer serializer; + private final int maxSegmentSize; + private final int maxEntrySize; + private final int maxEntriesPerSegment; + private final double indexDensity; + private final boolean flushOnCommit; + private final SegmentedJournalWriter writer; + private volatile long commitIndex; + + private final ConcurrentNavigableMap segments = new ConcurrentSkipListMap<>(); + private final Collection readers = ConcurrentHashMap.newKeySet(); + private JournalSegment currentSegment; + + private volatile boolean open = true; + + public SegmentedJournal( + String name, + StorageLevel storageLevel, + File directory, + JournalSerdes namespace, + int maxSegmentSize, + int maxEntrySize, + int maxEntriesPerSegment, + double indexDensity, + boolean flushOnCommit) { + this.name = requireNonNull(name, "name cannot be null"); + this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null"); + this.directory = requireNonNull(directory, "directory cannot be null"); + this.serializer = JournalSerializer.wrap(requireNonNull(namespace, "namespace cannot be null")); + this.maxSegmentSize = maxSegmentSize; + this.maxEntrySize = maxEntrySize; + this.maxEntriesPerSegment = maxEntriesPerSegment; + this.indexDensity = indexDensity; + this.flushOnCommit = flushOnCommit; + open(); + this.writer = new SegmentedJournalWriter<>(this); + } + + /** + * Returns the segment file name prefix. + * + * @return The segment file name prefix. + */ + public String name() { + return name; + } + + /** + * Returns the storage directory. + *
    + * The storage directory is the directory to which all segments write files. Segment files for multiple logs may be + * stored in the storage directory, and files for each log instance will be identified by the {@code prefix} provided + * when the log is opened. + * + * @return The storage directory. + */ + public File directory() { + return directory; + } + + /** + * Returns the storage level. + *
    + * The storage level dictates how entries within individual journal segments should be stored. + * + * @return The storage level. + */ + public StorageLevel storageLevel() { + return storageLevel; + } + + /** + * Returns the maximum journal segment size. + *
    + * The maximum segment size dictates the maximum size any segment in a segment may consume in bytes. + * + * @return The maximum segment size in bytes. + */ + public int maxSegmentSize() { + return maxSegmentSize; + } + + /** + * Returns the maximum journal entry size. + *
    + * The maximum entry size dictates the maximum size any entry in the segment may consume in bytes. + * + * @return the maximum entry size in bytes + */ + public int maxEntrySize() { + return maxEntrySize; + } + + /** + * Returns the maximum number of entries per segment. + *
    + * The maximum entries per segment dictates the maximum number of entries that are allowed to be stored in any segment + * in a journal. + * + * @return The maximum number of entries per segment. + * @deprecated since 3.0.2 + */ + @Deprecated + public int maxEntriesPerSegment() { + return maxEntriesPerSegment; + } + + /** + * Returns the collection of journal segments. + * + * @return the collection of journal segments + */ + public Collection segments() { + return segments.values(); + } + + /** + * Returns the collection of journal segments with indexes greater than the given index. + * + * @param index the starting index + * @return the journal segments starting with indexes greater than or equal to the given index + */ + public Collection segments(long index) { + return segments.tailMap(index).values(); + } + + /** + * Returns serializer instance. + * + * @return serializer instance + */ + JournalSerializer serializer() { + return serializer; + } + + /** + * Returns the total size of the journal. + * + * @return the total size of the journal + */ + public long size() { + return segments.values().stream() + .mapToLong(segment -> segment.size()) + .sum(); + } + + @Override + public JournalWriter writer() { + return writer; + } + + @Override + public JournalReader openReader(long index) { + return openReader(index, JournalReader.Mode.ALL); + } + + /** + * Opens a new Raft log reader with the given reader mode. + * + * @param index The index from which to begin reading entries. + * @param mode The mode in which to read entries. + * @return The Raft log reader. + */ + public JournalReader openReader(long index, JournalReader.Mode mode) { + final var segment = getSegment(index); + final var reader = switch (mode) { + case ALL -> new SegmentedJournalReader<>(this, segment); + case COMMITS -> new CommitsSegmentJournalReader<>(this, segment); + }; + + // Forward reader to specified index + long next = reader.getNextIndex(); + while (index > next && reader.tryAdvance()) { + next = reader.getNextIndex(); + } + + readers.add(reader); + return reader; + } + + /** + * Opens the segments. + */ + private synchronized void open() { + // Load existing log segments from disk. + for (JournalSegment segment : loadSegments()) { + segments.put(segment.descriptor().index(), segment); + } + + // If a segment doesn't already exist, create an initial segment starting at index 1. + if (!segments.isEmpty()) { + currentSegment = segments.lastEntry().getValue(); + } else { + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder() + .withId(1) + .withIndex(1) + .withMaxSegmentSize(maxSegmentSize) + .withMaxEntries(maxEntriesPerSegment) + .build(); + + currentSegment = createSegment(descriptor); + currentSegment.descriptor().update(System.currentTimeMillis()); + + segments.put(1L, currentSegment); + } + } + + /** + * Asserts that the manager is open. + * + * @throws IllegalStateException if the segment manager is not open + */ + private void assertOpen() { + checkState(currentSegment != null, "journal not open"); + } + + /** + * Asserts that enough disk space is available to allocate a new segment. + */ + private void assertDiskSpace() { + if (directory().getUsableSpace() < maxSegmentSize() * SEGMENT_BUFFER_FACTOR) { + throw new StorageException.OutOfDiskSpace("Not enough space to allocate a new journal segment"); + } + } + + /** + * Resets the current segment, creating a new segment if necessary. 
+ */ + private synchronized void resetCurrentSegment() { + JournalSegment lastSegment = getLastSegment(); + if (lastSegment != null) { + currentSegment = lastSegment; + } else { + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder() + .withId(1) + .withIndex(1) + .withMaxSegmentSize(maxSegmentSize) + .withMaxEntries(maxEntriesPerSegment) + .build(); + + currentSegment = createSegment(descriptor); + + segments.put(1L, currentSegment); + } + } + + /** + * Resets and returns the first segment in the journal. + * + * @param index the starting index of the journal + * @return the first segment + */ + JournalSegment resetSegments(long index) { + assertOpen(); + + // If the index already equals the first segment index, skip the reset. + JournalSegment firstSegment = getFirstSegment(); + if (index == firstSegment.firstIndex()) { + return firstSegment; + } + + for (JournalSegment segment : segments.values()) { + segment.close(); + segment.delete(); + } + segments.clear(); + + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder() + .withId(1) + .withIndex(index) + .withMaxSegmentSize(maxSegmentSize) + .withMaxEntries(maxEntriesPerSegment) + .build(); + currentSegment = createSegment(descriptor); + segments.put(index, currentSegment); + return currentSegment; + } + + /** + * Returns the first segment in the log. + * + * @throws IllegalStateException if the segment manager is not open + */ + JournalSegment getFirstSegment() { + assertOpen(); + Map.Entry segment = segments.firstEntry(); + return segment != null ? segment.getValue() : null; + } + + /** + * Returns the last segment in the log. + * + * @throws IllegalStateException if the segment manager is not open + */ + JournalSegment getLastSegment() { + assertOpen(); + Map.Entry segment = segments.lastEntry(); + return segment != null ? segment.getValue() : null; + } + + /** + * Creates and returns the next segment. + * + * @return The next segment. + * @throws IllegalStateException if the segment manager is not open + */ + synchronized JournalSegment getNextSegment() { + assertOpen(); + assertDiskSpace(); + + JournalSegment lastSegment = getLastSegment(); + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder() + .withId(lastSegment != null ? lastSegment.descriptor().id() + 1 : 1) + .withIndex(currentSegment.lastIndex() + 1) + .withMaxSegmentSize(maxSegmentSize) + .withMaxEntries(maxEntriesPerSegment) + .build(); + + currentSegment = createSegment(descriptor); + + segments.put(descriptor.index(), currentSegment); + return currentSegment; + } + + /** + * Returns the segment following the segment with the given ID. + * + * @param index The segment index with which to look up the next segment. + * @return The next segment for the given index. + */ + JournalSegment getNextSegment(long index) { + Map.Entry nextSegment = segments.higherEntry(index); + return nextSegment != null ? nextSegment.getValue() : null; + } + + /** + * Returns the segment for the given index. + * + * @param index The index for which to return the segment. + * @throws IllegalStateException if the segment manager is not open + */ + synchronized JournalSegment getSegment(long index) { + assertOpen(); + // Check if the current segment contains the given index first in order to prevent an unnecessary map lookup. + if (currentSegment != null && index > currentSegment.firstIndex()) { + return currentSegment; + } + + // If the index is in another segment, get the entry with the next lowest first index. 
+ Map.Entry segment = segments.floorEntry(index); + if (segment != null) { + return segment.getValue(); + } + return getFirstSegment(); + } + + /** + * Removes a segment. + * + * @param segment The segment to remove. + */ + synchronized void removeSegment(JournalSegment segment) { + segments.remove(segment.firstIndex()); + segment.close(); + segment.delete(); + resetCurrentSegment(); + } + + /** + * Creates a new segment. + */ + JournalSegment createSegment(JournalSegmentDescriptor descriptor) { + File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, descriptor.id()); + + RandomAccessFile raf; + FileChannel channel; + try { + raf = new RandomAccessFile(segmentFile, "rw"); + raf.setLength(descriptor.maxSegmentSize()); + channel = raf.getChannel(); + } catch (IOException e) { + throw new StorageException(e); + } + + ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES); + descriptor.copyTo(buffer); + buffer.flip(); + try { + channel.write(buffer); + } catch (IOException e) { + throw new StorageException(e); + } finally { + try { + channel.close(); + raf.close(); + } catch (IOException e) { + } + } + JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor); + LOG.debug("Created segment: {}", segment); + return segment; + } + + /** + * Creates a new segment instance. + * + * @param segmentFile The segment file. + * @param descriptor The segment descriptor. + * @return The segment instance. + */ + protected JournalSegment newSegment(JournalSegmentFile segmentFile, JournalSegmentDescriptor descriptor) { + return new JournalSegment(segmentFile, descriptor, storageLevel, maxEntrySize, indexDensity); + } + + /** + * Loads a segment. + */ + private JournalSegment loadSegment(long segmentId) { + File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, segmentId); + ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES); + try (FileChannel channel = openChannel(segmentFile)) { + channel.read(buffer); + buffer.flip(); + JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer); + JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor); + LOG.debug("Loaded disk segment: {} ({})", descriptor.id(), segmentFile.getName()); + return segment; + } catch (IOException e) { + throw new StorageException(e); + } + } + + private FileChannel openChannel(File file) { + try { + return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE); + } catch (IOException e) { + throw new StorageException(e); + } + } + + /** + * Loads all segments from disk. + * + * @return A collection of segments for the log. + */ + protected Collection loadSegments() { + // Ensure log directories are created. + directory.mkdirs(); + + TreeMap segments = new TreeMap<>(); + + // Iterate through all files in the log directory. + for (File file : directory.listFiles(File::isFile)) { + + // If the file looks like a segment file, attempt to load the segment. + if (JournalSegmentFile.isSegmentFile(name, file)) { + JournalSegmentFile segmentFile = new JournalSegmentFile(file); + ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES); + try (FileChannel channel = openChannel(file)) { + channel.read(buffer); + buffer.flip(); + } catch (IOException e) { + throw new StorageException(e); + } + + JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer); + + // Load the segment. 
+ JournalSegment segment = loadSegment(descriptor.id()); + + // Add the segment to the segments list. + LOG.debug("Found segment: {} ({})", segment.descriptor().id(), segmentFile.file().getName()); + segments.put(segment.firstIndex(), segment); + } + } + + // Verify that all the segments in the log align with one another. + JournalSegment previousSegment = null; + boolean corrupted = false; + Iterator> iterator = segments.entrySet().iterator(); + while (iterator.hasNext()) { + JournalSegment segment = iterator.next().getValue(); + if (previousSegment != null && previousSegment.lastIndex() != segment.firstIndex() - 1) { + LOG.warn("Journal is inconsistent. {} is not aligned with prior segment {}", segment.file().file(), previousSegment.file().file()); + corrupted = true; + } + if (corrupted) { + segment.close(); + segment.delete(); + iterator.remove(); + } + previousSegment = segment; + } + + return segments.values(); + } + + /** + * Resets journal readers to the given head. + * + * @param index The index at which to reset readers. + */ + void resetHead(long index) { + for (SegmentedJournalReader reader : readers) { + if (reader.getNextIndex() < index) { + reader.reset(index); + } + } + } + + /** + * Resets journal readers to the given tail. + * + * @param index The index at which to reset readers. + */ + void resetTail(long index) { + for (SegmentedJournalReader reader : readers) { + if (reader.getNextIndex() >= index) { + reader.reset(index); + } + } + } + + void closeReader(SegmentedJournalReader reader) { + readers.remove(reader); + } + + @Override + public boolean isOpen() { + return open; + } + + /** + * Returns a boolean indicating whether a segment can be removed from the journal prior to the given index. + * + * @param index the index from which to remove segments + * @return indicates whether a segment can be removed from the journal + */ + public boolean isCompactable(long index) { + Map.Entry segmentEntry = segments.floorEntry(index); + return segmentEntry != null && segments.headMap(segmentEntry.getValue().firstIndex()).size() > 0; + } + + /** + * Returns the index of the last segment in the log. + * + * @param index the compaction index + * @return the starting index of the last segment in the log + */ + public long getCompactableIndex(long index) { + Map.Entry segmentEntry = segments.floorEntry(index); + return segmentEntry != null ? segmentEntry.getValue().firstIndex() : 0; + } + + /** + * Compacts the journal up to the given index. + *
    + * The semantics of compaction are not specified by this interface. + * + * @param index The index up to which to compact the journal. + */ + public void compact(long index) { + final var segmentEntry = segments.floorEntry(index); + if (segmentEntry != null) { + final var compactSegments = segments.headMap(segmentEntry.getValue().firstIndex()); + if (!compactSegments.isEmpty()) { + LOG.debug("{} - Compacting {} segment(s)", name, compactSegments.size()); + for (JournalSegment segment : compactSegments.values()) { + LOG.trace("Deleting segment: {}", segment); + segment.close(); + segment.delete(); + } + compactSegments.clear(); + resetHead(segmentEntry.getValue().firstIndex()); + } + } + } + + @Override + public void close() { + segments.values().forEach(segment -> { + LOG.debug("Closing segment: {}", segment); + segment.close(); + }); + currentSegment = null; + open = false; + } + + /** + * Returns whether {@code flushOnCommit} is enabled for the log. + * + * @return Indicates whether {@code flushOnCommit} is enabled for the log. + */ + boolean isFlushOnCommit() { + return flushOnCommit; + } + + /** + * Commits entries up to the given index. + * + * @param index The index up to which to commit entries. + */ + void setCommitIndex(long index) { + this.commitIndex = index; + } + + /** + * Returns the Raft log commit index. + * + * @return The Raft log commit index. + */ + long getCommitIndex() { + return commitIndex; + } + + /** + * Raft log builder. + */ + public static final class Builder { + private static final boolean DEFAULT_FLUSH_ON_COMMIT = false; + private static final String DEFAULT_NAME = "atomix"; + private static final String DEFAULT_DIRECTORY = System.getProperty("user.dir"); + private static final int DEFAULT_MAX_SEGMENT_SIZE = 1024 * 1024 * 32; + private static final int DEFAULT_MAX_ENTRY_SIZE = 1024 * 1024; + private static final int DEFAULT_MAX_ENTRIES_PER_SEGMENT = 1024 * 1024; + private static final double DEFAULT_INDEX_DENSITY = .005; + + private String name = DEFAULT_NAME; + private StorageLevel storageLevel = StorageLevel.DISK; + private File directory = new File(DEFAULT_DIRECTORY); + private JournalSerdes namespace; + private int maxSegmentSize = DEFAULT_MAX_SEGMENT_SIZE; + private int maxEntrySize = DEFAULT_MAX_ENTRY_SIZE; + private int maxEntriesPerSegment = DEFAULT_MAX_ENTRIES_PER_SEGMENT; + private double indexDensity = DEFAULT_INDEX_DENSITY; + private boolean flushOnCommit = DEFAULT_FLUSH_ON_COMMIT; + + protected Builder() { + } + + /** + * Sets the storage name. + * + * @param name The storage name. + * @return The storage builder. + */ + public Builder withName(String name) { + this.name = requireNonNull(name, "name cannot be null"); + return this; + } + + /** + * Sets the log storage level, returning the builder for method chaining. + *
    + * The storage level indicates how individual entries should be persisted in the journal. + * + * @param storageLevel The log storage level. + * @return The storage builder. + */ + public Builder withStorageLevel(StorageLevel storageLevel) { + this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null"); + return this; + } + + /** + * Sets the log directory, returning the builder for method chaining. + *
    + * The log will write segment files into the provided directory. + * + * @param directory The log directory. + * @return The storage builder. + * @throws NullPointerException If the {@code directory} is {@code null} + */ + public Builder withDirectory(String directory) { + return withDirectory(new File(requireNonNull(directory, "directory cannot be null"))); + } + + /** + * Sets the log directory, returning the builder for method chaining. + *
    + * The log will write segment files into the provided directory. + * + * @param directory The log directory. + * @return The storage builder. + * @throws NullPointerException If the {@code directory} is {@code null} + */ + public Builder withDirectory(File directory) { + this.directory = requireNonNull(directory, "directory cannot be null"); + return this; + } + + /** + * Sets the journal namespace, returning the builder for method chaining. + * + * @param namespace The journal serializer. + * @return The journal builder. + */ + public Builder withNamespace(JournalSerdes namespace) { + this.namespace = requireNonNull(namespace, "namespace cannot be null"); + return this; + } + + /** + * Sets the maximum segment size in bytes, returning the builder for method chaining. + *
    + * The maximum segment size dictates when logs should roll over to new segments. As entries are written to a segment + * of the log, once the size of the segment surpasses the configured maximum segment size, the log will create a new + * segment and append new entries to that segment. + *
    + * By default, the maximum segment size is {@code 1024 * 1024 * 32}. + * + * @param maxSegmentSize The maximum segment size in bytes. + * @return The storage builder. + * @throws IllegalArgumentException If the {@code maxSegmentSize} is not positive + */ + public Builder withMaxSegmentSize(int maxSegmentSize) { + checkArgument(maxSegmentSize > JournalSegmentDescriptor.BYTES, "maxSegmentSize must be greater than " + JournalSegmentDescriptor.BYTES); + this.maxSegmentSize = maxSegmentSize; + return this; + } + + /** + * Sets the maximum entry size in bytes, returning the builder for method chaining. + * + * @param maxEntrySize the maximum entry size in bytes + * @return the storage builder + * @throws IllegalArgumentException if the {@code maxEntrySize} is not positive + */ + public Builder withMaxEntrySize(int maxEntrySize) { + checkArgument(maxEntrySize > 0, "maxEntrySize must be positive"); + this.maxEntrySize = maxEntrySize; + return this; + } + + /** + * Sets the maximum number of allows entries per segment, returning the builder for method chaining. + *
    + * The maximum entry count dictates when logs should roll over to new segments. As entries are written to a segment + * of the log, if the entry count in that segment meets the configured maximum entry count, the log will create a + * new segment and append new entries to that segment. + *
    + * By default, the maximum entries per segment is {@code 1024 * 1024}. + * + * @param maxEntriesPerSegment The maximum number of entries allowed per segment. + * @return The storage builder. + * @throws IllegalArgumentException If the {@code maxEntriesPerSegment} not greater than the default max entries + * per segment + * @deprecated since 3.0.2 + */ + @Deprecated + public Builder withMaxEntriesPerSegment(int maxEntriesPerSegment) { + checkArgument(maxEntriesPerSegment > 0, "max entries per segment must be positive"); + checkArgument(maxEntriesPerSegment <= DEFAULT_MAX_ENTRIES_PER_SEGMENT, + "max entries per segment cannot be greater than " + DEFAULT_MAX_ENTRIES_PER_SEGMENT); + this.maxEntriesPerSegment = maxEntriesPerSegment; + return this; + } + + /** + * Sets the journal index density. + *
    + * The index density is the frequency at which the position of entries written to the journal will be recorded in an + * in-memory index for faster seeking. + * + * @param indexDensity the index density + * @return the journal builder + * @throws IllegalArgumentException if the density is not between 0 and 1 + */ + public Builder withIndexDensity(double indexDensity) { + checkArgument(indexDensity > 0 && indexDensity < 1, "index density must be between 0 and 1"); + this.indexDensity = indexDensity; + return this; + } + + /** + * Enables flushing buffers to disk when entries are committed to a segment, returning the builder for method + * chaining. + *
    + * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is + * committed in a given segment. + * + * @return The storage builder. + */ + public Builder withFlushOnCommit() { + return withFlushOnCommit(true); + } + + /** + * Sets whether to flush buffers to disk when entries are committed to a segment, returning the builder for method + * chaining. + *
    + * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is + * committed in a given segment. + * + * @param flushOnCommit Whether to flush buffers to disk when entries are committed to a segment. + * @return The storage builder. + */ + public Builder withFlushOnCommit(boolean flushOnCommit) { + this.flushOnCommit = flushOnCommit; + return this; + } + + /** + * Build the {@link SegmentedJournal}. + * + * @return A new {@link SegmentedJournal}. + */ + public SegmentedJournal build() { + return new SegmentedJournal<>( + name, + storageLevel, + directory, + namespace, + maxSegmentSize, + maxEntrySize, + maxEntriesPerSegment, + indexDensity, + flushOnCommit); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java new file mode 100644 index 0000000000..a5deb6382e --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java @@ -0,0 +1,151 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * Copyright (c) 2024 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static java.util.Objects.requireNonNull; + +import org.eclipse.jdt.annotation.NonNull; + +/** + * A {@link JournalReader} traversing all entries. + */ +sealed class SegmentedJournalReader implements JournalReader permits CommitsSegmentJournalReader { + // Marker non-null object for tryAdvance() + private static final @NonNull Object ADVANCED = new Object(); + + final SegmentedJournal journal; + + private JournalSegment currentSegment; + private JournalSegmentReader currentReader; + private long nextIndex; + + SegmentedJournalReader(final SegmentedJournal journal, final JournalSegment segment) { + this.journal = requireNonNull(journal); + currentSegment = requireNonNull(segment); + currentReader = segment.createReader(); + nextIndex = currentSegment.firstIndex(); + } + + @Override + public final long getFirstIndex() { + return journal.getFirstSegment().firstIndex(); + } + + @Override + public final long getNextIndex() { + return nextIndex; + } + + @Override + public final void reset() { + currentReader.close(); + + currentSegment = journal.getFirstSegment(); + currentReader = currentSegment.createReader(); + nextIndex = currentSegment.firstIndex(); + } + + @Override + public final void reset(final long index) { + // If the current segment is not open, it has been replaced. Reset the segments. 
+ if (!currentSegment.isOpen()) { + reset(); + } + + if (index < nextIndex) { + rewind(index); + } else if (index > nextIndex) { + while (index > nextIndex && tryAdvance()) { + // Nothing else + } + } else { + resetCurrentReader(index); + } + } + + private void resetCurrentReader(final long index) { + final var position = currentSegment.lookup(index - 1); + if (position != null) { + nextIndex = position.index(); + currentReader.setPosition(position.position()); + } else { + nextIndex = currentSegment.firstIndex(); + currentReader.setPosition(JournalSegmentDescriptor.BYTES); + } + while (nextIndex < index && tryAdvance()) { + // Nothing else + } + } + + /** + * Rewinds the journal to the given index. + */ + private void rewind(final long index) { + if (currentSegment.firstIndex() >= index) { + JournalSegment segment = journal.getSegment(index - 1); + if (segment != null) { + currentReader.close(); + + currentSegment = segment; + currentReader = currentSegment.createReader(); + } + } + + resetCurrentReader(index); + } + + @Override + public T tryNext(final EntryMapper mapper) { + final var index = nextIndex; + var buf = currentReader.readBytes(index); + if (buf == null) { + final var nextSegment = journal.getNextSegment(currentSegment.firstIndex()); + if (nextSegment == null || nextSegment.firstIndex() != index) { + return null; + } + + currentReader.close(); + + currentSegment = nextSegment; + currentReader = currentSegment.createReader(); + buf = currentReader.readBytes(index); + if (buf == null) { + return null; + } + } + + final var entry = journal.serializer().deserialize(buf); + final var ret = requireNonNull(mapper.mapEntry(index, entry, buf.readableBytes())); + nextIndex = index + 1; + return ret; + } + + /** + * Try to move to the next entry. + * + * @return {@code true} if there was a next entry and this reader has moved to it + */ + final boolean tryAdvance() { + return tryNext((index, entry, size) -> ADVANCED) != null; + } + + @Override + public final void close() { + currentReader.close(); + journal.closeReader(this); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java new file mode 100644 index 0000000000..71120891a1 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java @@ -0,0 +1,108 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import static com.google.common.base.Verify.verifyNotNull; + +/** + * Raft log writer. 
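+ * <p>
+ * Rough usage sketch through the {@link JournalWriter} contract; {@code journal}, {@code MyEntry}
+ * and {@code entry} are illustrative placeholders:
+ * <pre>{@code
+ * JournalWriter<MyEntry> writer = journal.writer();
+ * writer.append(entry);
+ * writer.commit(writer.getLastIndex());
+ * writer.flush();
+ * }</pre>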
+ */ +final class SegmentedJournalWriter implements JournalWriter { + private final SegmentedJournal journal; + private JournalSegment currentSegment; + private JournalSegmentWriter currentWriter; + + SegmentedJournalWriter(SegmentedJournal journal) { + this.journal = journal; + this.currentSegment = journal.getLastSegment(); + this.currentWriter = currentSegment.acquireWriter(); + } + + @Override + public long getLastIndex() { + return currentWriter.getLastIndex(); + } + + @Override + public long getNextIndex() { + return currentWriter.getNextIndex(); + } + + @Override + public void reset(long index) { + if (index > currentSegment.firstIndex()) { + currentSegment.releaseWriter(); + currentSegment = journal.resetSegments(index); + currentWriter = currentSegment.acquireWriter(); + } else { + truncate(index - 1); + } + journal.resetHead(index); + } + + @Override + public void commit(long index) { + if (index > journal.getCommitIndex()) { + journal.setCommitIndex(index); + if (journal.isFlushOnCommit()) { + flush(); + } + } + } + + @Override + public Indexed append(T entry) { + final var bytes = journal.serializer().serialize(entry); + var index = currentWriter.append(bytes); + if (index != null) { + return new Indexed<>(index, entry, bytes.readableBytes()); + } + + // Slow path: we do not have enough capacity + currentWriter.flush(); + currentSegment.releaseWriter(); + currentSegment = journal.getNextSegment(); + currentWriter = currentSegment.acquireWriter(); + final var newIndex = verifyNotNull(currentWriter.append(bytes)); + return new Indexed<>(newIndex, entry, bytes.readableBytes()); + } + + @Override + public void truncate(long index) { + if (index < journal.getCommitIndex()) { + throw new IndexOutOfBoundsException("Cannot truncate committed index: " + index); + } + + // Delete all segments with first indexes greater than the given index. + while (index < currentSegment.firstIndex() && currentSegment != journal.getFirstSegment()) { + currentSegment.releaseWriter(); + journal.removeSegment(currentSegment); + currentSegment = journal.getLastSegment(); + currentWriter = currentSegment.acquireWriter(); + } + + // Truncate the current index. + currentWriter.truncate(index); + + // Reset segment readers. + journal.resetTail(index + 1); + } + + @Override + public void flush() { + currentWriter.flush(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java new file mode 100644 index 0000000000..0a220ec652 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java @@ -0,0 +1,70 @@ +/* + * Copyright 2015-2021 Open Networking Foundation + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +/** + * Log exception. 
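+ * <p>
+ * Specific failure modes are modelled by the nested {@link TooLarge} and {@link OutOfDiskSpace} subclasses.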
+ * + * @author Jordan Halterman + */ +public class StorageException extends RuntimeException { + @java.io.Serial + private static final long serialVersionUID = 1L; + + public StorageException() { + } + + public StorageException(final String message) { + super(message); + } + + public StorageException(final String message, final Throwable cause) { + super(message, cause); + } + + public StorageException(final Throwable cause) { + super(cause); + } + + /** + * Exception thrown when an entry being stored is too large. + */ + public static class TooLarge extends StorageException { + @java.io.Serial + private static final long serialVersionUID = 1L; + + public TooLarge(final String message) { + super(message); + } + + public TooLarge(final String message, final Throwable cause) { + super(message, cause); + } + } + + /** + * Exception thrown when storage runs out of disk space. + */ + public static class OutOfDiskSpace extends StorageException { + @java.io.Serial + private static final long serialVersionUID = 1L; + + public OutOfDiskSpace(final String message) { + super(message); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java new file mode 100644 index 0000000000..e76a9892b8 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java @@ -0,0 +1,30 @@ +/* + * Copyright 2015-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +/** + * Storage level configuration values which control how logs are stored on disk or in memory. + */ +public enum StorageLevel { + /** + * Stores data in a memory-mapped file. + */ + MAPPED, + /** + * Stores data on disk. + */ + DISK +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java new file mode 100644 index 0000000000..8608e00fc6 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java @@ -0,0 +1,48 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * Copyright (c) 2024 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal.index; + +import org.eclipse.jdt.annotation.Nullable; + +/** + * Index of a particular JournalSegment. 
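+ * <p>
+ * A minimal interaction sketch using the bundled {@link SparseJournalIndex}; the index and position
+ * values are illustrative:
+ * <pre>{@code
+ * JournalIndex index = new SparseJournalIndex(0.005);
+ * index.index(200, 4096);           // a density of 0.005 records every 200th index
+ * Position pos = index.lookup(250); // closest recorded position at or before index 250
+ * }</pre>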
+ */ +public interface JournalIndex { + /** + * Adds an entry for the given index at the given position. + * + * @param index the index for which to add the entry + * @param position the position of the given index + */ + void index(long index, int position); + + /** + * Looks up the position of the given index. + * + * @param index the index to lookup + * @return the position of the given index or a lesser index, or {@code null} + */ + @Nullable Position lookup(long index); + + /** + * Truncates the index to the given index and returns its position, if available. + * + * @param index the index to which to truncate the index, or {@code null} + * @return the position of the given index or a lesser index, or {@code null} + */ + @Nullable Position truncate(long index); +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java new file mode 100644 index 0000000000..640a8e8f0f --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java @@ -0,0 +1,33 @@ +/* + * Copyright 2018-2021 Open Networking Foundation + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal.index; + +import java.util.Map.Entry; +import org.eclipse.jdt.annotation.Nullable; + +/** + * Journal index position. + */ +public record Position(long index, int position) { + public Position(final Entry entry) { + this(entry.getKey(), entry.getValue()); + } + + public static @Nullable Position ofNullable(final Entry entry) { + return entry == null ? null : new Position(entry); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java new file mode 100644 index 0000000000..2b317362c5 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java @@ -0,0 +1,55 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * Copyright (c) 2024 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal.index; + +import java.util.TreeMap; + +/** + * A {@link JournalIndex} maintaining target density. 
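+ * <p>
+ * For example, the default journal index density of {@code 0.005} maps to recording every 200th index
+ * (ceil(1000 / (0.005 * 1000)) = 200), which bounds how far a {@link #lookup(long)} result can trail the
+ * requested index.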
+ */ +public final class SparseJournalIndex implements JournalIndex { + private static final int MIN_DENSITY = 1000; + + private final int density; + private final TreeMap positions = new TreeMap<>(); + + public SparseJournalIndex() { + density = MIN_DENSITY; + } + + public SparseJournalIndex(final double density) { + this.density = (int) Math.ceil(MIN_DENSITY / (density * MIN_DENSITY)); + } + + @Override + public void index(final long index, final int position) { + if (index % density == 0) { + positions.put(index, position); + } + } + + @Override + public Position lookup(final long index) { + return Position.ofNullable(positions.floorEntry(index)); + } + + @Override + public Position truncate(final long index) { + positions.tailMap(index, false).clear(); + return Position.ofNullable(positions.lastEntry()); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java new file mode 100644 index 0000000000..c17cabe9af --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides classes and interfaces for efficiently managing journal indexes. + */ +package io.atomix.storage.journal.index; diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java new file mode 100644 index 0000000000..7cabd15455 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides a low-level journal abstraction for appending to logs and managing segmented logs. + */ +package io.atomix.storage.journal; diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java new file mode 100644 index 0000000000..94fc32246e --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java @@ -0,0 +1,32 @@ +/* + * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import java.io.ByteArrayOutputStream; + +/** + * Exposes protected byte array length in {@link ByteArrayOutputStream}. + */ +final class BufferAwareByteArrayOutputStream extends ByteArrayOutputStream { + + BufferAwareByteArrayOutputStream(int size) { + super(size); + } + + int getBufferSize() { + return buf.length; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java new file mode 100644 index 0000000000..6df25b5ef5 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java @@ -0,0 +1,36 @@ +/* + * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import com.esotericsoftware.kryo.io.Output; + +/** + * Convenience class to avoid extra object allocation and casting. + */ +final class ByteArrayOutput extends Output { + + private final BufferAwareByteArrayOutputStream stream; + + ByteArrayOutput(final int bufferSize, final int maxBufferSize, final BufferAwareByteArrayOutputStream stream) { + super(bufferSize, maxBufferSize); + super.setOutputStream(stream); + this.stream = stream; + } + + BufferAwareByteArrayOutputStream getByteArrayOutputStream() { + return stream; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java new file mode 100644 index 0000000000..0508f1eee5 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java @@ -0,0 +1,61 @@ +/* + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.utils.serializer; + +import static java.util.Objects.requireNonNull; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.KryoException; +import com.esotericsoftware.kryo.Serializer; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.io.Output; +import com.esotericsoftware.kryo.serializers.JavaSerializer; +import com.google.common.base.MoreObjects; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.io.IOException; + +final class EntrySerializer extends Serializer { + // Note: uses identity to create things in Kryo, hence we want an instance for every serdes we wrap + private final JavaSerializer javaSerializer = new JavaSerializer(); + private final EntrySerdes serdes; + + EntrySerializer(final EntrySerdes serdes) { + this.serdes = requireNonNull(serdes); + } + + @Override + public T read(final Kryo kryo, final Input input, final Class type) { + try { + return serdes.read(new KryoEntryInput(kryo, input, javaSerializer)); + } catch (IOException e) { + throw new KryoException(e); + } + } + + @Override + public void write(final Kryo kryo, final Output output, final T object) { + try { + serdes.write(new KryoEntryOutput(kryo, output, javaSerializer), object); + } catch (IOException e) { + throw new KryoException(e); + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).addValue(serdes).toString(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java new file mode 100644 index 0000000000..ed66011aba --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java @@ -0,0 +1,243 @@ +/* Copyright (c) 2008, Nathan Sweet + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided with the distribution. + * - Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ + +package io.atomix.utils.serializer; + +import com.esotericsoftware.kryo.io.ByteBufferInput; +import java.nio.ByteBuffer; + +/** + * A Kryo-4.0.3 ByteBufferInput adapted to deal with + * issue 505. + * + * @author Roman Levenstein <romixlev@gmail.com> + * @author Robert Varga + */ +public final class Kryo505ByteBufferInput extends ByteBufferInput { + Kryo505ByteBufferInput (ByteBuffer buffer) { + super(buffer); + } + + @Override + public String readString () { + niobuffer.position(position); + int available = require(1); + position++; + int b = niobuffer.get(); + if ((b & 0x80) == 0) return readAscii(); // ASCII. + // Null, empty, or UTF8. + int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b); + switch (charCount) { + case 0: + return null; + case 1: + return ""; + } + charCount--; + if (chars.length < charCount) chars = new char[charCount]; + readUtf8(charCount); + return new String(chars, 0, charCount); + } + + private int readUtf8Length (int b) { + int result = b & 0x3F; // Mask all but first 6 bits. + if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8. + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 6; + if ((b & 0x80) != 0) { + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 13; + if ((b & 0x80) != 0) { + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 20; + if ((b & 0x80) != 0) { + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 27; + } + } + } + } + return result; + } + + private int readUtf8Length_slow (int b) { + int result = b & 0x3F; // Mask all but first 6 bits. + if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8. + require(1); + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 6; + if ((b & 0x80) != 0) { + require(1); + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 13; + if ((b & 0x80) != 0) { + require(1); + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 20; + if ((b & 0x80) != 0) { + require(1); + position++; + b = niobuffer.get(); + result |= (b & 0x7F) << 27; + } + } + } + } + return result; + } + + private void readUtf8 (int charCount) { + char[] chars = this.chars; + // Try to read 7 bit ASCII chars. + int charIndex = 0; + int count = Math.min(require(1), charCount); + int position = this.position; + int b; + while (charIndex < count) { + position++; + b = niobuffer.get(); + if (b < 0) { + position--; + break; + } + chars[charIndex++] = (char)b; + } + this.position = position; + // If buffer didn't hold all chars or any were not ASCII, use slow path for remainder. 
+ if (charIndex < charCount) { + niobuffer.position(position); + readUtf8_slow(charCount, charIndex); + } + } + + private void readUtf8_slow (int charCount, int charIndex) { + char[] chars = this.chars; + while (charIndex < charCount) { + if (position == limit) require(1); + position++; + int b = niobuffer.get() & 0xFF; + switch (b >> 4) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + chars[charIndex] = (char)b; + break; + case 12: + case 13: + if (position == limit) require(1); + position++; + chars[charIndex] = (char)((b & 0x1F) << 6 | niobuffer.get() & 0x3F); + break; + case 14: + require(2); + position += 2; + int b2 = niobuffer.get(); + int b3 = niobuffer.get(); + chars[charIndex] = (char)((b & 0x0F) << 12 | (b2 & 0x3F) << 6 | b3 & 0x3F); + break; + } + charIndex++; + } + } + + private String readAscii () { + int end = position; + int start = end - 1; + int limit = this.limit; + int b; + do { + if (end == limit) return readAscii_slow(); + end++; + b = niobuffer.get(); + } while ((b & 0x80) == 0); + int count = end - start; + byte[] tmp = new byte[count]; + niobuffer.position(start); + niobuffer.get(tmp); + tmp[count - 1] &= 0x7F; // Mask end of ascii bit. + String value = new String(tmp, 0, 0, count); + position = end; + niobuffer.position(position); + return value; + } + + private String readAscii_slow () { + position--; // Re-read the first byte. + // Copy chars currently in buffer. + int charCount = limit - position; + if (charCount > chars.length) chars = new char[charCount * 2]; + char[] chars = this.chars; + for (int i = position, ii = 0, n = limit; i < n; i++, ii++) + chars[ii] = (char)niobuffer.get(i); + position = limit; + // Copy additional chars one by one. + while (true) { + require(1); + position++; + int b = niobuffer.get(); + if (charCount == chars.length) { + char[] newChars = new char[charCount * 2]; + System.arraycopy(chars, 0, newChars, 0, charCount); + chars = newChars; + this.chars = newChars; + } + if ((b & 0x80) == 0x80) { + chars[charCount++] = (char)(b & 0x7F); + break; + } + chars[charCount++] = (char)b; + } + return new String(chars, 0, charCount); + } + + @Override + public StringBuilder readStringBuilder () { + niobuffer.position(position); + int available = require(1); + position++; + int b = niobuffer.get(); + if ((b & 0x80) == 0) return new StringBuilder(readAscii()); // ASCII. + // Null, empty, or UTF8. + int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b); + switch (charCount) { + case 0: + return null; + case 1: + return new StringBuilder(""); + } + charCount--; + if (chars.length < charCount) chars = new char[charCount]; + readUtf8(charCount); + StringBuilder builder = new StringBuilder(charCount); + builder.append(chars, 0, charCount); + return builder; + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java new file mode 100644 index 0000000000..2a98f16073 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java @@ -0,0 +1,82 @@ +/* + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import static java.util.Objects.requireNonNull; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.KryoException; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.serializers.JavaSerializer; +import io.atomix.storage.journal.JournalSerdes.EntryInput; +import java.io.IOException; + +final class KryoEntryInput implements EntryInput { + private final Kryo kryo; + private final Input input; + private final JavaSerializer javaSerializer; + + KryoEntryInput(final Kryo kryo, final Input input, final JavaSerializer javaSerializer) { + this.kryo = requireNonNull(kryo); + this.input = requireNonNull(input); + this.javaSerializer = requireNonNull(javaSerializer); + } + + @Override + public byte[] readBytes(final int length) throws IOException { + try { + return input.readBytes(length); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public long readLong() throws IOException { + try { + return input.readLong(false); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public Object readObject() throws IOException { + try { + return javaSerializer.read(kryo, input, null); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public String readString() throws IOException { + try { + return input.readString(); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public int readVarInt() throws IOException { + try { + return input.readVarInt(true); + } catch (KryoException e) { + throw new IOException(e); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java new file mode 100644 index 0000000000..90886dde03 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java @@ -0,0 +1,82 @@ +/* + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.utils.serializer; + +import static java.util.Objects.requireNonNull; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.KryoException; +import com.esotericsoftware.kryo.io.Output; +import com.esotericsoftware.kryo.serializers.JavaSerializer; +import io.atomix.storage.journal.JournalSerdes.EntryOutput; +import java.io.IOException; + +final class KryoEntryOutput implements EntryOutput { + private final Kryo kryo; + private final Output output; + private final JavaSerializer javaSerializer; + + KryoEntryOutput(final Kryo kryo, final Output output, final JavaSerializer javaSerializer) { + this.kryo = requireNonNull(kryo); + this.output = requireNonNull(output); + this.javaSerializer = requireNonNull(javaSerializer); + } + + @Override + public void writeBytes(final byte[] bytes) throws IOException { + try { + output.writeBytes(bytes); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public void writeLong(final long value) throws IOException { + try { + output.writeLong(value, false); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public void writeObject(final Object value) throws IOException { + try { + javaSerializer.write(kryo, output, value); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public void writeString(final String value) throws IOException { + try { + output.writeString(value); + } catch (KryoException e) { + throw new IOException(e); + } + } + + @Override + public void writeVarInt(final int value) throws IOException { + try { + output.writeVarInt(value, true); + } catch (KryoException e) { + throw new IOException(e); + } + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java new file mode 100644 index 0000000000..6324631d6c --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java @@ -0,0 +1,51 @@ +/* + * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package io.atomix.utils.serializer;
+
+import java.lang.ref.SoftReference;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Function;
+
+abstract class KryoIOPool<T> {
+
+    private final ConcurrentLinkedQueue<SoftReference<T>> queue = new ConcurrentLinkedQueue<>();
+
+    private T borrow(final int bufferSize) {
+        T element;
+        SoftReference<T> reference;
+        while ((reference = queue.poll()) != null) {
+            if ((element = reference.get()) != null) {
+                return element;
+            }
+        }
+        return create(bufferSize);
+    }
+
+    protected abstract T create(final int bufferSize);
+
+    protected abstract boolean recycle(final T element);
+
+    <R> R run(final Function<T, R> function, final int bufferSize) {
+        final T element = borrow(bufferSize);
+        try {
+            return function.apply(element);
+        } finally {
+            if (recycle(element)) {
+                queue.offer(new SoftReference<>(element));
+            }
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java
new file mode 100644
index 0000000000..0eeb8dfc89
--- /dev/null
+++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+
+class KryoInputPool extends KryoIOPool<Input> {
+
+    static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+    @Override
+    protected Input create(int bufferSize) {
+        return new Input(bufferSize);
+    }
+
+    @Override
+    protected boolean recycle(Input input) {
+        if (input.getBuffer().length < MAX_POOLED_BUFFER_SIZE) {
+            input.setInputStream(null);
+            return true;
+        }
+        return false; // discard
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java
new file mode 100644
index 0000000000..64f35389c1
--- /dev/null
+++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package io.atomix.utils.serializer; + +import static java.util.Objects.requireNonNull; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.Registration; +import com.esotericsoftware.kryo.Serializer; +import com.esotericsoftware.kryo.io.ByteBufferInput; +import com.esotericsoftware.kryo.io.ByteBufferOutput; +import com.esotericsoftware.kryo.pool.KryoCallback; +import com.esotericsoftware.kryo.pool.KryoFactory; +import com.esotericsoftware.kryo.pool.KryoPool; +import com.google.common.base.MoreObjects; +import io.atomix.storage.journal.JournalSerdes; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import org.objenesis.strategy.StdInstantiatorStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Pool of Kryo instances, with classes pre-registered. + */ +final class KryoJournalSerdes implements JournalSerdes, KryoFactory, KryoPool { + /** + * Default buffer size used for serialization. + * + * @see #serialize(Object) + */ + private static final int DEFAULT_BUFFER_SIZE = 4096; + + /** + * Smallest ID free to use for user defined registrations. + */ + private static final int INITIAL_ID = 16; + + static final String NO_NAME = "(no name)"; + + private static final Logger LOGGER = LoggerFactory.getLogger(KryoJournalSerdes.class); + + private final KryoPool kryoPool = new KryoPool.Builder(this).softReferences().build(); + + private final KryoOutputPool kryoOutputPool = new KryoOutputPool(); + private final KryoInputPool kryoInputPool = new KryoInputPool(); + + private final List registeredTypes; + private final ClassLoader classLoader; + private final String friendlyName; + + /** + * Creates a Kryo instance pool. 
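+ *
+ * <p>Instances are normally obtained via {@code JournalSerdes.builder()} rather than through this
+ * constructor directly. An illustrative sketch, reusing the test serdes added elsewhere in this
+ * change (not part of this class):
+ * <pre>{@code
+ * JournalSerdes serdes = JournalSerdes.builder()
+ *     .register(new TestEntrySerdes(), TestEntry.class)
+ *     .register(new ByteArraySerdes(), byte[].class)
+ *     .build("example");
+ * byte[] bytes = serdes.serialize(new TestEntry(32));
+ * TestEntry copy = serdes.deserialize(bytes);
+ * }</pre>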
+ * + * @param registeredTypes types to register + * @param registrationRequired whether registration is required + * @param friendlyName friendly name for the namespace + */ + KryoJournalSerdes( + final List registeredTypes, + final ClassLoader classLoader, + final String friendlyName) { + this.registeredTypes = List.copyOf(registeredTypes); + this.classLoader = requireNonNull(classLoader); + this.friendlyName = requireNonNull(friendlyName); + + // Pre-populate with a single instance + release(create()); + } + + @Override + public byte[] serialize(final Object obj) { + return serialize(obj, DEFAULT_BUFFER_SIZE); + } + + @Override + public byte[] serialize(final Object obj, final int bufferSize) { + return kryoOutputPool.run(output -> kryoPool.run(kryo -> { + kryo.writeClassAndObject(output, obj); + output.flush(); + return output.getByteArrayOutputStream().toByteArray(); + }), bufferSize); + } + + @Override + public void serialize(final Object obj, final ByteBuffer buffer) { + ByteBufferOutput out = new ByteBufferOutput(buffer); + Kryo kryo = borrow(); + try { + kryo.writeClassAndObject(out, obj); + out.flush(); + } finally { + release(kryo); + } + } + + @Override + public void serialize(final Object obj, final OutputStream stream) { + serialize(obj, stream, DEFAULT_BUFFER_SIZE); + } + + @Override + public void serialize(final Object obj, final OutputStream stream, final int bufferSize) { + ByteBufferOutput out = new ByteBufferOutput(stream, bufferSize); + Kryo kryo = borrow(); + try { + kryo.writeClassAndObject(out, obj); + out.flush(); + } finally { + release(kryo); + } + } + + @Override + public T deserialize(final byte[] bytes) { + return kryoInputPool.run(input -> { + input.setInputStream(new ByteArrayInputStream(bytes)); + return kryoPool.run(kryo -> { + @SuppressWarnings("unchecked") + T obj = (T) kryo.readClassAndObject(input); + return obj; + }); + }, DEFAULT_BUFFER_SIZE); + } + + @Override + public T deserialize(final ByteBuffer buffer) { + Kryo kryo = borrow(); + try { + @SuppressWarnings("unchecked") + T obj = (T) kryo.readClassAndObject(new Kryo505ByteBufferInput(buffer)); + return obj; + } finally { + release(kryo); + } + } + + @Override + public T deserialize(final InputStream stream) { + return deserialize(stream, DEFAULT_BUFFER_SIZE); + } + + @Override + public T deserialize(final InputStream stream, final int bufferSize) { + Kryo kryo = borrow(); + try { + @SuppressWarnings("unchecked") + T obj = (T) kryo.readClassAndObject(new ByteBufferInput(stream, bufferSize)); + return obj; + } finally { + release(kryo); + } + } + + /** + * Creates a Kryo instance. + * + * @return Kryo instance + */ + @Override + public Kryo create() { + LOGGER.trace("Creating Kryo instance for {}", this); + Kryo kryo = new Kryo(); + kryo.setClassLoader(classLoader); + kryo.setRegistrationRequired(true); + + // TODO rethink whether we want to use StdInstantiatorStrategy + kryo.setInstantiatorStrategy( + new Kryo.DefaultInstantiatorStrategy(new StdInstantiatorStrategy())); + + int id = INITIAL_ID; + for (RegisteredType registeredType : registeredTypes) { + register(kryo, registeredType.types(), registeredType.serializer(), id++); + } + return kryo; + } + + /** + * Register {@code type} and {@code serializer} to {@code kryo} instance. + * + * @param kryo Kryo instance + * @param types types to register + * @param serializer Specific serializer to register or null to use default. 
+ * @param id type registration id to use + */ + private void register(final Kryo kryo, final Class[] types, final Serializer serializer, final int id) { + Registration existing = kryo.getRegistration(id); + if (existing != null) { + boolean matches = false; + for (Class type : types) { + if (existing.getType() == type) { + matches = true; + break; + } + } + + if (!matches) { + LOGGER.error("{}: Failed to register {} as {}, {} was already registered.", + friendlyName, types, id, existing.getType()); + + throw new IllegalStateException(String.format( + "Failed to register %s as %s, %s was already registered.", + Arrays.toString(types), id, existing.getType())); + } + // falling through to register call for now. + // Consider skipping, if there's reasonable + // way to compare serializer equivalence. + } + + for (Class type : types) { + Registration r = null; + if (serializer == null) { + r = kryo.register(type, id); + } else if (type.isInterface()) { + kryo.addDefaultSerializer(type, serializer); + } else { + r = kryo.register(type, serializer, id); + } + if (r != null) { + if (r.getId() != id) { + LOGGER.debug("{}: {} already registered as {}. Skipping {}.", + friendlyName, r.getType(), r.getId(), id); + } + LOGGER.trace("{} registered as {}", r.getType(), r.getId()); + } + } + } + + @Override + public Kryo borrow() { + return kryoPool.borrow(); + } + + @Override + public void release(final Kryo kryo) { + kryoPool.release(kryo); + } + + @Override + public T run(final KryoCallback callback) { + return kryoPool.run(callback); + } + + @Override + public String toString() { + if (!NO_NAME.equals(friendlyName)) { + return MoreObjects.toStringHelper(getClass()) + .omitNullValues() + .add("friendlyName", friendlyName) + // omit lengthy detail, when there's a name + .toString(); + } + return MoreObjects.toStringHelper(getClass()).add("registeredTypes", registeredTypes).toString(); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java new file mode 100644 index 0000000000..a62d8b3293 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2014-2021 Open Networking Foundation + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.utils.serializer; + +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +import io.atomix.storage.journal.JournalSerdes; +import io.atomix.storage.journal.JournalSerdes.Builder; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.util.ArrayList; +import java.util.List; + +public final class KryoJournalSerdesBuilder implements Builder { + private final List types = new ArrayList<>(); + private ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + + @Override + public KryoJournalSerdesBuilder register(final EntrySerdes serdes, final Class... classes) { + types.add(new RegisteredType(new EntrySerializer<>(serdes), classes)); + return this; + } + + @Override + public KryoJournalSerdesBuilder setClassLoader(final ClassLoader classLoader) { + this.classLoader = requireNonNull(classLoader); + return this; + } + + @Override + public JournalSerdes build() { + return build(KryoJournalSerdes.NO_NAME); + } + + @Override + public JournalSerdes build(final String friendlyName) { + checkState(!types.isEmpty(), "No serializers registered"); + return new KryoJournalSerdes(types, classLoader, friendlyName); + } +} \ No newline at end of file diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java new file mode 100644 index 0000000000..6b1737fd12 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java @@ -0,0 +1,37 @@ +/* + * Copyright 2014-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +class KryoOutputPool extends KryoIOPool { + + private static final int MAX_BUFFER_SIZE = 768 * 1024; + static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024; + + @Override + protected ByteArrayOutput create(int bufferSize) { + return new ByteArrayOutput(bufferSize, MAX_BUFFER_SIZE, new BufferAwareByteArrayOutputStream(bufferSize)); + } + + @Override + protected boolean recycle(ByteArrayOutput output) { + if (output.getByteArrayOutputStream().getBufferSize() < MAX_POOLED_BUFFER_SIZE) { + output.getByteArrayOutputStream().reset(); + output.clear(); + return true; + } + return false; // discard + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java new file mode 100644 index 0000000000..0a17c09bf2 --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java @@ -0,0 +1,25 @@ +/* + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import static java.util.Objects.requireNonNull; + +record RegisteredType(EntrySerializer serializer, Class[] types) { + RegisteredType { + requireNonNull(serializer); + requireNonNull(types); + } +} diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java new file mode 100644 index 0000000000..afc8022bac --- /dev/null +++ b/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides classes and interfaces for binary serialization. + */ +package io.atomix.utils.serializer; diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java new file mode 100644 index 0000000000..487c314141 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java @@ -0,0 +1,405 @@ +/* + * Copyright 2017-2021 Open Networking Foundation + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.storage.journal; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.List; +import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.jdt.annotation.Nullable; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Base journal test. + * + * @author Jordan Halterman + */ +@RunWith(Parameterized.class) +public abstract class AbstractJournalTest { + private static final JournalSerdes NAMESPACE = JournalSerdes.builder() + .register(new TestEntrySerdes(), TestEntry.class) + .register(new ByteArraySerdes(), byte[].class) + .build(); + + protected static final TestEntry ENTRY = new TestEntry(32); + private static final Path PATH = Paths.get("target/test-logs/"); + + private final StorageLevel storageLevel; + private final int maxSegmentSize; + protected final int entriesPerSegment; + + protected AbstractJournalTest(final StorageLevel storageLevel, final int maxSegmentSize) { + this.storageLevel = storageLevel; + this.maxSegmentSize = maxSegmentSize; + int entryLength = NAMESPACE.serialize(ENTRY).length + 8; + entriesPerSegment = (maxSegmentSize - 64) / entryLength; + } + + @Parameterized.Parameters + public static List primeNumbers() { + var runs = new ArrayList(); + for (int i = 1; i <= 10; i++) { + for (int j = 1; j <= 10; j++) { + runs.add(new Object[] { 64 + i * (NAMESPACE.serialize(ENTRY).length + 8) + j }); + } + } + return runs; + } + + protected SegmentedJournal createJournal() { + return SegmentedJournal.builder() + .withName("test") + .withDirectory(PATH.toFile()) + .withNamespace(NAMESPACE) + .withStorageLevel(storageLevel) + .withMaxSegmentSize(maxSegmentSize) + .withIndexDensity(.2) + .build(); + } + + @Test + public void testCloseMultipleTimes() { + // given + final Journal journal = createJournal(); + + // when + journal.close(); + + // then + journal.close(); + } + + @Test + public void testWriteRead() throws Exception { + try (Journal journal = createJournal()) { + JournalWriter writer = journal.writer(); + JournalReader reader = journal.openReader(1); + + // Append a couple entries. + assertEquals(1, writer.getNextIndex()); + var indexed = writer.append(ENTRY); + assertEquals(1, indexed.index()); + + assertEquals(2, writer.getNextIndex()); + writer.append(ENTRY); + reader.reset(2); + indexed = assertNext(reader); + assertEquals(2, indexed.index()); + assertNoNext(reader); + + // Test reading an entry + reader.reset(); + var entry1 = assertNext(reader); + assertEquals(1, entry1.index()); + + // Test reading a second entry + assertEquals(2, reader.getNextIndex()); + var entry2 = assertNext(reader); + assertEquals(2, entry2.index()); + assertEquals(3, reader.getNextIndex()); + assertNoNext(reader); + + // Test opening a new reader and reading from the journal. + reader = journal.openReader(1); + entry1 = assertNext(reader); + assertEquals(1, entry1.index()); + + assertEquals(2, reader.getNextIndex()); + entry2 = assertNext(reader); + assertEquals(2, entry2.index()); + assertNoNext(reader); + + // Reset the reader. 
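+            // (a no-argument reset() rewinds the reader to the journal's first index, as exercised
+            //  earlier in this test)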
+ reader.reset(); + + // Test opening a new reader and reading from the journal. + reader = journal.openReader(1); + entry1 = assertNext(reader); + assertEquals(1, entry1.index()); + + assertEquals(2, reader.getNextIndex()); + entry2 = assertNext(reader); + assertEquals(2, entry2.index()); + assertNoNext(reader); + + // Truncate the journal and write a different entry. + writer.truncate(1); + assertEquals(2, writer.getNextIndex()); + writer.append(ENTRY); + reader.reset(2); + indexed = assertNext(reader); + assertEquals(2, indexed.index()); + + // Reset the reader to a specific index and read the last entry again. + reader.reset(2); + + assertEquals(2, reader.getNextIndex()); + entry2 = assertNext(reader); + assertEquals(2, entry2.index()); + assertNoNext(reader); + } + } + + @Test + public void testResetTruncateZero() throws Exception { + try (SegmentedJournal journal = createJournal()) { + JournalWriter writer = journal.writer(); + JournalReader reader = journal.openReader(1); + + assertEquals(0, writer.getLastIndex()); + writer.append(ENTRY); + writer.append(ENTRY); + writer.reset(1); + assertEquals(0, writer.getLastIndex()); + writer.append(ENTRY); + + var indexed = assertNext(reader); + assertEquals(1, indexed.index()); + writer.reset(1); + assertEquals(0, writer.getLastIndex()); + indexed = writer.append(ENTRY); + assertEquals(1, writer.getLastIndex()); + assertEquals(1, indexed.index()); + + indexed = assertNext(reader); + assertEquals(1, indexed.index()); + + writer.truncate(0); + assertEquals(0, writer.getLastIndex()); + indexed = writer.append(ENTRY); + assertEquals(1, writer.getLastIndex()); + assertEquals(1, indexed.index()); + + indexed = assertNext(reader); + assertEquals(1, indexed.index()); + } + } + + @Test + public void testTruncateRead() throws Exception { + int i = 10; + try (Journal journal = createJournal()) { + JournalWriter writer = journal.writer(); + JournalReader reader = journal.openReader(1); + + for (int j = 1; j <= i; j++) { + assertEquals(j, writer.append(new TestEntry(32)).index()); + } + + for (int j = 1; j <= i - 2; j++) { + assertEquals(j, assertNext(reader).index()); + } + + writer.truncate(i - 2); + + assertNoNext(reader); + assertEquals(i - 1, writer.append(new TestEntry(32)).index()); + assertEquals(i, writer.append(new TestEntry(32)).index()); + + var entry = assertNext(reader); + assertEquals(i - 1, entry.index()); + entry = assertNext(reader); + assertNotNull(entry); + assertEquals(i, entry.index()); + } + } + + @Test + public void testWriteReadEntries() throws Exception { + try (Journal journal = createJournal()) { + JournalWriter writer = journal.writer(); + JournalReader reader = journal.openReader(1); + + for (int i = 1; i <= entriesPerSegment * 5; i++) { + writer.append(ENTRY); + var entry = assertNext(reader); + assertEquals(i, entry.index()); + assertArrayEquals(ENTRY.bytes(), entry.entry().bytes()); + reader.reset(i); + entry = assertNext(reader); + assertEquals(i, entry.index()); + assertArrayEquals(ENTRY.bytes(), entry.entry().bytes()); + + if (i > 6) { + reader.reset(i - 5); + assertEquals(i - 5, reader.getNextIndex()); + assertNext(reader); + reader.reset(i + 1); + } + + writer.truncate(i - 1); + writer.append(ENTRY); + + assertNext(reader); + reader.reset(i); + entry = assertNext(reader); + assertEquals(i, entry.index()); + assertArrayEquals(ENTRY.bytes(), entry.entry().bytes()); + } + } + } + + @Test + public void testWriteReadCommittedEntries() throws Exception { + try (Journal journal = createJournal()) { + JournalWriter writer = 
journal.writer(); + JournalReader reader = journal.openReader(1, JournalReader.Mode.COMMITS); + + for (int i = 1; i <= entriesPerSegment * 5; i++) { + writer.append(ENTRY); + assertNoNext(reader); + writer.commit(i); + var entry = assertNext(reader); + assertEquals(i, entry.index()); + assertArrayEquals(ENTRY.bytes(), entry.entry().bytes()); + reader.reset(i); + entry = assertNext(reader); + assertEquals(i, entry.index()); + assertArrayEquals(ENTRY.bytes(), entry.entry().bytes()); + } + } + } + + @Test + public void testReadAfterCompact() throws Exception { + try (SegmentedJournal journal = createJournal()) { + JournalWriter writer = journal.writer(); + JournalReader uncommittedReader = journal.openReader(1, JournalReader.Mode.ALL); + JournalReader committedReader = journal.openReader(1, JournalReader.Mode.COMMITS); + + for (int i = 1; i <= entriesPerSegment * 10; i++) { + assertEquals(i, writer.append(ENTRY).index()); + } + + assertEquals(1, uncommittedReader.getNextIndex()); + assertEquals(1, committedReader.getNextIndex()); + + // This creates asymmetry, as uncommitted reader will move one step ahead... + assertNext(uncommittedReader); + assertEquals(2, uncommittedReader.getNextIndex()); + assertNoNext(committedReader); + assertEquals(1, committedReader.getNextIndex()); + + writer.commit(entriesPerSegment * 9); + + // ... so here we catch up ... + assertNext(committedReader); + assertEquals(2, committedReader.getNextIndex()); + + // ... and continue from the second entry + for (int i = 2; i <= entriesPerSegment * 2.5; i++) { + var entry = assertNext(uncommittedReader); + assertEquals(i, entry.index()); + + entry = assertNext(committedReader); + assertEquals(i, entry.index()); + } + + journal.compact(entriesPerSegment * 5 + 1); + + assertEquals(entriesPerSegment * 5 + 1, uncommittedReader.getNextIndex()); + var entry = assertNext(uncommittedReader); + assertEquals(entriesPerSegment * 5 + 1, entry.index()); + + assertEquals(entriesPerSegment * 5 + 1, committedReader.getNextIndex()); + entry = assertNext(committedReader); + assertEquals(entriesPerSegment * 5 + 1, entry.index()); + } + } + + /** + * Tests reading from a compacted journal. + */ + @Test + public void testCompactAndRecover() throws Exception { + try (var journal = createJournal()) { + // Write three segments to the journal. + final var writer = journal.writer(); + for (int i = 0; i < entriesPerSegment * 3; i++) { + writer.append(ENTRY); + } + + // Commit the entries and compact the first segment. + writer.commit(entriesPerSegment * 3); + journal.compact(entriesPerSegment + 1); + } + + // Reopen the journal and create a reader. + try (var journal = createJournal()) { + final var writer = journal.writer(); + final var reader = journal.openReader(1, JournalReader.Mode.COMMITS); + writer.append(ENTRY); + writer.append(ENTRY); + writer.commit(entriesPerSegment * 3); + + // Ensure the reader starts at the first physical index in the journal. 
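+            // (entries 1..entriesPerSegment lived in the first segment, which was compacted away
+            //  above, so the first index still present is entriesPerSegment + 1)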
+ assertEquals(entriesPerSegment + 1, reader.getNextIndex()); + assertEquals(reader.getFirstIndex(), reader.getNextIndex()); + assertEquals(entriesPerSegment + 1, assertNext(reader).index()); + assertEquals(entriesPerSegment + 2, reader.getNextIndex()); + } + } + + @Before + @After + public void cleanupStorage() throws IOException { + if (Files.exists(PATH)) { + Files.walkFileTree(PATH, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } + } + + private static @NonNull Indexed assertNext(final JournalReader reader) { + final var ret = tryNext(reader); + assertNotNull(ret); + return ret; + } + + private static void assertNoNext(final JournalReader reader) { + assertNull(tryNext(reader)); + } + + private static @Nullable Indexed tryNext(final JournalReader reader) { + return reader.tryNext(Indexed::new); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java new file mode 100644 index 0000000000..79ce9097a3 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java @@ -0,0 +1,39 @@ +/* + * Copyright 2023 PANTHEON.tech, s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import io.atomix.storage.journal.JournalSerdes.EntryInput; +import io.atomix.storage.journal.JournalSerdes.EntryOutput; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.io.IOException; + +final class ByteArraySerdes implements EntrySerdes { + @Override + public byte[] read(final EntryInput input) throws IOException { + int length = input.readVarInt(); + return length == 0 ? null : input.readBytes(length - 1); + } + + @Override + public void write(final EntryOutput output, final byte[] entry) throws IOException { + if (entry != null) { + output.writeVarInt(entry.length + 1); + output.writeBytes(entry); + } else { + output.writeVarInt(0); + } + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java new file mode 100644 index 0000000000..11cbd6c32b --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java @@ -0,0 +1,25 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +/** + * Disk journal test. + */ +public class DiskJournalTest extends AbstractJournalTest { + public DiskJournalTest(final int maxSegmentSize) { + super(StorageLevel.DISK, maxSegmentSize); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java new file mode 100644 index 0000000000..6db959dc3f --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import org.junit.Test; + +import java.nio.ByteBuffer; + +import static org.junit.Assert.assertEquals; + +/** + * Segment descriptor test. + * + * @author Jordan Halterman + */ +public class JournalSegmentDescriptorTest { + + /** + * Tests the segment descriptor builder. + */ + @Test + public void testDescriptorBuilder() { + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES)) + .withId(2) + .withIndex(1025) + .withMaxSegmentSize(1024 * 1024) + .withMaxEntries(2048) + .build(); + + assertEquals(2, descriptor.id()); + assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version()); + assertEquals(1025, descriptor.index()); + assertEquals(1024 * 1024, descriptor.maxSegmentSize()); + assertEquals(2048, descriptor.maxEntries()); + + assertEquals(0, descriptor.updated()); + long time = System.currentTimeMillis(); + descriptor.update(time); + assertEquals(time, descriptor.updated()); + } + + /** + * Tests copying the segment descriptor. 
+ */ + @Test + public void testDescriptorCopy() { + JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder() + .withId(2) + .withIndex(1025) + .withMaxSegmentSize(1024 * 1024) + .withMaxEntries(2048) + .build(); + + long time = System.currentTimeMillis(); + descriptor.update(time); + + descriptor = descriptor.copyTo(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES)); + + assertEquals(2, descriptor.id()); + assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version()); + assertEquals(1025, descriptor.index()); + assertEquals(1024 * 1024, descriptor.maxSegmentSize()); + assertEquals(2048, descriptor.maxEntries()); + assertEquals(time, descriptor.updated()); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java new file mode 100644 index 0000000000..114ae094b5 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.io.File; + +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Journal segment file test. + */ +public class JournalSegmentFileTest { + + @Test + public void testIsSegmentFile() throws Exception { + assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1.log")); + assertFalse(JournalSegmentFile.isSegmentFile("foo", "bar-1.log")); + assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1-1.log")); + } + + @Test + public void testCreateSegmentFile() throws Exception { + File file = JournalSegmentFile.createSegmentFile("foo", new File(System.getProperty("user.dir")), 1); + assertTrue(JournalSegmentFile.isSegmentFile("foo", file)); + } + +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java new file mode 100644 index 0000000000..286c6dfb3f --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java @@ -0,0 +1,25 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +/** + * Memory mapped journal test. 
+ */ +public class MappedJournalTest extends AbstractJournalTest { + public MappedJournalTest(final int maxSegmentSize) { + super(StorageLevel.MAPPED, maxSegmentSize); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java new file mode 100644 index 0000000000..b549362423 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java @@ -0,0 +1,49 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal; + +import java.util.Arrays; + +import static com.google.common.base.MoreObjects.toStringHelper; + +/** + * Test entry. + * + * @author Jordan Halterman + */ +public class TestEntry { + private final byte[] bytes; + + public TestEntry(int size) { + this(new byte[size]); + } + + public TestEntry(byte[] bytes) { + this.bytes = bytes; + } + + public byte[] bytes() { + return bytes; + } + + @Override + public String toString() { + return toStringHelper(this) + .add("length", bytes.length) + .add("hash", Arrays.hashCode(bytes)) + .toString(); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java new file mode 100644 index 0000000000..8b04539bd7 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.storage.journal; + +import io.atomix.storage.journal.JournalSerdes.EntryInput; +import io.atomix.storage.journal.JournalSerdes.EntryOutput; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.io.IOException; + +final class TestEntrySerdes implements EntrySerdes { + private static final ByteArraySerdes BA_SERIALIZER = new ByteArraySerdes(); + + @Override + public TestEntry read(final EntryInput input) throws IOException { + return new TestEntry(BA_SERIALIZER.read(input)); + } + + @Override + public void write(final EntryOutput output, final TestEntry entry) throws IOException { + BA_SERIALIZER.write(output, entry.bytes()); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java new file mode 100644 index 0000000000..b7cd38a1a4 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2018-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.storage.journal.index; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +/** + * Sparse journal index test. 
+ */ +public class SparseJournalIndexTest { + @Test + public void testSparseJournalIndex() throws Exception { + JournalIndex index = new SparseJournalIndex(.2); + assertNull(index.lookup(1)); + index.index(1, 2); + assertNull(index.lookup(1)); + index.index(2, 4); + index.index(3, 6); + index.index(4, 8); + index.index(5, 10); + assertEquals(new Position(5, 10), index.lookup(5)); + index.index(6, 12); + index.index(7, 14); + index.index(8, 16); + assertEquals(new Position(5, 10), index.lookup(8)); + index.index(9, 18); + index.index(10, 20); + assertEquals(new Position(10, 20), index.lookup(10)); + index.truncate(8); + assertEquals(new Position(5, 10), index.lookup(8)); + assertEquals(new Position(5, 10), index.lookup(10)); + index.truncate(4); + assertNull(index.lookup(4)); + assertNull(index.lookup(8)); + + index = new SparseJournalIndex(.2); + assertNull(index.lookup(100)); + index.index(101, 2); + assertNull(index.lookup(1)); + index.index(102, 4); + index.index(103, 6); + index.index(104, 8); + index.index(105, 10); + assertEquals(new Position(105, 10), index.lookup(105)); + index.index(106, 12); + index.index(107, 14); + index.index(108, 16); + assertEquals(new Position(105, 10), index.lookup(108)); + index.index(109, 18); + index.index(110, 20); + assertEquals(new Position(110, 20), index.lookup(110)); + index.truncate(108); + assertEquals(new Position(105, 10), index.lookup(108)); + assertEquals(new Position(105, 10), index.lookup(110)); + index.truncate(104); + assertNull(index.lookup(104)); + assertNull(index.lookup(108)); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java new file mode 100644 index 0000000000..a47d37831e --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.atomix.utils.serializer; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class BufferAwareByteArrayOutputStreamTest { + + @Test + public void testBufferSize() throws Exception { + BufferAwareByteArrayOutputStream outputStream = new BufferAwareByteArrayOutputStream(8); + assertEquals(8, outputStream.getBufferSize()); + outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8}); + assertEquals(8, outputStream.getBufferSize()); + outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8}); + assertEquals(16, outputStream.getBufferSize()); + outputStream.reset(); + assertEquals(16, outputStream.getBufferSize()); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java new file mode 100644 index 0000000000..82a9629af5 --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import com.esotericsoftware.kryo.io.Input; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class KryoInputPoolTest { + + private KryoInputPool kryoInputPool; + + @Before + public void setUp() throws Exception { + kryoInputPool = new KryoInputPool(); + } + + @Test + public void discardOutput() { + final Input[] result = new Input[2]; + kryoInputPool.run(input -> { + result[0] = input; + return null; + }, KryoInputPool.MAX_POOLED_BUFFER_SIZE + 1); + kryoInputPool.run(input -> { + result[1] = input; + return null; + }, 0); + assertTrue(result[0] != result[1]); + } + + @Test + public void recycleOutput() { + final Input[] result = new Input[2]; + kryoInputPool.run(input -> { + assertEquals(0, input.position()); + byte[] payload = new byte[]{1, 2, 3, 4}; + input.setBuffer(payload); + assertArrayEquals(payload, input.readBytes(4)); + result[0] = input; + return null; + }, 0); + assertNull(result[0].getInputStream()); + assertEquals(0, result[0].position()); + kryoInputPool.run(input -> { + result[1] = input; + return null; + }, 0); + assertTrue(result[0] == result[1]); + } +} diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java new file mode 100644 index 0000000000..04d55d6fbd --- /dev/null +++ b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2017-2022 Open Networking Foundation and others. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.atomix.utils.serializer; + +import com.esotericsoftware.kryo.io.Output; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class KryoOutputPoolTest { + + private KryoOutputPool kryoOutputPool; + + @Before + public void setUp() throws Exception { + kryoOutputPool = new KryoOutputPool(); + } + + @Test + public void discardOutput() { + final Output[] result = new Output[2]; + kryoOutputPool.run(output -> { + result[0] = output; + return null; + }, KryoOutputPool.MAX_POOLED_BUFFER_SIZE + 1); + kryoOutputPool.run(output -> { + result[1] = output; + return null; + }, 0); + assertTrue(result[0] != result[1]); + } + + @Test + public void recycleOutput() { + final ByteArrayOutput[] result = new ByteArrayOutput[2]; + kryoOutputPool.run(output -> { + output.writeInt(1); + assertEquals(Integer.BYTES, output.position()); + result[0] = output; + return null; + }, 0); + assertEquals(0, result[0].position()); + assertEquals(0, result[0].getByteArrayOutputStream().size()); + kryoOutputPool.run(output -> { + assertEquals(0, output.position()); + result[1] = output; + return null; + }, 0); + assertTrue(result[0] == result[1]); + } +} diff --git a/atomix-storage/src/test/resources/logback.xml b/atomix-storage/src/test/resources/logback.xml new file mode 100644 index 0000000000..41f8f993df --- /dev/null +++ b/atomix-storage/src/test/resources/logback.xml @@ -0,0 +1,29 @@ + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + \ No newline at end of file diff --git a/benchmark/api/pom.xml b/benchmark/api/pom.xml index 3ca170074a..52fde32640 100644 --- a/benchmark/api/pom.xml +++ b/benchmark/api/pom.xml @@ -11,7 +11,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../../opendaylight/md-sal/parent diff --git a/benchmark/dsbenchmark/pom.xml b/benchmark/dsbenchmark/pom.xml index 769ef1d057..1595fb8111 100644 --- a/benchmark/dsbenchmark/pom.xml +++ b/benchmark/dsbenchmark/pom.xml @@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../../opendaylight/md-sal/parent @@ -20,6 +20,11 @@ and is available at http://www.eclipse.org/legal/epl-v10.html bundle + + com.github.spotbugs + spotbugs-annotations + true + org.opendaylight.controller benchmark-api @@ -46,8 +51,8 @@ and is available at http://www.eclipse.org/legal/epl-v10.html true - javax.annotation - javax.annotation-api + jakarta.annotation + jakarta.annotation-api true diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java index e6c9548d5f..444ec2fc75 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java 
+++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java @@ -28,10 +28,10 @@ public final class BaListBuilder { List outerList = new ArrayList<>(outerElements); for (int j = 0; j < outerElements; j++) { outerList.add(new OuterListBuilder() - .setId(j) - .setInnerList(buildInnerList(j, innerElements)) - .withKey(new OuterListKey(j)) - .build()); + .setId(j) + .setInnerList(buildInnerList(j, innerElements)) + .withKey(new OuterListKey(j)) + .build()); } return outerList; } @@ -39,14 +39,14 @@ public final class BaListBuilder { private static Map buildInnerList(final int index, final int elements) { Builder innerList = ImmutableMap.builderWithExpectedSize(elements); - final String itemStr = "Item-" + String.valueOf(index) + "-"; + final String itemStr = "Item-" + index + "-"; for (int i = 0; i < elements; i++) { final InnerListKey key = new InnerListKey(i); innerList.put(key, new InnerListBuilder() - .withKey(key) - .setName(i) - .setValue(itemStr + String.valueOf(i)) - .build()); + .withKey(key) + .setName(i) + .setValue(itemStr + i) + .build()); } return innerList.build(); } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java index 50eb0d4e8b..bddf80e880 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java @@ -7,6 +7,7 @@ */ package org.opendaylight.dsbenchmark; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Random; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; @@ -27,6 +28,7 @@ public abstract class DatastoreAbstractWriter { protected int txOk = 0; protected int txError = 0; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "'this' passed to logging") public DatastoreAbstractWriter(final StartTestInput.Operation oper, final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) { this.outerListElem = outerListElem; @@ -50,18 +52,7 @@ public abstract class DatastoreAbstractWriter { } protected LogicalDatastoreType getDataStoreType() { - final LogicalDatastoreType dsType; - if (dataStore == DataStore.CONFIG) { - dsType = LogicalDatastoreType.CONFIGURATION; - } else if (dataStore == DataStore.OPERATIONAL) { - dsType = LogicalDatastoreType.OPERATIONAL; - } else { - if (rn.nextBoolean() == true) { - dsType = LogicalDatastoreType.OPERATIONAL; - } else { - dsType = LogicalDatastoreType.CONFIGURATION; - } - } - return dsType; + return dataStore == DataStore.CONFIG || dataStore != DataStore.OPERATIONAL && !rn.nextBoolean() + ? 
LogicalDatastoreType.CONFIGURATION : LogicalDatastoreType.OPERATIONAL; } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java index 6886864378..7bdc4d7768 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java @@ -13,12 +13,11 @@ import java.util.List; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.outer.list.InnerList; import org.opendaylight.yangtools.yang.common.QName; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; import org.opendaylight.yangtools.yang.data.api.schema.MapNode; -import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode; -import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; public final class DomListBuilder { // Inner List Qname identifiers for yang model's 'name' and 'value' @@ -33,27 +32,28 @@ public final class DomListBuilder { } public static List buildOuterList(final int outerElements, final int innerElements) { - List outerList = new ArrayList<>(outerElements); + final var outerList = new ArrayList(outerElements); for (int j = 0; j < outerElements; j++) { - outerList.add(ImmutableNodes.mapEntryBuilder() - .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j)) - .withChild(ImmutableNodes.leafNode(OL_ID, j)) - .withChild(buildInnerList(j, innerElements)) - .build()); + outerList.add(ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j)) + .withChild(ImmutableNodes.leafNode(OL_ID, j)) + .withChild(buildInnerList(j, innerElements)) + .build()); } return outerList; } private static MapNode buildInnerList(final int index, final int elements) { - CollectionNodeBuilder innerList = ImmutableNodes.mapNodeBuilder(InnerList.QNAME); + final var innerList = ImmutableNodes.newSystemMapBuilder() + .withNodeIdentifier(new NodeIdentifier(InnerList.QNAME)); - final String itemStr = "Item-" + String.valueOf(index) + "-"; + final String itemStr = "Item-" + index + "-"; for (int i = 0; i < elements; i++) { - innerList.addChild(ImmutableNodes.mapEntryBuilder() - .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i)) - .withChild(ImmutableNodes.leafNode(IL_NAME, i)) - .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i))) - .build()); + innerList.addChild(ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i)) + .withChild(ImmutableNodes.leafNode(IL_NAME, i)) + .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i))) + .build()); } return innerList.build(); } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java 
index 0bcc581184..32f849596e 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java @@ -35,10 +35,11 @@ import org.opendaylight.mdsal.binding.api.RpcProviderService; import org.opendaylight.mdsal.binding.api.WriteTransaction; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMDataBroker; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.DsbenchmarkService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTest; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutputBuilder; @@ -63,17 +64,17 @@ import org.slf4j.LoggerFactory; @Singleton @Component(service = { }) @RequireServiceComponentRuntime -public final class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable { +public final class DsbenchmarkProvider implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkProvider.class); - private static final InstanceIdentifier TEST_EXEC_IID = - InstanceIdentifier.builder(TestExec.class).build(); - private static final InstanceIdentifier TEST_STATUS_IID = - InstanceIdentifier.builder(TestStatus.class).build(); + private static final InstanceIdentifier TEST_EXEC_IID = InstanceIdentifier.create(TestExec.class); + private static final InstanceIdentifier TEST_STATUS_IID = InstanceIdentifier.create(TestStatus.class); private final AtomicReference execStatus = new AtomicReference<>(ExecStatus.Idle); private final DsbenchmarkListenerProvider listenerProvider; - private final DOMDataBroker domDataBroker; // Async DOM Broker for use with all DOM operations - private final DataBroker dataBroker; // Async Binding-Aware Broker for use in tx chains + // Async DOM Broker for use with all DOM operations + private final DOMDataBroker domDataBroker; + // Async Binding-Aware Broker for use in tx chains; + private final DataBroker dataBroker; private final Registration rpcReg; private long testsCompleted = 0; @@ -95,7 +96,7 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea LOG.warn("Working around Bugs 8829 and 6793 by ignoring exception from setTestOperData", e); } - rpcReg = rpcService.registerRpcImplementation(DsbenchmarkService.class, this); + rpcReg = rpcService.registerRpcImplementations((StartTest) this::startTest, (CleanupStore) this::cleanupStore); LOG.info("DsbenchmarkProvider initiated"); } @@ -107,24 +108,22 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea LOG.info("DsbenchmarkProvider closed"); } - @Override - public ListenableFuture> cleanupStore(final CleanupStoreInput input) { + 
private ListenableFuture> cleanupStore(final CleanupStoreInput input) { cleanupTestStore(); LOG.debug("Data Store cleaned up"); return Futures.immediateFuture(RpcResultBuilder.success(new CleanupStoreOutputBuilder().build()).build()); } - @Override @SuppressWarnings("checkstyle:illegalCatch") - public ListenableFuture> startTest(final StartTestInput input) { + private ListenableFuture> startTest(final StartTestInput input) { LOG.info("Starting the data store benchmark test, input: {}", input); // Check if there is a test in progress - if (execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing) == false) { + if (!execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing)) { LOG.info("Test in progress"); return RpcResultBuilder.success(new StartTestOutputBuilder() - .setStatus(StartTestOutput.Status.TESTINPROGRESS) - .build()).buildFuture(); + .setStatus(StartTestOutput.Status.TESTINPROGRESS) + .build()).buildFuture(); } // Cleanup data that may be left over from a previous test run @@ -153,11 +152,11 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea testsCompleted++; } catch (final Exception e) { - LOG.error("Test error: {}", e.toString()); + LOG.error("Test error", e); execStatus.set(ExecStatus.Idle); return RpcResultBuilder.success(new StartTestOutputBuilder() - .setStatus(StartTestOutput.Status.FAILED) - .build()).buildFuture(); + .setStatus(StartTestOutput.Status.FAILED) + .build()).buildFuture(); } LOG.info("Test finished"); @@ -249,43 +248,37 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea retVal = new SimpletxBaWrite(dataBroker, oper, outerListElem, innerListElem, writesPerTx, dataStore); } + } else if (StartTestInput.Operation.DELETE == oper) { + retVal = new SimpletxDomDelete(domDataBroker, outerListElem, + innerListElem, writesPerTx, dataStore); + } else if (StartTestInput.Operation.READ == oper) { + retVal = new SimpletxDomRead(domDataBroker, outerListElem, + innerListElem, writesPerTx, dataStore); } else { - if (StartTestInput.Operation.DELETE == oper) { - retVal = new SimpletxDomDelete(domDataBroker, outerListElem, - innerListElem, writesPerTx, dataStore); - } else if (StartTestInput.Operation.READ == oper) { - retVal = new SimpletxDomRead(domDataBroker, outerListElem, - innerListElem, writesPerTx, dataStore); - } else { - retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem, - innerListElem, writesPerTx, dataStore); - } + retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem, + innerListElem, writesPerTx, dataStore); } - } else { - if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) { - if (StartTestInput.Operation.DELETE == oper) { - retVal = new TxchainBaDelete(dataBroker, outerListElem, - innerListElem, writesPerTx, dataStore); - } else if (StartTestInput.Operation.READ == oper) { - retVal = new TxchainBaRead(dataBroker, outerListElem, - innerListElem,writesPerTx, dataStore); - } else { - retVal = new TxchainBaWrite(dataBroker, oper, outerListElem, - innerListElem, writesPerTx, dataStore); - } + } else if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) { + if (StartTestInput.Operation.DELETE == oper) { + retVal = new TxchainBaDelete(dataBroker, outerListElem, + innerListElem, writesPerTx, dataStore); + } else if (StartTestInput.Operation.READ == oper) { + retVal = new TxchainBaRead(dataBroker, outerListElem, + innerListElem,writesPerTx, dataStore); } else { - if (StartTestInput.Operation.DELETE == oper) { - retVal = new TxchainDomDelete(domDataBroker, 
outerListElem, - innerListElem, writesPerTx, dataStore); - } else if (StartTestInput.Operation.READ == oper) { - retVal = new TxchainDomRead(domDataBroker, outerListElem, - innerListElem, writesPerTx, dataStore); - - } else { - retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem, - innerListElem,writesPerTx, dataStore); - } + retVal = new TxchainBaWrite(dataBroker, oper, outerListElem, + innerListElem, writesPerTx, dataStore); } + } else if (StartTestInput.Operation.DELETE == oper) { + retVal = new TxchainDomDelete(domDataBroker, outerListElem, + innerListElem, writesPerTx, dataStore); + } else if (StartTestInput.Operation.READ == oper) { + retVal = new TxchainDomRead(domDataBroker, outerListElem, + innerListElem, writesPerTx, dataStore); + + } else { + retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem, + innerListElem,writesPerTx, dataStore); } } finally { execStatus.set(ExecStatus.Idle); diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java index 75523eff7a..34c2bfdb68 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java @@ -7,15 +7,11 @@ */ package org.opendaylight.dsbenchmark.listener; -import java.util.Collection; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; -import org.opendaylight.mdsal.binding.api.DataObjectModification; -import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType; import org.opendaylight.mdsal.binding.api.DataTreeChangeListener; import org.opendaylight.mdsal.binding.api.DataTreeModification; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; -import org.opendaylight.yangtools.yang.binding.DataObject; -import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -25,8 +21,7 @@ public class DsbenchmarkListener implements DataTreeChangeListener { private final AtomicInteger numDataChanges = new AtomicInteger(0); @Override - public void onDataTreeChanged( - final Collection> changes) { + public void onDataTreeChanged(final List> changes) { // Since we're registering the same DsbenchmarkListener object for both // OPERATIONAL and CONFIG, the onDataTreeChanged() method can be called // from different threads, and we need to use atomic counters. 
@@ -40,20 +35,19 @@ public class DsbenchmarkListener implements DataTreeChangeListener { } private static synchronized void logDataTreeChangeEvent(final int eventNum, - final Collection> changes) { + final List> changes) { LOG.debug("DsbenchmarkListener-onDataTreeChanged: Event {}", eventNum); - for (DataTreeModification change : changes) { - final DataObjectModification rootNode = change.getRootNode(); - final ModificationType modType = rootNode.getModificationType(); - final PathArgument changeId = rootNode.getIdentifier(); - final Collection> modifications = - rootNode.getModifiedChildren(); + for (var change : changes) { + final var rootNode = change.getRootNode(); + final var modType = rootNode.modificationType(); + final var changeId = rootNode.step(); + final var modifications = rootNode.modifiedChildren(); LOG.debug(" changeId {}, modType {}, mods: {}", changeId, modType, modifications.size()); - for (DataObjectModification mod : modifications) { - LOG.debug(" mod-getDataAfter: {}", mod.getDataAfter()); + for (var mod : modifications) { + LOG.debug(" mod-getDataAfter: {}", mod.dataAfter()); } } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java index 6bc931ca11..20e271ad9a 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java @@ -15,7 +15,7 @@ import org.opendaylight.mdsal.binding.api.DataBroker; import org.opendaylight.mdsal.binding.api.DataTreeIdentifier; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -24,7 +24,8 @@ public class DsbenchmarkListenerProvider { private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkListenerProvider.class); private static final InstanceIdentifier TEST_EXEC_IID = InstanceIdentifier.builder(TestExec.class).build(); - private final List> listeners = new ArrayList<>(); + private final List listeners = new ArrayList<>(); + private final List registrations = new ArrayList<>(); private final DataBroker dataBroker; public DsbenchmarkListenerProvider(final DataBroker dataBroker) { @@ -34,11 +35,12 @@ public class DsbenchmarkListenerProvider { public void createAndRegisterListeners(final int numListeners) { for (int i = 0; i < numListeners; i++) { - DsbenchmarkListener listener = new DsbenchmarkListener(); - listeners.add(dataBroker.registerDataTreeChangeListener( - DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener)); - listeners.add(dataBroker.registerDataTreeChangeListener( - DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener)); + var listener = new DsbenchmarkListener(); + listeners.add(listener); + registrations.add(dataBroker.registerTreeChangeListener( + DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener)); + registrations.add(dataBroker.registerTreeChangeListener( + DataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, 
TEST_EXEC_IID), listener)); } LOG.debug("DsbenchmarkListenerProvider created {} listeneres", numListeners); @@ -47,8 +49,8 @@ public class DsbenchmarkListenerProvider { public long getDataChangeCount() { long dataChanges = 0; - for (ListenerRegistration listenerRegistration : listeners) { - dataChanges += listenerRegistration.getInstance().getNumDataChanges(); + for (var listener : listeners) { + dataChanges += listener.getNumDataChanges(); } LOG.debug("DsbenchmarkListenerProvider , total data changes {}", dataChanges); return dataChanges; @@ -57,11 +59,14 @@ public class DsbenchmarkListenerProvider { public long getEventCountAndDestroyListeners() { long totalEvents = 0; - for (ListenerRegistration listenerRegistration : listeners) { - totalEvents += listenerRegistration.getInstance().getNumEvents(); - listenerRegistration.close(); + registrations.forEach(Registration::close); + registrations.clear(); + + for (var listener : listeners) { + totalEvents += listener.getNumEvents(); } listeners.clear(); + LOG.debug("DsbenchmarkListenerProvider destroyed listeneres, total events {}", totalEvents); return totalEvents; } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java index aae96750a9..1f6b7f9884 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java @@ -64,7 +64,7 @@ public class SimpletxBaRead extends DatastoreAbstractWriter { try { optionalDataObject = submitFuture.get(); if (optionalDataObject != null && optionalDataObject.isPresent()) { - OuterList outerList = optionalDataObject.get(); + OuterList outerList = optionalDataObject.orElseThrow(); String[] objectsArray = new String[outerList.getInnerList().size()]; @@ -77,7 +77,7 @@ public class SimpletxBaRead extends DatastoreAbstractWriter { } for (int i = 0; i < outerList.getInnerList().size(); i++) { String itemStr = objectsArray[i]; - if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) { + if (!itemStr.contentEquals("Item-" + l + "-" + i)) { LOG.error("innerList: name: {}, value: {}", i, itemStr); break; } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java index 8893a70574..5395868aec 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java @@ -5,16 +5,15 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.dsbenchmark.simpletx; +import static java.util.Objects.requireNonNull; + import java.util.List; import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.BaListBuilder; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.binding.api.DataBroker; -import org.opendaylight.mdsal.binding.api.WriteTransaction; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; @@ -25,31 +24,31 @@ import org.slf4j.LoggerFactory; public class SimpletxBaWrite extends DatastoreAbstractWriter { private static final Logger LOG = LoggerFactory.getLogger(SimpletxBaWrite.class); + private final DataBroker dataBroker; - private List list; + private List list = null; public SimpletxBaWrite(final DataBroker dataBroker, final StartTestInput.Operation oper, final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) { super(oper, outerListElem, innerListElem, writesPerTx, dataStore); - this.dataBroker = dataBroker; + this.dataBroker = requireNonNull(dataBroker); LOG.debug("Created SimpletxBaWrite"); } @Override public void createList() { - list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem); + list = BaListBuilder.buildOuterList(outerListElem, innerListElem); } @Override public void executeList() { - final LogicalDatastoreType dsType = getDataStoreType(); + final var dsType = getDataStoreType(); - WriteTransaction tx = dataBroker.newWriteOnlyTransaction(); + var tx = dataBroker.newWriteOnlyTransaction(); long writeCnt = 0; - for (OuterList element : this.list) { - InstanceIdentifier iid = InstanceIdentifier.create(TestExec.class) - .child(OuterList.class, element.key()); + for (var element : list) { + final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key()); if (oper == StartTestInput.Operation.PUT) { tx.put(dsType, iid, element); } else { @@ -80,5 +79,4 @@ public class SimpletxBaWrite extends DatastoreAbstractWriter { } } } - } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java index 4e9a59a744..d46a769b09 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java @@ -66,7 +66,7 @@ public class SimpletxDomRead extends DatastoreAbstractWriter { try { Optional optionalDataObject = submitFuture.get(); if (optionalDataObject != null && optionalDataObject.isPresent()) { - NormalizedNode ret = optionalDataObject.get(); + NormalizedNode ret = optionalDataObject.orElseThrow(); LOG.trace("optionalDataObject is {}", ret); txOk++; } else { diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java index f4cdbef26d..a57773125f 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java @@ -5,16 +5,15 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.dsbenchmark.simpletx; +import static java.util.Objects.requireNonNull; + import java.util.List; import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.dsbenchmark.DomListBuilder; -import 
org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMDataBroker; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; @@ -27,33 +26,32 @@ import org.slf4j.LoggerFactory; public class SimpletxDomWrite extends DatastoreAbstractWriter { private static final Logger LOG = LoggerFactory.getLogger(SimpletxDomWrite.class); - private final DOMDataBroker domDataBroker; - private List list; - public SimpletxDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper, + private final DOMDataBroker dataBroker; + private List list = null; + + public SimpletxDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper, final int outerListElem, final int innerListElem, final long putsPerTx, final DataStore dataStore) { super(oper, outerListElem, innerListElem, putsPerTx, dataStore); - this.domDataBroker = domDataBroker; + this.dataBroker = requireNonNull(dataBroker); LOG.debug("Created SimpletxDomWrite"); } @Override public void createList() { - list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem); + list = DomListBuilder.buildOuterList(outerListElem, innerListElem); } @Override public void executeList() { - final LogicalDatastoreType dsType = getDataStoreType(); - final YangInstanceIdentifier pid = - YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build(); + final var dsType = getDataStoreType(); + final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME); - DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction(); + var tx = dataBroker.newWriteOnlyTransaction(); long writeCnt = 0; - for (MapEntryNode element : this.list) { - YangInstanceIdentifier yid = - pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap())); + for (var element : list) { + final var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap())); if (oper == StartTestInput.Operation.PUT) { tx.put(dsType, yid, element); @@ -71,7 +69,7 @@ public class SimpletxDomWrite extends DatastoreAbstractWriter { LOG.error("Transaction failed", e); txError++; } - tx = domDataBroker.newWriteOnlyTransaction(); + tx = dataBroker.newWriteOnlyTransaction(); writeCnt = 0; } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java index bd947acddb..41b233774a 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java @@ -12,9 +12,7 @@ import com.google.common.util.concurrent.MoreExecutors; import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.binding.api.DataBroker; -import org.opendaylight.mdsal.binding.api.Transaction; import org.opendaylight.mdsal.binding.api.TransactionChain; -import org.opendaylight.mdsal.binding.api.TransactionChainListener; import org.opendaylight.mdsal.binding.api.WriteTransaction; import 
org.opendaylight.mdsal.common.api.CommitInfo; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; @@ -24,10 +22,11 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchm import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListKey; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.opendaylight.yangtools.yang.common.Empty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainBaDelete extends DatastoreAbstractWriter implements TransactionChainListener { +public class TxchainBaDelete extends DatastoreAbstractWriter implements FutureCallback { private static final Logger LOG = LoggerFactory.getLogger(TxchainBaDelete.class); private final DataBroker bindingDataBroker; @@ -53,7 +52,8 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact @Override public void executeList() { final LogicalDatastoreType dsType = getDataStoreType(); - final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this); + final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(); + chain.addCallback(this); WriteTransaction tx = chain.newWriteOnlyTransaction(); int txSubmitted = 0; @@ -104,13 +104,12 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact } @Override - public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause); + public void onFailure(final Throwable cause) { + LOG.error("Broken chain in TxchainBaDelete", cause); } @Override - public void onTransactionChainSuccessful(final TransactionChain chain) { - LOG.debug("TxchainBaDelete closed successfully, chain {}", chain); + public void onSuccess(final Empty chain) { + LOG.debug("TxchainBaDelete closed successfully"); } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java index 414ad159af..a455995666 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java @@ -13,9 +13,6 @@ import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.binding.api.DataBroker; import org.opendaylight.mdsal.binding.api.ReadTransaction; -import org.opendaylight.mdsal.binding.api.Transaction; -import org.opendaylight.mdsal.binding.api.TransactionChain; -import org.opendaylight.mdsal.binding.api.TransactionChainListener; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; @@ -27,7 +24,7 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainBaRead extends DatastoreAbstractWriter implements TransactionChainListener { +public class TxchainBaRead extends 
DatastoreAbstractWriter { private static final Logger LOG = LoggerFactory.getLogger(TxchainBaRead.class); private final DataBroker bindingDataBroker; @@ -64,19 +61,19 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio try { Optional optionalDataObject = submitFuture.get(); if (optionalDataObject != null && optionalDataObject.isPresent()) { - OuterList outerList = optionalDataObject.get(); + OuterList outerList = optionalDataObject.orElseThrow(); - String[] objectsArray = new String[outerList.getInnerList().size()]; - for (InnerList innerList : outerList.getInnerList().values()) { + String[] objectsArray = new String[outerList.nonnullInnerList().size()]; + for (InnerList innerList : outerList.nonnullInnerList().values()) { if (objectsArray[innerList.getName()] != null) { LOG.error("innerList: DUPLICATE name: {}, value: {}", innerList.getName(), innerList.getValue()); } objectsArray[innerList.getName()] = innerList.getValue(); } - for (int i = 0; i < outerList.getInnerList().size(); i++) { + for (int i = 0; i < outerList.nonnullInnerList().size(); i++) { String itemStr = objectsArray[i]; - if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) { + if (!itemStr.contentEquals("Item-" + l + "-" + i)) { LOG.error("innerList: name: {}, value: {}", i, itemStr); break; } @@ -92,15 +89,4 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio } } } - - @Override - public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause); - } - - @Override - public void onTransactionChainSuccessful(final TransactionChain chain) { - LOG.debug("TxchainBaDelete closed successfully, chain {}", chain); - } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java index b96c1763b7..3640f607db 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java @@ -7,6 +7,8 @@ */ package org.opendaylight.dsbenchmark.txchain; +import static java.util.Objects.requireNonNull; + import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.MoreExecutors; import java.util.List; @@ -14,50 +16,47 @@ import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.BaListBuilder; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.binding.api.DataBroker; -import org.opendaylight.mdsal.binding.api.Transaction; -import org.opendaylight.mdsal.binding.api.TransactionChain; -import org.opendaylight.mdsal.binding.api.TransactionChainListener; -import org.opendaylight.mdsal.binding.api.WriteTransaction; import org.opendaylight.mdsal.common.api.CommitInfo; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.Operation; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.opendaylight.yangtools.yang.common.Empty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainBaWrite extends DatastoreAbstractWriter implements TransactionChainListener { +public class TxchainBaWrite extends DatastoreAbstractWriter implements FutureCallback { private static final Logger LOG = LoggerFactory.getLogger(TxchainBaWrite.class); - private final DataBroker bindingDataBroker; - private List list; - public TxchainBaWrite(final DataBroker bindingDataBroker, final Operation oper, - final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) { + private final DataBroker dataBroker; + private List list = null; + + public TxchainBaWrite(final DataBroker dataBroker, final Operation oper, final int outerListElem, + final int innerListElem, final long writesPerTx, final DataStore dataStore) { super(oper, outerListElem, innerListElem, writesPerTx, dataStore); - this.bindingDataBroker = bindingDataBroker; + this.dataBroker = requireNonNull(dataBroker); LOG.debug("Created TxchainBaWrite"); } @Override public void createList() { - list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem); + list = BaListBuilder.buildOuterList(outerListElem, innerListElem); } @Override public void executeList() { - final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this); - final LogicalDatastoreType dsType = getDataStoreType(); + final var chain = dataBroker.createMergingTransactionChain(); + chain.addCallback(this); + final var dsType = getDataStoreType(); - WriteTransaction tx = chain.newWriteOnlyTransaction(); + var tx = chain.newWriteOnlyTransaction(); int txSubmitted = 0; int writeCnt = 0; - for (OuterList element : this.list) { - InstanceIdentifier iid = InstanceIdentifier.create(TestExec.class) - .child(OuterList.class, element.key()); + for (var element : list) { + final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key()); if (oper == StartTestInput.Operation.PUT) { tx.put(dsType, iid, element); @@ -106,14 +105,12 @@ public class TxchainBaWrite extends DatastoreAbstractWriter implements Transacti } @Override - public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in DatastoreBaAbstractWrite, transaction {}", chain, transaction.getIdentifier(), - cause); + public void onFailure(final Throwable cause) { + LOG.error("Broken chain in DatastoreBaAbstractWrite", cause); } @Override - public void onTransactionChainSuccessful(final TransactionChain chain) { - LOG.debug("DatastoreBaAbstractWrite closed successfully, chain {}", chain); + public void onSuccess(final Empty result) { + LOG.debug("DatastoreBaAbstractWrite closed successfully"); } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java index 75dee62673..c0280c6b02 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java 
@@ -14,21 +14,20 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.common.api.CommitInfo; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMDataBroker; -import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction; import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTransactionChainListener { +public class TxchainDomDelete extends DatastoreAbstractWriter implements FutureCallback { private static final Logger LOG = LoggerFactory.getLogger(TxchainDomDelete.class); private final DOMDataBroker domDataBroker; @@ -57,7 +56,8 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id"); final YangInstanceIdentifier pid = YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build(); - final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this); + final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(); + chain.addCallback(this); DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction(); int txSubmitted = 0; @@ -108,13 +108,12 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran } @Override - public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause); + public void onFailure(final Throwable cause) { + LOG.error("Broken chain in TxchainDomDelete", cause); } @Override - public void onTransactionChainSuccessful(final DOMTransactionChain chain) { - LOG.debug("TxchainDomDelete closed successfully, chain {}", chain); + public void onSuccess(final Empty result) { + LOG.debug("TxchainDomDelete closed successfully"); } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java index 5c70edcd6c..cccdb4ecdb 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java @@ -14,9 +14,6 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import 
org.opendaylight.mdsal.dom.api.DOMDataBroker; import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction; -import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; @@ -28,7 +25,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransactionChainListener { +public class TxchainDomRead extends DatastoreAbstractWriter { private static final Logger LOG = LoggerFactory.getLogger(TxchainDomRead.class); private final DOMDataBroker domDataBroker; @@ -54,7 +51,7 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa @Override public void executeList() { final LogicalDatastoreType dsType = getDataStoreType(); - final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id"); + final QName olId = QName.create(OuterList.QNAME, "id"); final YangInstanceIdentifier pid = YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build(); @@ -75,15 +72,4 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa } } } - - @Override - public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause); - } - - @Override - public void onTransactionChainSuccessful(final DOMTransactionChain chain) { - LOG.debug("TxchainDomDelete closed successfully, chain {}", chain); - } } diff --git a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java index 4f254a2ce9..b50a7e0ad1 100644 --- a/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java +++ b/benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java @@ -14,53 +14,49 @@ import java.util.concurrent.ExecutionException; import org.opendaylight.dsbenchmark.DatastoreAbstractWriter; import org.opendaylight.dsbenchmark.DomListBuilder; import org.opendaylight.mdsal.common.api.CommitInfo; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMDataBroker; -import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTransactionChainListener { +public class TxchainDomWrite extends DatastoreAbstractWriter implements FutureCallback { private static final Logger LOG = LoggerFactory.getLogger(TxchainDomWrite.class); - private final DOMDataBroker domDataBroker; - private List list; - public TxchainDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper, + private final DOMDataBroker dataBroker; + private List list = null; + + public TxchainDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper, final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) { super(oper, outerListElem, innerListElem, writesPerTx, dataStore); - this.domDataBroker = domDataBroker; + this.dataBroker = dataBroker; LOG.debug("Created TxchainDomWrite"); } @Override public void createList() { - list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem); + list = DomListBuilder.buildOuterList(outerListElem, innerListElem); } @Override public void executeList() { - final LogicalDatastoreType dsType = getDataStoreType(); - final YangInstanceIdentifier pid = - YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build(); - final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this); + final var dsType = getDataStoreType(); + final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME); + final var chain = dataBroker.createMergingTransactionChain(); + chain.addCallback(this); - DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction(); + var tx = chain.newWriteOnlyTransaction(); int txSubmitted = 0; int writeCnt = 0; - for (MapEntryNode element : this.list) { - YangInstanceIdentifier yid = - pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap())); + for (var element : list) { + var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap())); if (oper == StartTestInput.Operation.PUT) { tx.put(dsType, yid, element); @@ -112,13 +108,12 @@ public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTrans } @Override - public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction, - final Throwable cause) { - LOG.error("Broken chain {} in TxchainDomWrite, transaction {}", chain, transaction.getIdentifier(), cause); + public void onFailure(final Throwable cause) { + LOG.error("Broken chain in TxchainDomWrite", cause); } @Override - public void onTransactionChainSuccessful(final DOMTransactionChain chain) { - LOG.debug("Chain {} closed successfully", chain); + public void onSuccess(final Empty result) { + LOG.debug("Chain closed successfully"); } } diff --git a/benchmark/ntfbenchmark/pom.xml b/benchmark/ntfbenchmark/pom.xml index 519ba4efbf..82b3c3aabf 100644 --- a/benchmark/ntfbenchmark/pom.xml +++ b/benchmark/ntfbenchmark/pom.xml @@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html 
org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../../opendaylight/md-sal/parent @@ -38,8 +38,8 @@ and is available at http://www.eclipse.org/legal/epl-v10.html true - javax.annotation - javax.annotation-api + jakarta.annotation + jakarta.annotation-api true diff --git a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java index 021369acb0..ee77a6c342 100644 --- a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java +++ b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java @@ -8,8 +8,11 @@ package ntfbenchmark.impl; import org.opendaylight.mdsal.binding.api.NotificationPublishService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer { + private static final Logger LOG = LoggerFactory.getLogger(NtfbenchBlockingProducer.class); public NtfbenchBlockingProducer(final NotificationPublishService publishService, final int iterations, final int payloadSize) { @@ -22,12 +25,13 @@ public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer { int ntfOk = 0; int ntfError = 0; - for (int i = 0; i < this.iterations; i++) { + for (int i = 0; i < iterations; i++) { try { - this.publishService.putNotification(this.ntf); + publishService.putNotification(ntf); ntfOk++; } catch (final Exception e) { ntfError++; + LOG.debug("Failed to push notification", e); } } diff --git a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java index b0e239c4d4..4b44e1d73d 100644 --- a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java +++ b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java @@ -28,9 +28,9 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer { int ntfOk = 0; int ntfError = 0; ListenableFuture lastOkFuture = null; - for (int i = 0; i < this.iterations; i++) { + for (int i = 0; i < iterations; i++) { try { - final ListenableFuture result = this.publishService.offerNotification(this.ntf); + final ListenableFuture result = publishService.offerNotification(ntf); if (NotificationPublishService.REJECTED == result) { ntfError++; } else { @@ -50,9 +50,8 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer { try { lastOkFuture.get(); } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); + throw new IllegalStateException(e); } } } - } diff --git a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java index d126b2774a..b57d37b99b 100644 --- a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java +++ b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java @@ -9,11 +9,10 @@ package ntfbenchmark.impl; import com.google.common.util.concurrent.Futures; import java.util.concurrent.Future; +import org.opendaylight.mdsal.binding.api.NotificationService.Listener; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.NtfbenchPayloadListener; - 
-public class NtfbenchTestListener implements NtfbenchPayloadListener { +public class NtfbenchTestListener implements Listener { private final int expectedSize; private int received = 0; @@ -22,7 +21,7 @@ public class NtfbenchTestListener implements NtfbenchPayloadListener { } @Override - public void onNtfbench(final Ntfbench notification) { + public void onNotification(final Ntfbench notification) { if (expectedSize == notification.getPayload().size()) { received++; } diff --git a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java index 1b16329d52..8cce7448bc 100644 --- a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java +++ b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java @@ -20,9 +20,8 @@ public class NtfbenchWTCListener extends NtfbenchTestListener { } @Override - public void onNtfbench(final Ntfbench notification) { - // TODO Auto-generated method stub - super.onNtfbench(notification); + public void onNotification(final Ntfbench notification) { + super.onNotification(notification); if (expectedCount == getReceived()) { allDone.set(null); } diff --git a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java index acd5af6143..7c8e51f643 100644 --- a/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java +++ b/benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java @@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull; import com.google.common.util.concurrent.ListenableFuture; import java.util.ArrayList; -import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -23,14 +22,15 @@ import javax.inject.Singleton; import org.opendaylight.mdsal.binding.api.NotificationPublishService; import org.opendaylight.mdsal.binding.api.NotificationService; import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.NtfbenchmarkService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput.ProducerType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatus; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusOutput; -import org.opendaylight.yangtools.concepts.ListenerRegistration; import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.common.RpcResultBuilder; 
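
The ntfbenchmark changes above replace the generated NtfbenchPayloadListener interface with the generic NotificationService.Listener, registered once per notification type. A minimal sketch of that registration pattern, assuming the NotificationService is injected as listenService (as in NtfbenchmarkProvider) and reusing the NtfbenchTestListener shown above; the sketch class name is hypothetical:

    package ntfbenchmark.impl;

    import org.opendaylight.mdsal.binding.api.NotificationService;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
    import org.opendaylight.yangtools.concepts.Registration;

    // Sketch only: illustrates the per-type Listener registration this patch adopts.
    final class ListenerRegistrationSketch {
        private ListenerRegistrationSketch() {
            // static helper only
        }

        static Registration subscribe(final NotificationService listenService, final int expectedSize) {
            // One Registration per (type, listener) pair replaces the former
            // ListenerRegistration handle; close() it to unsubscribe.
            final var listener = new NtfbenchTestListener(expectedSize);
            return listenService.registerListener(Ntfbench.class, listener);
        }
    }

Closing the returned Registration is what NtfbenchmarkProvider now does in its finally block via registrations.forEach(Registration::close).
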
@@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory; @Singleton @Component(service = {}) @RequireServiceComponentRuntime -public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService { +public final class NtfbenchmarkProvider implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(NtfbenchmarkProvider.class); private static final int TEST_TIMEOUT = 5; @@ -61,7 +61,7 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe @Reference final RpcProviderService rpcService) { this.listenService = requireNonNull(listenService); this.publishService = requireNonNull(publishService); - reg = rpcService.registerRpcImplementation(NtfbenchmarkService.class, this); + reg = rpcService.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest); LOG.debug("NtfbenchmarkProvider initiated"); } @@ -73,20 +73,20 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe LOG.info("NtfbenchmarkProvider closed"); } - @Override - public ListenableFuture> startTest(final StartTestInput input) { + private ListenableFuture> startTest(final StartTestInput input) { final int producerCount = input.getProducers().intValue(); final int listenerCount = input.getListeners().intValue(); final int iterations = input.getIterations().intValue(); final int payloadSize = input.getIterations().intValue(); - final List producers = new ArrayList<>(producerCount); - final List> listeners = new ArrayList<>(listenerCount); + final var producers = new ArrayList(producerCount); for (int i = 0; i < producerCount; i++) { producers.add(new NtfbenchBlockingProducer(publishService, iterations, payloadSize)); } int expectedCntPerListener = producerCount * iterations; + final var listeners = new ArrayList(listenerCount); + final var registrations = new ArrayList(listenerCount); for (int i = 0; i < listenerCount; i++) { final NtfbenchTestListener listener; if (input.getProducerType() == ProducerType.BLOCKING) { @@ -94,7 +94,8 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe } else { listener = new NtfbenchTestListener(payloadSize); } - listeners.add(listenService.registerNotificationListener(listener)); + listeners.add(listener); + registrations.add(listenService.registerListener(Ntfbench.class, listener)); } try { @@ -110,11 +111,11 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe executor.shutdown(); try { executor.awaitTermination(TEST_TIMEOUT, TimeUnit.MINUTES); - for (ListenerRegistration listenerRegistration : listeners) { - listenerRegistration.getInstance().getAllDone().get(); + for (var listener : listeners) { + listener.getAllDone().get(); } } catch (final InterruptedException | ExecutionException e) { - LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT); + LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT, e); } final long producerEndTime = System.nanoTime(); @@ -124,8 +125,8 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe long allProducersOk = 0; long allProducersError = 0; - for (final ListenerRegistration listenerRegistration : listeners) { - allListeners += listenerRegistration.getInstance().getReceived(); + for (var listener : listeners) { + allListeners += listener.getReceived(); } final long listenerElapsedTime = producerEndTime - startTime; @@ -149,14 +150,11 @@ public final class NtfbenchmarkProvider implements 
AutoCloseable, NtfbenchmarkSe .build(); return RpcResultBuilder.success(output).buildFuture(); } finally { - for (final ListenerRegistration listenerRegistration : listeners) { - listenerRegistration.close(); - } + registrations.forEach(Registration::close); } } - @Override - public ListenableFuture> testStatus(final TestStatusInput input) { + private ListenableFuture> testStatus(final TestStatusInput input) { throw new UnsupportedOperationException("Not implemented"); } } diff --git a/benchmark/pom.xml b/benchmark/pom.xml index 5d182fd2b1..b79dbd5b8c 100644 --- a/benchmark/pom.xml +++ b/benchmark/pom.xml @@ -10,13 +10,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller benchmark-aggregator - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -29,5 +29,6 @@ and is available at http://www.eclipse.org/legal/epl-v10.html dsbenchmark ntfbenchmark rpcbenchmark + segjournal-benchmark diff --git a/benchmark/rpcbenchmark/pom.xml b/benchmark/rpcbenchmark/pom.xml index ed020b7afa..8eefdf6664 100644 --- a/benchmark/rpcbenchmark/pom.xml +++ b/benchmark/rpcbenchmark/pom.xml @@ -8,39 +8,55 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution, and is available at http://www.eclipse.org/legal/epl-v10.html --> - 4.0.0 - - org.opendaylight.controller - mdsal-parent - 5.0.0-SNAPSHOT - ../../opendaylight/md-sal/parent - + 4.0.0 + + org.opendaylight.controller + mdsal-parent + 9.0.3-SNAPSHOT + ../../opendaylight/md-sal/parent + - rpcbenchmark - bundle + rpcbenchmark + bundle - - - org.opendaylight.controller - benchmark-api - - - org.opendaylight.mdsal - mdsal-binding-api - - - org.osgi - org.osgi.service.component.annotations - - - com.guicedee.services - javax.inject - true - - - javax.annotation - javax.annotation-api - true - - + + + com.guicedee.services + javax.inject + true + + + com.google.guava + guava + + + jakarta.annotation + jakarta.annotation-api + true + + + org.opendaylight.controller + benchmark-api + + + org.opendaylight.mdsal + yang-binding + + + org.opendaylight.mdsal + mdsal-binding-api + + + org.opendaylight.yangtools + concepts + + + org.opendaylight.yangtools + yang-common + + + org.osgi + org.osgi.service.component.annotations + + diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java index 33328f9664..34aa71ca26 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java @@ -14,21 +14,18 @@ import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOut import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.common.RpcResultBuilder; -abstract class AbstractRpcbenchPayloadService implements RpcbenchPayloadService { +abstract class AbstractRpcbenchPayloadService { private int numRpcs = 0; - @Override - public final ListenableFuture> globalRpcBench(final GlobalRpcBenchInput input) { + final 
ListenableFuture> globalRpcBench(final GlobalRpcBenchInput input) { numRpcs++; return RpcResultBuilder.success(new GlobalRpcBenchOutputBuilder(input).build()).buildFuture(); } - @Override - public final ListenableFuture> routedRpcBench(final RoutedRpcBenchInput input) { + final ListenableFuture> routedRpcBench(final RoutedRpcBenchInput input) { numRpcs++; return RpcResultBuilder.success(new RoutedRpcBenchOutputBuilder(input).build()).buildFuture(); } diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java index bbbf309d88..86e3a097bf 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java @@ -9,26 +9,22 @@ package rpcbenchmark.impl; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap.Builder; -import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInputBuilder; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey; -import org.opendaylight.yangtools.yang.common.RpcResult; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class GlobalBindingRTCClient implements RTCClient { private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCClient.class); - private final RpcbenchPayloadService service; + private final GlobalRpcBench globalRpcBench; private final AtomicLong rpcOk = new AtomicLong(0); private final AtomicLong rpcError = new AtomicLong(0); private final GlobalRpcBenchInput inVal; @@ -44,12 +40,8 @@ public class GlobalBindingRTCClient implements RTCClient { return rpcError.get(); } - public GlobalBindingRTCClient(final RpcConsumerRegistry registry, final int inSize) { - if (registry != null) { - this.service = registry.getRpcService(RpcbenchPayloadService.class); - } else { - this.service = null; - } + public GlobalBindingRTCClient(final RpcService rpcService, final int inSize) { + globalRpcBench = rpcService.getRpc(GlobalRpcBench.class); this.inSize = inSize; Builder listVals = ImmutableMap.builderWithExpectedSize(inSize); @@ -66,12 +58,12 @@ public class GlobalBindingRTCClient implements RTCClient { int error = 0; for (int i = 0; i < iterations; i++) { - Future> output = service.globalRpcBench(inVal); + final var output = globalRpcBench.invoke(inVal); try { - RpcResult rpcResult = output.get(); + final var rpcResult = output.get(); if (rpcResult.isSuccessful()) { - Map retVal = rpcResult.getResult().getPayload(); + final var retVal = rpcResult.getResult().getPayload(); if (retVal.size() == inSize) { ok++; } @@ -94,5 +86,4 @@ public class 
GlobalBindingRTCClient implements RTCClient { // TODO Auto-generated method stub } - } diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java index 67fde76865..78d8e4a1ba 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java @@ -8,7 +8,8 @@ package rpcbenchmark.impl; import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService; +import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench; +import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench; import org.opendaylight.yangtools.concepts.Registration; import org.osgi.service.component.annotations.Reference; import org.slf4j.Logger; @@ -20,7 +21,9 @@ final class GlobalBindingRTCServer extends AbstractRpcbenchPayloadService implem private final Registration reg; GlobalBindingRTCServer(@Reference final RpcProviderService rpcProvider) { - reg = rpcProvider.registerRpcImplementation(RpcbenchPayloadService.class, this); + reg = rpcProvider.registerRpcImplementations( + (GlobalRpcBench) this::globalRpcBench, + (RoutedRpcBench) this::routedRpcBench); LOG.debug("GlobalBindingRTCServer started"); } diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java index 1982bebcd8..a6384606ed 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java @@ -7,9 +7,11 @@ */ package rpcbenchmark.impl; +import java.util.List; import java.util.Set; import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService; +import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench; +import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench; import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; @@ -17,7 +19,9 @@ final class RoutedBindingRTCServer extends AbstractRpcbenchPayloadService implem private final Registration reg; RoutedBindingRTCServer(final RpcProviderService rpcProvider, final Set> paths) { - reg = rpcProvider.registerRpcImplementation(RpcbenchPayloadService.class, this, paths); + reg = rpcProvider.registerRpcImplementations(List.of( + (GlobalRpcBench) this::globalRpcBench, + (RoutedRpcBench) this::routedRpcBench), paths); } @Override diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java index d2c3ae339d..bd5e83e8bf 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java @@ -16,11 +16,11 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import 
org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInputBuilder; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput; -import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey; @@ -31,15 +31,15 @@ import org.slf4j.LoggerFactory; public class RoutedBindingRTClient implements RTCClient { private static final Logger LOG = LoggerFactory.getLogger(RoutedBindingRTClient.class); - private final RpcbenchPayloadService service; + private final RoutedRpcBench routedRpcBench; private final AtomicLong rpcOk = new AtomicLong(0); private final AtomicLong rpcError = new AtomicLong(0); private final List inVal = new ArrayList<>(); private final int inSize; - public RoutedBindingRTClient(final RpcConsumerRegistry registry, final int inSize, + public RoutedBindingRTClient(final RpcService rpcService, final int inSize, final List> routeIid) { - service = registry.getRpcService(RpcbenchPayloadService.class); + routedRpcBench = rpcService.getRpc(RoutedRpcBench.class); this.inSize = inSize; Builder listVals = ImmutableMap.builderWithExpectedSize(inSize); @@ -72,7 +72,7 @@ public class RoutedBindingRTClient implements RTCClient { int rpcServerCnt = inVal.size(); for (int i = 0; i < iterations; i++) { RoutedRpcBenchInput input = inVal.get(ThreadLocalRandom.current().nextInt(rpcServerCnt)); - Future> output = service.routedRpcBench(input); + Future> output = routedRpcBench.invoke(input); try { RpcResult rpcResult = output.get(); diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java index 306539ce66..c785e9b384 100644 --- a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java +++ b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java @@ -21,15 +21,16 @@ import java.util.concurrent.atomic.AtomicReference; import javax.annotation.PreDestroy; import javax.inject.Inject; import javax.inject.Singleton; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; import org.opendaylight.mdsal.binding.api.RpcProviderService; +import org.opendaylight.mdsal.binding.api.RpcService; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchRpcRoutes; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRoute; import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRouteKey; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.RpcbenchmarkService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTest; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutputBuilder; +import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatus; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput.ExecStatus; @@ -50,24 +51,24 @@ import org.slf4j.LoggerFactory; @Singleton @Component(service = {}) @RequireServiceComponentRuntime -public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService { +public final class RpcbenchmarkProvider implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(RpcbenchmarkProvider.class); private static final int TEST_TIMEOUT = 5; private final AtomicReference execStatus = new AtomicReference<>(ExecStatus.Idle); private final RpcProviderService providerRegistry; - private final RpcConsumerRegistry consumerRegistry; + private final RpcService consumerRegistry; private final GlobalBindingRTCServer globalServer; private final Registration reg; @Inject @Activate public RpcbenchmarkProvider(@Reference final RpcProviderService providerRegistry, - @Reference final RpcConsumerRegistry consumerRegistry) { + @Reference final RpcService consumerRegistry) { this.providerRegistry = requireNonNull(providerRegistry); this.consumerRegistry = requireNonNull(consumerRegistry); globalServer = new GlobalBindingRTCServer(providerRegistry); - reg = providerRegistry.registerRpcImplementation(RpcbenchmarkService.class, this); + reg = providerRegistry.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest); LOG.info("RpcbenchmarkProvider initiated"); } @@ -80,8 +81,7 @@ public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkSe LOG.info("RpcbenchmarkProvider closed"); } - @Override - public ListenableFuture> startTest(final StartTestInput input) { + private ListenableFuture> startTest(final StartTestInput input) { LOG.debug("startTest {}", input); final RTCClient client; @@ -149,8 +149,7 @@ public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkSe } } - @Override - public ListenableFuture> testStatus(final TestStatusInput input) { + private ListenableFuture> testStatus(final TestStatusInput input) { LOG.info("testStatus"); TestStatusOutput output = new TestStatusOutputBuilder() .setGlobalServerCnt(Uint32.valueOf(globalServer.getNumRpcs())) diff --git a/benchmark/segjournal-benchmark/pom.xml b/benchmark/segjournal-benchmark/pom.xml new file mode 100644 index 0000000000..72197da341 --- /dev/null +++ b/benchmark/segjournal-benchmark/pom.xml @@ -0,0 +1,161 @@ + + + + 4.0.0 + + org.opendaylight.controller + mdsal-parent + 9.0.3-SNAPSHOT + ../../opendaylight/md-sal/parent + + + segjournal-benchmark + jar + + + true + true + true + + + + + com.github.spotbugs + spotbugs-annotations + true + + + org.eclipse.jdt + org.eclipse.jdt.annotation + + + net.sourceforge.argparse4j + argparse4j + + + org.slf4j + slf4j-api + + + ch.qos.logback + logback-classic + compile + + + commons-io + commons-io + + + io.dropwizard.metrics + metrics-core + + + org.opendaylight.controller + repackaged-akka + + + org.opendaylight.controller + sal-clustering-commons + + + org.opendaylight.controller + atomix-storage + + + org.opendaylight.controller + sal-akka-segmented-journal + + + org.opendaylight.controller + sal-clustering-config + + + + + + + 
org.apache.maven.plugins + maven-shade-plugin + + false + + + + package + + shade + + + true + executable + + + *:* + + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + org.opendaylight.controller.akka.segjournal.BenchmarkMain + + + + + + + + + + + + benchmarks + + + + org.codehaus.mojo + exec-maven-plugin + 3.1.1 + + + execute-segmented-journal-benchmark + integration-test + + exec + + + + + java + true + + -classpath + + + org.opendaylight.controller.akka.segjournal.BenchmarkMain + + --current + + -n100000 + + -p100K + + + + + + + + \ No newline at end of file diff --git a/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkMain.java b/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkMain.java new file mode 100644 index 0000000000..5c711088e3 --- /dev/null +++ b/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkMain.java @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.akka.segjournal; + +import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.buildConfig; +import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.formatBytes; +import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.formatNanos; +import static org.opendaylight.controller.akka.segjournal.BenchmarkUtils.toMetricId; + +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.persistence.AtomicWrite; +import akka.persistence.PersistentRepr; +import com.google.common.base.Stopwatch; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.Serializable; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.apache.commons.io.FileUtils; +import org.opendaylight.controller.akka.segjournal.BenchmarkUtils.BenchmarkConfig; +import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages; +import org.opendaylight.controller.cluster.common.actor.MeteringBehavior; +import org.opendaylight.controller.cluster.reporting.MetricsReporter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import scala.concurrent.Future; + +public final class BenchmarkMain { + private static final String BENCHMARK = "benchmark"; + private static final Logger LOG = LoggerFactory.getLogger("benchmark"); + + public static void main(String[] args) { + final var config = buildConfig(args); + final var benchmark = new BenchmarkMain(config); + Runtime.getRuntime().addShutdownHook(new Thread(benchmark::shutdown)); + benchmark.execute(); + System.exit(0); + } + + private final BenchmarkConfig config; + private final ActorSystem system; + private final ScheduledExecutorService executor; + private ActorRef actor; + + private BenchmarkMain(BenchmarkConfig config) { + this.config = config; + system = ActorSystem.create(BENCHMARK); + executor = Executors.newSingleThreadScheduledExecutor( + new 
ThreadFactoryBuilder().setNameFormat("progress-check-%d").build()); + } + + void execute() { + LOG.info("Starting with settings"); + LOG.info("\tstorage : {}", config.storage()); + LOG.info("\tworking dir : {}", config.workingDir().getAbsolutePath()); + LOG.info("\tmaxEntrySize : {}", formatBytes(config.maxEntrySize())); + LOG.info("\tmaxSegmentSize : {}", formatBytes(config.maxSegmentSize())); + LOG.info("\tmaxUnflushedBytes : {}", formatBytes(config.maxUnflushedBytes())); + + final var minLoadSize = Math.round(config.payloadSize() * 0.8f); + final var maxLoadSize = Math.min(Math.round(config.payloadSize() * 1.2f), config.maxEntrySize()); + LOG.info("Preparing load"); + LOG.info("\tnumber of messages : {}", config.messagesNum()); + LOG.info("\tpayload size : {} .. {}", formatBytes(minLoadSize), formatBytes(maxLoadSize)); + + // reset metrics + final var metricsRegistry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry(); + final var keys = metricsRegistry.getMetrics().keySet(); + keys.forEach(metricsRegistry::remove); + + // get actor + actor = system.actorOf( + SegmentedJournalActor.props("perf", config.workingDir(), config.storage(), + config.maxEntrySize(), config.maxSegmentSize(), config.maxUnflushedBytes())); + + // randomize payloads + final var random = ThreadLocalRandom.current(); + final var payloads = new Payload[1_000]; + for (int i = 0; i < payloads.length; ++i) { + final var bytes = new byte[random.nextInt(minLoadSize, maxLoadSize)]; + random.nextBytes(bytes); + payloads[i] = new Payload(bytes); + } + + // enable periodic check for completed writes + final var results = new ConcurrentLinkedQueue>>(); + final var progressReporter = + new ProgressReporter(executor, results, config.messagesNum(), 10, TimeUnit.SECONDS); + + // start async message writing + final var sw = Stopwatch.createStarted(); + for (int i = 0; i < config.messagesNum(); ++i) { + results.add(writeMessage(i, payloads[random.nextInt(payloads.length)])); + } + LOG.info("{} Messages sent to akka in {}", config.messagesNum(), sw); + + // await completion + try { + progressReporter.awaitCompletion(); + } catch (InterruptedException e) { + LOG.error("Interrupted", e); + } + LOG.info("Messages written in {}", sw.stop()); + + // report + LOG.info("Following metrics collected"); + // meters + metricsRegistry.getMeters().forEach((key, meter) -> { + LOG.info("Meter '{}'", toMetricId(key)); + LOG.info("\tCount = {}", meter.getCount()); + LOG.info("\tMean Rate = {}", meter.getMeanRate()); + LOG.info("\t1 Min Rate = {}", meter.getOneMinuteRate()); + LOG.info("\t5 Min Rate = {}", meter.getFiveMinuteRate()); + LOG.info("\t15 Min Rate = {}", meter.getFifteenMinuteRate()); + }); + // timers + metricsRegistry.getTimers().forEach((key, timer) -> { + LOG.info("Timer '{}'", toMetricId(key)); + final var snap = timer.getSnapshot(); + LOG.info("\tMin = {}", formatNanos(snap.getMin())); + LOG.info("\tMax = {}", formatNanos(snap.getMax())); + LOG.info("\tMean = {}", formatNanos(snap.getMean())); + LOG.info("\tStdDev = {}", formatNanos(snap.getStdDev())); + LOG.info("\tMedian = {}", formatNanos(snap.getMedian())); + LOG.info("\t75th = {}", formatNanos(snap.get75thPercentile())); + LOG.info("\t95th = {}", formatNanos(snap.get95thPercentile())); + LOG.info("\t98th = {}", formatNanos(snap.get98thPercentile())); + LOG.info("\t99th = {}", formatNanos(snap.get99thPercentile())); + LOG.info("\t99.9th = {}", formatNanos(snap.get999thPercentile())); + }); + // histograms + metricsRegistry.getHistograms().forEach((key, 
histogram) -> { + LOG.info("Histogram '{}'", toMetricId(key)); + final var snap = histogram.getSnapshot(); + LOG.info("\tMin = {}", snap.getMin()); + LOG.info("\tMax = {}", snap.getMax()); + LOG.info("\tMean = {}", snap.getMean()); + LOG.info("\tStdDev = {}", snap.getStdDev()); + LOG.info("\tMedian = {}", snap.getMedian()); + LOG.info("\t75th = {}", snap.get75thPercentile()); + LOG.info("\t95th = {}", snap.get95thPercentile()); + LOG.info("\t98th = {}", snap.get98thPercentile()); + LOG.info("\t99th = {}", snap.get99thPercentile()); + LOG.info("\t99.9th = {}", snap.get999thPercentile()); + }); + } + + Future> writeMessage(final long seqNum, final Payload payload) { + final var writeMessage = new WriteMessages(); + final var result = writeMessage.add(AtomicWrite.apply( + PersistentRepr.apply(payload, seqNum, BENCHMARK, null, false, ActorRef.noSender(), "uuid"))); + actor.tell(writeMessage, ActorRef.noSender()); + return result; + } + + void shutdown() { + LOG.info("shutting down ..."); + executor.shutdown(); + if (actor != null) { + system.stop(actor); + } + if (config.workingDir().exists()) { + FileUtils.deleteQuietly(config.workingDir()); + } + system.terminate(); + LOG.info("Done."); + } + + private static final class Payload implements Serializable { + @java.io.Serial + private static final long serialVersionUID = 1L; + final byte[] bytes; + + Payload(final byte[] bytes) { + this.bytes = bytes; + } + } + + private static final class ProgressReporter implements Runnable { + final ScheduledExecutorService executor; + final CountDownLatch latch = new CountDownLatch(1); + final Queue>> queue; + final long total; + final int checkInterval; + final TimeUnit timeUnit; + long completed; + long errors; + + ProgressReporter(final ScheduledExecutorService executor, final Queue>> queue, + final long total, final int checkInterval, final TimeUnit timeUnit) { + this.executor = executor; + this.queue = queue; + this.total = total; + this.checkInterval = checkInterval; + this.timeUnit = timeUnit; + scheduleNextCheck(); + } + + @Override + public void run() { + // release completed from the beginning of the queue + while (!queue.isEmpty() && queue.peek().isCompleted()) { + final var future = queue.poll(); + completed++; + if (!future.value().get().get().isEmpty()) { + errors++; + } + } + LOG.info("{} of {} = {}% messages written, {} in queue", + completed, total, completed * 100 / total, queue.size()); + if (total == completed) { + LOG.info("Check completed, errors found : {}", errors); + latch.countDown(); + return; + } + scheduleNextCheck(); + } + + void scheduleNextCheck() { + executor.schedule(this, checkInterval, timeUnit); + } + + void awaitCompletion() throws InterruptedException { + latch.await(); + } + } +} diff --git a/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkUtils.java b/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkUtils.java new file mode 100644 index 0000000000..30ff871a51 --- /dev/null +++ b/benchmark/segjournal-benchmark/src/main/java/org/opendaylight/controller/akka/segjournal/BenchmarkUtils.java @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2024 PANTHEON.tech s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.akka.segjournal; + +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_ENTRY_SIZE; +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_ENTRY_SIZE_DEFAULT; +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_SEGMENT_SIZE; +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_SEGMENT_SIZE_DEFAULT; +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MAX_UNFLUSHED_BYTES; +import static org.opendaylight.controller.akka.segjournal.SegmentedFileJournal.STORAGE_MEMORY_MAPPED; + +import com.google.common.base.Stopwatch; +import com.google.common.base.Ticker; +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; +import io.atomix.storage.journal.StorageLevel; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.Map; +import net.sourceforge.argparse4j.ArgumentParsers; +import net.sourceforge.argparse4j.impl.Arguments; +import net.sourceforge.argparse4j.inf.ArgumentParser; +import net.sourceforge.argparse4j.inf.ArgumentParserException; + +@SuppressWarnings("RegexpSinglelineJava") +final class BenchmarkUtils { + + static final String PROG_NAME = "segjourlan-benchmark"; + + static final String BENCHMARK_USE_CURRENT = "current"; + static final String BENCHMARK_NUMBER_OF_MESSAGES = "messages-num"; + static final String BENCHMARK_PAYLOAD_SIZE = "payload-size"; + static final String BENCHMARK_PAYLOAD_SIZE_DEFAULT = "10K"; + + static final String CURRENT_CONFIG_RESOURCE = "/initial/factory-akka.conf"; + static final String CURRENT_CONFIG_PATH = "odl-cluster-data.akka.persistence.journal.segmented-file"; + + private static final String[] BYTE_SFX = {"G", "M", "K"}; + private static final int[] BYTE_THRESH = {1024 * 1024 * 1024, 1024 * 1024, 1024}; + + record BenchmarkConfig(StorageLevel storage, File workingDir, int maxEntrySize, int maxSegmentSize, + int maxUnflushedBytes, int payloadSize, int messagesNum) { + } + + private BenchmarkUtils() { + // utility class + } + + static BenchmarkConfig buildConfig(final String[] args) { + final var parser = getArgumentParser(); + final var paramsMap = new HashMap(); + try { + parser.parseArgs(args, paramsMap); + } catch (ArgumentParserException e) { + parser.handleError(e); + System.exit(1); + return null; + } + return toConfig(paramsMap); + } + + private static ArgumentParser getArgumentParser() { + final var parser = ArgumentParsers.newArgumentParser(PROG_NAME).defaultHelp(true); + + parser.description("Performs asynchronous write to segmented journal, collects and prints variety of metrics"); + + parser.addArgument("--current") + .type(Boolean.class).setDefault(Boolean.FALSE) + .action(Arguments.storeConst()).setConst(Boolean.TRUE) + .dest(BENCHMARK_USE_CURRENT) + .help("indicates base configuration to be taken from current cluster configuration, " + + "all other arguments excepting 'requests' and 'payload size' will be ignored"); + + parser.addArgument("--memory-mapped") + .type(Boolean.class).setDefault(Boolean.FALSE) + 
.action(Arguments.storeConst()).setConst(Boolean.TRUE) + .dest(STORAGE_MEMORY_MAPPED) + .help("indicates mapping journal segments to memory, otherwise file system is used"); + + parser.addArgument("-e", "--max-entry-size") + .type(String.class).setDefault(formatBytes(STORAGE_MAX_ENTRY_SIZE_DEFAULT)) + .dest(STORAGE_MAX_ENTRY_SIZE) + .help("max entry size, bytes format"); + + parser.addArgument("-s", "--max-segment-size") + .type(String.class).setDefault(formatBytes(STORAGE_MAX_SEGMENT_SIZE_DEFAULT)) + .dest(STORAGE_MAX_SEGMENT_SIZE) + .help("max segment size, bytes "); + + parser.addArgument("-u", "--max-unflushed-bytes") + .type(String.class) + .dest(STORAGE_MAX_UNFLUSHED_BYTES) + .help("max unflushed bytes, bytes format, " + + "if not defined the value is taken from 'max-entry-size'"); + + parser.addArgument("-n", "--messages-num") + .type(Integer.class).required(true) + .dest(BENCHMARK_NUMBER_OF_MESSAGES) + .setDefault(10_000) + .help("number of messages to write"); + + parser.addArgument("-p", "--payload-size") + .type(String.class).setDefault(BENCHMARK_PAYLOAD_SIZE_DEFAULT) + .dest(BENCHMARK_PAYLOAD_SIZE) + .help("median for request payload size, bytes format supported, " + + "actual size is variable 80% to 120% from defined median value"); + + return parser; + } + + static BenchmarkConfig toConfig(final Map paramsMap) { + final var inputConfig = ConfigFactory.parseMap(paramsMap); + final var finalConfig = (Boolean) paramsMap.get(BENCHMARK_USE_CURRENT) + ? currentConfig().withFallback(inputConfig) : inputConfig; + + final var benchmarkConfig = new BenchmarkConfig( + finalConfig.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK, + createTempDirectory(), + bytes(finalConfig, STORAGE_MAX_ENTRY_SIZE), + bytes(finalConfig, STORAGE_MAX_SEGMENT_SIZE), + finalConfig.hasPath(STORAGE_MAX_UNFLUSHED_BYTES) + ? bytes(finalConfig, STORAGE_MAX_UNFLUSHED_BYTES) : bytes(finalConfig, STORAGE_MAX_ENTRY_SIZE), + bytes(finalConfig, BENCHMARK_PAYLOAD_SIZE), + finalConfig.getInt(BENCHMARK_NUMBER_OF_MESSAGES) + ); + // validate + if (benchmarkConfig.payloadSize > benchmarkConfig.maxEntrySize) { + printAndExit("payloadSize should be less than maxEntrySize"); + } + return benchmarkConfig; + } + + private static int bytes(final Config config, final String key) { + final var bytesLong = config.getBytes(key); + if (bytesLong <= 0 || bytesLong > Integer.MAX_VALUE) { + printAndExit( + key + " value (" + bytesLong + ") is invalid, expected in range 1 .. 
" + Integer.MAX_VALUE); + } + return bytesLong.intValue(); + } + + static Config currentConfig() { + try (var in = BenchmarkUtils.class.getResourceAsStream(CURRENT_CONFIG_RESOURCE)) { + final var content = new String(in.readAllBytes(), StandardCharsets.UTF_8); + final var globalConfig = ConfigFactory.parseString(content); + final var currentConfig = globalConfig.getConfig(CURRENT_CONFIG_PATH); + System.out.println("Current configuration loaded from " + CURRENT_CONFIG_RESOURCE); + return currentConfig; + + } catch (IOException e) { + printAndExit("Error loading current configuration from resource " + CURRENT_CONFIG_RESOURCE, e); + return null; + } + } + + private static File createTempDirectory() { + try { + return Files.createTempDirectory(PROG_NAME).toFile(); + } catch (IOException e) { + printAndExit("Cannot create temp directory", e); + } + return null; + } + + private static void printAndExit(final String message) { + printAndExit(message, null); + } + + private static void printAndExit(final String message, final Exception exception) { + System.err.println(message); + if (exception != null) { + exception.printStackTrace(System.err); + } + System.exit(1); + } + + static String formatBytes(int bytes) { + for (int i = 0; i < 3; i++) { + if (bytes > BYTE_THRESH[i]) { + return bytes / BYTE_THRESH[i] + BYTE_SFX[i]; + } + } + return String.valueOf(bytes); + } + + static String formatNanos(final double nanos) { + return formatNanos(Math.round(nanos)); + } + + static String formatNanos(final long nanos) { + return Stopwatch.createStarted(new Ticker() { + boolean started; + + @Override + public long read() { + if (started) { + return nanos; + } + started = true; + return 0; + } + }).toString(); + } + + static String toMetricId(final String metricKey) { + return metricKey.substring(metricKey.lastIndexOf('.') + 1); + } +} diff --git a/benchmark/segjournal-benchmark/src/main/resources/logback.xml b/benchmark/segjournal-benchmark/src/main/resources/logback.xml new file mode 100644 index 0000000000..1ed4b9f412 --- /dev/null +++ b/benchmark/segjournal-benchmark/src/main/resources/logback.xml @@ -0,0 +1,20 @@ + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + \ No newline at end of file diff --git a/bundle-parent/pom.xml b/bundle-parent/pom.xml index 54ca4c8741..5d373d3f37 100644 --- a/bundle-parent/pom.xml +++ b/bundle-parent/pom.xml @@ -10,14 +10,14 @@ 4.0.0 org.opendaylight.mdsal - dom-parent - 8.0.10 + bundle-parent + 13.0.1 org.opendaylight.controller bundle-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -25,7 +25,7 @@ org.opendaylight.controller controller-artifacts - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom import @@ -34,12 +34,12 @@ org.scala-lang scala-library - 2.13.8 + 2.13.13 org.scala-lang scala-reflect - 2.13.8 + 2.13.13 org.scala-lang.modules @@ -54,7 +54,7 @@ org.scalatestplus junit-4-13_2.13 - 3.2.5.0 + 3.2.13.0 test @@ -63,19 +63,19 @@ com.typesafe config - 1.4.0 + 1.4.2 com.typesafe ssl-config-core_2.13 - 0.4.2 + 0.4.3 com.typesafe.akka akka-testkit_2.13 - 2.6.18 + 2.6.21 test @@ -87,7 +87,7 @@ com.typesafe.akka akka-actor-testkit-typed_2.13 - 2.6.18 + 2.6.21 test @@ -103,7 +103,7 @@ com.typesafe.akka akka-persistence-tck_2.13 - 2.6.18 + 2.6.21 test @@ -117,24 +117,24 @@ org.reactivestreams reactive-streams - 1.0.3 + 1.0.4 org.agrona agrona - 1.14.0 + 1.15.2 io.aeron aeron-client - 1.37.0 + 1.38.1 io.aeron aeron-driver - 1.37.0 + 1.38.1 diff --git a/docs/dev-guide.rst b/docs/dev-guide.rst index 1555fc4e9c..7e8d867cfe 100644 --- a/docs/dev-guide.rst +++ 
b/docs/dev-guide.rst @@ -28,11 +28,6 @@ The OpenDaylight Controller relies on the following technologies: The OpenDaylight Controller provides following model-driven subsystems as a foundation for Java applications: -- :ref:`config_subsystem` - an activation, - dependency-injection and configuration framework, which allows - two-phase commits of configuration and dependency-injection, and - allows for run-time rewiring. - - :ref:`MD-SAL ` - messaging and data storage functionality for data, notifications and RPCs modeled by application developers. MD-SAL uses YANG as the modeling for both interface and @@ -879,21 +874,21 @@ RESTCONF operations overview .. note:: - | Each request must start with the URI /restconf. + | Each request must start with the URI /rests. | RESTCONF listens on port 8080 for HTTP requests. RESTCONF supports **OPTIONS**, **GET**, **PUT**, **POST**, and **DELETE** operations. Request and response data can either be in the XML or JSON format. XML structures according to yang are defined at: -`XML-YANG `__. JSON structures are +`XML-YANG `__. JSON structures are defined at: -`JSON-YANG `__. +`JSON-YANG `__. Data in the request must have a correctly set **Content-Type** field in the http header with the allowed value of the media type. The media type of the requested data has to be set in the **Accept** field. Get the media types for each resource by calling the OPTIONS operation. Most of the paths of the pathsRestconf endpoints use `Instance -Identifier `__. +Identifier `__. ```` is used in the explanation of the operations. | **** @@ -906,16 +901,16 @@ Identifier can represent a data node which is a list or container yang built-in type. If the data node is a list, there must be defined keys of the list behind the data node name for example, - //. + =,. - | The format : has to be used in this case as well: | Module A has node A1. Module B augments node A1 by adding node X. Module C augments node A1 by adding node X. For clarity, it has to be known which node is X (for example: C:X). For more details about - encoding, see: `RESTCONF 02 - Encoding YANG Instance Identifiers in + encoding, see: `RESTCONF RFC 8040 - Encoding YANG Instance Identifiers in the Request - URI. `__ + URI. `__ Mount point ~~~~~~~~~~~ @@ -927,34 +922,34 @@ Mount point point itself by using /**yang-ext:mount**. | More information on how to actually use mountpoints is available at: `OpenDaylight - Controller:Config:Examples:Netconf `__. + Controller:Config:Examples:Netconf `__. HTTP methods ~~~~~~~~~~~~ -OPTIONS /restconf -^^^^^^^^^^^^^^^^^ +OPTIONS /rests +^^^^^^^^^^^^^^ - Returns the XML description of the resources with the required request and response media types in Web Application Description Language (WADL) -GET /restconf/config/ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +GET /rests/data/?content=config +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Returns a data node from the Config datastore. - points to a data node which must be retrieved. -GET /restconf/operational/ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +GET /rests/data/?content=nonconfig +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- Returns the value of the data node from the Operational datastore. +- Returns the value of the data node from the non-configuration datastore. - points to a data node which must be retrieved. -PUT /restconf/config/ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PUT /rests/data/ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Updates or creates data in the Config datastore and returns the state about success. 
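
A minimal sketch of the configuration-datastore GET described above, using the JDK HTTP client; localhost, port 8080 and the module1:foo/bar path are the placeholder values used elsewhere in this guide, the class name is hypothetical, and a real deployment will typically also require authentication headers:

.. code:: java

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public final class RestconfGetExample {
        public static void main(final String[] args) throws Exception {
            final var request = HttpRequest.newBuilder()
                // ?content=config reads the configuration datastore,
                // ?content=nonconfig the operational (non-configuration) one
                .uri(URI.create("http://localhost:8080/rests/data/module1:foo/bar?content=config"))
                .header("Accept", "application/xml")
                .GET()
                .build();
            final var response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode());
            System.out.println(response.body());
        }
    }
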
@@ -965,7 +960,7 @@ PUT /restconf/config/ :: - PUT http://:8080/restconf/config/module1:foo/bar + PUT http://:8080/rests/data/module1:foo/bar Content-Type: applicaton/xml … @@ -975,14 +970,14 @@ PUT /restconf/config/ :: - PUT http://:8080/restconf/config/module1:foo1/foo2/yang-ext:mount/module2:foo/bar + PUT http://:8080/rests/data/module1:foo1/foo2/yang-ext:mount/module2:foo/bar Content-Type: applicaton/xml … -POST /restconf/config -^^^^^^^^^^^^^^^^^^^^^ +POST /rests/data +^^^^^^^^^^^^^^^^ - Creates the data if it does not exist @@ -990,7 +985,7 @@ POST /restconf/config :: - POST URL: http://localhost:8080/restconf/config/ + POST URL: http://localhost:8080/rests/data/ content-type: application/yang.data+json JSON payload: @@ -1003,8 +998,8 @@ POST /restconf/config } } -POST /restconf/config/ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +POST /rests/data/ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Creates the data if it does not exist in the Config datastore, and returns the state about success. @@ -1018,7 +1013,7 @@ POST /restconf/config/ :: - POST http://:8080/restconf/config/module1:foo + POST http://:8080/rests/data/module1:foo Content-Type: applicaton/xml/ … @@ -1028,14 +1023,14 @@ POST /restconf/config/ :: - http://:8080/restconf/config/module1:foo1/foo2/yang-ext:mount/module2:foo + http://:8080/rests/data/module1:foo1/foo2/yang-ext:mount/module2:foo Content-Type: applicaton/xml … -POST /restconf/operations/: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +POST /rests/operations/: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Invokes RPC. @@ -1051,7 +1046,7 @@ POST /restconf/operations/: :: - POST http://:8080/restconf/operations/module1:fooRpc + POST http://:8080/rests/operations/module1:fooRpc Content-Type: applicaton/xml Accept: applicaton/xml @@ -1067,7 +1062,7 @@ POST /restconf/operations/: :: - POST http://localhost:8080/restconf/operations/toaster:make-toast + POST http://localhost:8080/rests/operations/toaster:make-toast Content-Type: application/yang.data+json { "input" : @@ -1082,8 +1077,8 @@ POST /restconf/operations/: Even though this is a default for the toasterToastType value in the yang, you still need to define it. -DELETE /restconf/config/ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +DELETE /rests/data/ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Removes the data node in the Config datastore and returns the state about success. @@ -1091,7 +1086,7 @@ DELETE /restconf/config/ - points to a data node which must be removed. More information is available in the `RESTCONF -RFC `__. +RFC `__. How RESTCONF works ~~~~~~~~~~~~~~~~~~ @@ -1131,8 +1126,8 @@ CompositeNode GET in action ~~~~~~~~~~~~~ -Figure 1 shows the GET operation with URI restconf/config/M:N where M is -the module name, and N is the node name. +Figure 1 shows the GET operation with URI rests/data/M:N?content=config +where M is the module name, and N is the node name. .. figure:: ./images/Get.png :alt: Get @@ -1159,7 +1154,7 @@ the module name, and N is the node name. PUT in action ~~~~~~~~~~~~~ -Figure 2 shows the PUT operation with the URI restconf/config/M:N where +Figure 2 shows the PUT operation with the URI rests/data/M:N where M is the module name, and N is the node name. Data is sent in the request either in the XML or JSON format. 
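
The PUT flow shown in Figure 2 can be sketched the same way; the URI reuses the module1:foo/bar example above, while the XML payload, its namespace and the class name are placeholders that would have to match the actual module1 YANG model:

.. code:: java

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public final class RestconfPutExample {
        public static void main(final String[] args) throws Exception {
            // Placeholder payload; element names and namespace must follow the target model.
            final var xmlBody = "<bar xmlns=\"urn:example:module1\">value</bar>";
            final var request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/rests/data/module1:foo/bar"))
                .header("Content-Type", "application/xml")
                .PUT(HttpRequest.BodyPublishers.ofString(xmlBody))
                .build();
            final var response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
            // Expect 201 Created when the resource is created, 204 No Content when replaced.
            System.out.println(response.statusCode());
        }
    }
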
@@ -1197,7 +1192,7 @@ Something practical :: Operation: POST - URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2 + URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2 Content-Type: application/xml :: @@ -1252,7 +1247,7 @@ Something practical :: Operation: PUT - URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111 + URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111 Content-Type: application/xml :: @@ -1307,7 +1302,7 @@ Something practical :: Operation: GET - URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111 + URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111?content=config Accept: application/xml | **HTTP response** @@ -1362,7 +1357,7 @@ Something practical :: Operation: DELETE - URI: http://192.168.11.1:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/2/flow/111 + URI: http://192.168.11.1:8080/rests/data/opendaylight-inventory:nodes/node=openflow:1/table=2/flow=111 | **HTTP response** @@ -1370,543 +1365,3 @@ Something practical Status: 200 OK -Websocket change event notification subscription tutorial ---------------------------------------------------------- - -Subscribing to data change notifications makes it possible to obtain -notifications about data manipulation (insert, change, delete) which are -done on any specified **path** of any specified **datastore** with -specific **scope**. In following examples *{odlAddress}* is address of -server where ODL is running and *{odlPort}* is port on which -OpenDaylight is running. - -Websocket notifications subscription process -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In this section we will learn what steps need to be taken in order to -successfully subscribe to data change event notifications. - -Create stream -^^^^^^^^^^^^^ - -In order to use event notifications you first need to call RPC that -creates notification stream that you can later listen to. You need to -provide three parameters to this RPC: - -- **path**: data store path that you plan to listen to. You can - register listener on containers, lists and leaves. - -- **datastore**: data store type. *OPERATIONAL* or *CONFIGURATION*. - -- **scope**: Represents scope of data change. Possible options are: - - - BASE: only changes directly to the data tree node specified in the - path will be reported - - - ONE: changes to the node and to direct child nodes will be - reported - - - SUBTREE: changes anywhere in the subtree starting at the node will - be reported - -The RPC to create the stream can be invoked via RESTCONF like this: - -- URI: - http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription - -- HEADER: Content-Type=application/json - -- OPERATION: POST - -- DATA: - - .. code:: json - - { - "input": { - "path": "/toaster:toaster/toaster:toasterStatus", - "sal-remote-augment:datastore": "OPERATIONAL", - "sal-remote-augment:scope": "ONE" - } - } - -The response should look something like this: - -.. code:: json - - { - "output": { - "stream-name": "data-change-event-subscription/toaster:toaster/toaster:toasterStatus/datastore=CONFIGURATION/scope=SUBTREE" - } - } - -**stream-name** is important because you will need to use it when you -subscribe to the stream in the next step. - -.. 
note:: - - Internally, this will create a new listener for *stream-name* if it - did not already exist. - -Subscribe to stream -^^^^^^^^^^^^^^^^^^^ - -In order to subscribe to stream and obtain WebSocket location you need -to call *GET* on your stream path. The URI should generally be -http://{odlAddress}:{odlPort}/restconf/streams/stream/{streamName}, -where *{streamName}* is the *stream-name* parameter contained in -response from *create-data-change-event-subscription* RPC from the -previous step. - -- URI: - http://{odlAddress}:{odlPort}/restconf/streams/stream/data-change-event-subscription/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE - -- OPERATION: GET - -The subscription call may be modified with the following query parameters defined in the RESTCONF RFC: - -- `filter `__ - -- `start-time `__ - -- `end-time `__ - -In addition, the following ODL extension query parameter is supported: - -:odl-leaf-nodes-only: - If this parameter is set to "true", create and update notifications will only - contain the leaf nodes modified instead of the entire subscription subtree. - This can help in reducing the size of the notifications. - -:odl-skip-notification-data: - If this parameter is set to "true", create and update notifications will only - contain modified leaf nodes without data. - This can help in reducing the size of the notifications. - -The expected response status is 200 OK and response body should be -empty. You will get your WebSocket location from **Location** header of -response. For example in our particular toaster example location header -would have this value: -*ws://{odlAddress}:8185/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE* - -.. note:: - - During this phase there is an internal check for to see if a - listener for the *stream-name* from the URI exists. If not, new a - new listener is registered with the DOM data broker. - -Receive notifications -^^^^^^^^^^^^^^^^^^^^^ - -You should now have a data change notification stream created and have -location of a WebSocket. You can use this WebSocket to listen to data -change notifications. To listen to notifications you can use a -JavaScript client or if you are using chrome browser you can use the -`Simple WebSocket -Client `__. - -Also, for testing purposes, there is simple Java application named -WebSocketClient. The application is placed in the -*-sal-rest-connector-classes.class* project. It accepts a WebSocket URI -as and input parameter. After starting the utility (WebSocketClient -class directly in Eclipse/InteliJ Idea) received notifications should be -displayed in console. - -Notifications are always in XML format and look like this: - -.. code:: xml - - - 2014-09-11T09:58:23+02:00 - - - /meae:toaster - updated - - - - - - - -Example use case -~~~~~~~~~~~~~~~~ - -The typical use case is listening to data change events to update web -page data in real-time. In this tutorial we will be using toaster as the -base. - -When you call *make-toast* RPC, it sets *toasterStatus* to "down" to -reflect that the toaster is busy making toast. When it finishes, -*toasterStatus* is set to "up" again. We will listen to this toaster -status changes in data store and will reflect it on our web page in -real-time thanks to WebSocket data change notification. - -Simple javascript client implementation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We will create simple JavaScript web application that will listen -updates on *toasterStatus* leaf and update some element of our web page -according to new toaster status state. 
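The "Receive notifications" step described above can also be exercised with the
JDK 11 ``java.net.http.WebSocket`` client instead of a browser extension or the
bundled WebSocketClient utility. A minimal sketch, assuming the ``ws://`` URI is
the Location header obtained in the subscribe step (the stream name and port
below are placeholders taken from the tutorial text):

.. code:: java

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.WebSocket;
    import java.util.concurrent.CompletionStage;

    public final class NotificationListener {
        public static void main(final String[] args) throws Exception {
            // The ws:// URI is the Location header returned by the subscribe step.
            final URI streamUri = URI.create("ws://localhost:8185/toaster:toaster"
                + "/datastore=CONFIGURATION/scope=SUBTREE");

            final WebSocket ws = HttpClient.newHttpClient().newWebSocketBuilder()
                .buildAsync(streamUri, new WebSocket.Listener() {
                    @Override
                    public CompletionStage<?> onText(final WebSocket webSocket,
                            final CharSequence data, final boolean last) {
                        // Notifications arrive as XML text frames.
                        System.out.println("Received notification: " + data);
                        webSocket.request(1);
                        return null;
                    }
                })
                .join();

            Thread.sleep(60_000);    // keep the JVM alive long enough to receive events
            ws.sendClose(WebSocket.NORMAL_CLOSURE, "done").join();
        }
    }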
- -Create stream -^^^^^^^^^^^^^ - -First you need to create stream that you are planing to subscribe to. -This can be achieved by invoking "create-data-change-event-subscription" -RPC on RESTCONF via AJAX request. You need to provide data store -**path** that you plan to listen on, **data store type** and **scope**. -If the request is successful you can extract the **stream-name** from -the response and use that to subscribe to the newly created stream. The -*{username}* and *{password}* fields represent your credentials that you -use to connect to OpenDaylight via RESTCONF: - -.. note:: - - The default user name and password are "admin". - -.. code:: javascript - - function createStream() { - $.ajax( - { - url: 'http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription', - type: 'POST', - headers: { - 'Authorization': 'Basic ' + btoa('{username}:{password}'), - 'Content-Type': 'application/json' - }, - data: JSON.stringify( - { - 'input': { - 'path': '/toaster:toaster/toaster:toasterStatus', - 'sal-remote-augment:datastore': 'OPERATIONAL', - 'sal-remote-augment:scope': 'ONE' - } - } - ) - }).done(function (data) { - // this function will be called when ajax call is executed successfully - subscribeToStream(data.output['stream-name']); - }).fail(function (data) { - // this function will be called when ajax call fails - console.log("Create stream call unsuccessful"); - }) - } - -Subscribe to stream -^^^^^^^^^^^^^^^^^^^ - -The Next step is to subscribe to the stream. To subscribe to the stream -you need to call *GET* on -*http://{odlAddress}:{odlPort}/restconf/streams/stream/{stream-name}*. -If the call is successful, you get WebSocket address for this stream in -**Location** parameter inside response header. You can get response -header by calling *getResponseHeader(\ *Location*)* on HttpRequest -object inside *done()* function call: - -.. code:: javascript - - function subscribeToStream(streamName) { - $.ajax( - { - url: 'http://{odlAddress}:{odlPort}/restconf/streams/stream/' + streamName; - type: 'GET', - headers: { - 'Authorization': 'Basic ' + btoa('{username}:{password}'), - } - } - ).done(function (data, textStatus, httpReq) { - // we need function that has http request object parameter in order to access response headers. - listenToNotifications(httpReq.getResponseHeader('Location')); - }).fail(function (data) { - console.log("Subscribe to stream call unsuccessful"); - }); - } - -Receive notifications -^^^^^^^^^^^^^^^^^^^^^ - -Once you got WebSocket server location you can now connect to it and -start receiving data change events. You need to define functions that -will handle events on WebSocket. In order to process incoming events -from OpenDaylight you need to provide a function that will handle -*onmessage* events. The function must have one parameter that represents -the received event object. The event data will be stored in -*event.data*. The data will be in an XML format that you can then easily -parse using jQuery. - -.. 
code:: javascript - - function listenToNotifications(socketLocation) { - try { - var notificatinSocket = new WebSocket(socketLocation); - - notificatinSocket.onmessage = function (event) { - // we process our received event here - console.log('Received toaster data change event.'); - $($.parseXML(event.data)).find('data-change-event').each( - function (index) { - var operation = $(this).find('operation').text(); - if (operation == 'updated') { - // toaster status was updated so we call function that gets the value of toasterStatus leaf - updateToasterStatus(); - return false; - } - } - ); - } - notificatinSocket.onerror = function (error) { - console.log("Socket error: " + error); - } - notificatinSocket.onopen = function (event) { - console.log("Socket connection opened."); - } - notificatinSocket.onclose = function (event) { - console.log("Socket connection closed."); - } - // if there is a problem on socket creation we get exception (i.e. when socket address is incorrect) - } catch(e) { - alert("Error when creating WebSocket" + e ); - } - } - -The *updateToasterStatus()* function represents function that calls -*GET* on the path that was modified and sets toaster status in some web -page element according to received data. After the WebSocket connection -has been established you can test events by calling make-toast RPC via -RESTCONF. - -.. note:: - - for more information about WebSockets in JavaScript visit `Writing - WebSocket client - applications `__ - -.. _config_subsystem: - -Config Subsystem ----------------- - -Overview -~~~~~~~~ - -The Controller configuration operation has three stages: - -- First, a Proposed configuration is created. Its target is to replace - the old configuration. - -- Second, the Proposed configuration is validated, and then committed. - If it passes validation successfully, the Proposed configuration - state will be changed to Validated. - -- Finally, a Validated configuration can be Committed, and the affected - modules can be reconfigured. - -In fact, each configuration operation is wrapped in a transaction. Once -a transaction is created, it can be configured, that is to say, a user -can abort the transaction during this stage. After the transaction -configuration is done, it is committed to the validation stage. In this -stage, the validation procedures are invoked. If one or more validations -fail, the transaction can be reconfigured. Upon success, the second -phase commit is invoked. If this commit is successful, the transaction -enters the last stage, committed. After that, the desired modules are -reconfigured. If the second phase commit fails, it means that the -transaction is unhealthy - basically, a new configuration instance -creation failed, and the application can be in an inconsistent state. - -.. figure:: ./images/configuration.jpg - :alt: Configuration states - - Configuration states - -.. figure:: ./images/Transaction.jpg - :alt: Transaction states - - Transaction states - -Validation -~~~~~~~~~~ - -To secure the consistency and safety of the new configuration and to -avoid conflicts, the configuration validation process is necessary. -Usually, validation checks the input parameters of a new configuration, -and mostly verifies module-specific relationships. The validation -procedure results in a decision on whether the proposed configuration is -healthy. - -Dependency resolver -~~~~~~~~~~~~~~~~~~~ - -Since there can be dependencies between modules, a change in a module -configuration can affect the state of other modules. 
Therefore, we need -to verify whether dependencies on other modules can be resolved. The -Dependency Resolver acts in a manner similar to dependency injectors. -Basically, a dependency tree is built. - -APIs and SPIs -~~~~~~~~~~~~~ - -This section describes configuration system APIs and SPIs. - -SPIs -^^^^ - -**Module** org.opendaylight.controller.config.spi. Module is the common -interface for all modules: every module must implement it. The module is -designated to hold configuration attributes, validate them, and create -instances of service based on the attributes. This instance must -implement the AutoCloseable interface, owing to resources clean up. If -the module was created from an already running instance, it contains an -old instance of the module. A module can implement multiple services. If -the module depends on other modules, setters need to be annotated with -@RequireInterface. - -**Module creation** - -1. The module needs to be configured, set with all required attributes. - -2. The module is then moved to the commit stage for validation. If the - validation fails, the module attributes can be reconfigured. - Otherwise, a new instance is either created, or an old instance is - reconfigured. A module instance is identified by ModuleIdentifier, - consisting of the factory name and instance name. - -| **ModuleFactory** org.opendaylight.controller.config.spi. The - ModuleFactory interface must be implemented by each module factory. -| A module factory can create a new module instance in two ways: - -- From an existing module instance - -- | An entirely new instance - | ModuleFactory can also return default modules, useful for - populating registry with already existing configurations. A module - factory implementation must have a globally unique name. - -APIs -^^^^ - -+--------------------------------------+--------------------------------------+ -| ConfigRegistry | Represents functionality provided by | -| | a configuration transaction (create, | -| | destroy module, validate, or abort | -| | transaction). | -+--------------------------------------+--------------------------------------+ -| ConfigTransactionController | Represents functionality for | -| | manipulating with configuration | -| | transactions (begin, commit config). | -+--------------------------------------+--------------------------------------+ -| RuntimeBeanRegistratorAwareConfiBean | The module implementing this | -| | interface will receive | -| | RuntimeBeanRegistrator before | -| | getInstance is invoked. | -+--------------------------------------+--------------------------------------+ - -Runtime APIs -^^^^^^^^^^^^ - -+--------------------------------------+--------------------------------------+ -| RuntimeBean | Common interface for all runtime | -| | beans | -+--------------------------------------+--------------------------------------+ -| RootRuntimeBeanRegistrator | Represents functionality for root | -| | runtime bean registration, which | -| | subsequently allows hierarchical | -| | registrations | -+--------------------------------------+--------------------------------------+ -| HierarchicalRuntimeBeanRegistration | Represents functionality for runtime | -| | bean registration and | -| | unreregistration from hierarchy | -+--------------------------------------+--------------------------------------+ - -JMX APIs -^^^^^^^^ - -| JMX API is purposed as a transition between the Client API and the JMX - platform. 
- -+--------------------------------------+--------------------------------------+ -| ConfigTransactionControllerMXBean | Extends ConfigTransactionController, | -| | executed by Jolokia clients on | -| | configuration transaction. | -+--------------------------------------+--------------------------------------+ -| ConfigRegistryMXBean | Represents entry point of | -| | configuration management for | -| | MXBeans. | -+--------------------------------------+--------------------------------------+ -| Object names | Object Name is the pattern used in | -| | JMX to locate JMX beans. It consists | -| | of domain and key properties (at | -| | least one key-value pair). Domain is | -| | defined as | -| | "org.opendaylight.controller". The | -| | only mandatory property is "type". | -+--------------------------------------+--------------------------------------+ - -Use case scenarios -^^^^^^^^^^^^^^^^^^ - -| A few samples of successful and unsuccessful transaction scenarios - follow: - -**Successful commit scenario** - -1. The user creates a transaction calling creteTransaction() method on - ConfigRegistry. - -2. ConfigRegisty creates a transaction controller, and registers the - transaction as a new bean. - -3. Runtime configurations are copied to the transaction. The user can - create modules and set their attributes. - -4. The configuration transaction is to be committed. - -5. The validation process is performed. - -6. After successful validation, the second phase commit begins. - -7. Modules proposed to be destroyed are destroyed, and their service - instances are closed. - -8. Runtime beans are set to registrator. - -9. The transaction controller invokes the method getInstance on each - module. - -10. The transaction is committed, and resources are either closed or - released. - -| **Validation failure scenario** -| The transaction is the same as the previous case until the validation - process. - -1. If validation fails, (that is to day, illegal input attributes values - or dependency resolver failure), the validationException is thrown - and exposed to the user. - -2. The user can decide to reconfigure the transaction and commit again, - or abort the current transaction. - -3. On aborted transactions, TransactionController and JMXRegistrator are - properly closed. - -4. Unregistration event is sent to ConfigRegistry. - -Default module instances -^^^^^^^^^^^^^^^^^^^^^^^^ - -The configuration subsystem provides a way for modules to create default -instances. A default instance is an instance of a module, that is -created at the module bundle start-up (module becomes visible for -configuration subsystem, for example, its bundle is activated in the -OSGi environment). By default, no default instances are produced. - -The default instance does not differ from instances created later in the -module life-cycle. The only difference is that the configuration for the -default instance cannot be provided by the configuration subsystem. The -module has to acquire the configuration for these instances on its own. -It can be acquired from, for example, environment variables. After the -creation of a default instance, it acts as a regular instance and fully -participates in the configuration subsystem (It can be reconfigured or -deleted in following transactions.). 
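The JMX object names described above live in the ``org.opendaylight.controller``
domain, with ``type`` as the only mandatory key property. A minimal sketch of
listing those names with the standard ``javax.management`` API, assuming local
access to the controller's MBean server (remote access, for example through
Jolokia, works analogously):

.. code:: java

    import java.lang.management.ManagementFactory;
    import java.util.Set;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class ListConfigBeans {
        public static void main(final String[] args) throws Exception {
            // Query every bean registered under the org.opendaylight.controller domain.
            final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            final Set<ObjectName> names =
                server.queryNames(new ObjectName("org.opendaylight.controller:*"), null);
            names.forEach(name -> System.out.println(name.getCanonicalName()));
        }
    }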
diff --git a/docs/images/Get.png b/docs/images/Get.png index 5c1f48445c..74f1a92e88 100644 Binary files a/docs/images/Get.png and b/docs/images/Get.png differ diff --git a/docs/images/Transaction.jpg b/docs/images/Transaction.jpg deleted file mode 100644 index 258710ae56..0000000000 Binary files a/docs/images/Transaction.jpg and /dev/null differ diff --git a/docs/images/configuration.jpg b/docs/images/configuration.jpg deleted file mode 100644 index 3b07a2b55e..0000000000 Binary files a/docs/images/configuration.jpg and /dev/null differ diff --git a/docs/pom.xml b/docs/pom.xml index 1aa368380c..22c11bcb3c 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -12,14 +12,14 @@ org.opendaylight.odlparent odlparent - 9.0.12 + 13.0.11 org.opendaylight.controller controller-docs jar - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ${project.artifactId} Controller documentation @@ -36,28 +36,6 @@ - - - org.opendaylight.controller - netty-event-executor-config - - - org.opendaylight.controller - netty-threadgroup-config - - - org.opendaylight.controller - netty-timer-config - - - org.opendaylight.controller - threadpool-config-api - - - org.opendaylight.controller - threadpool-config-impl - - org.opendaylight.controller @@ -97,13 +75,17 @@ + + com.github.spotbugs + spotbugs-annotations + com.guicedee.services javax.inject - javax.annotation - javax.annotation-api + jakarta.annotation + jakarta.annotation-api org.kohsuke.metainf-services @@ -111,11 +93,19 @@ org.osgi - osgi.core + org.osgi.framework + + + org.osgi + org.osgi.service.component + + + org.osgi + org.osgi.service.component.annotations org.osgi - osgi.cmpn + org.osgi.service.metatype.annotations @@ -214,19 +204,16 @@ https://junit.org/junit4/javadoc/4.13/ - http://hamcrest.org/JavaHamcrest/javadoc/2.2/ - http://google.github.io/truth/api/1.0.1/ - http://www.slf4j.org/apidocs/ - https://google.github.io/guava/releases/29.0-jre/api/docs/ - http://doc.akka.io/japi/akka/2.6.18/ - http://netty.io/4.1/api/ - https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/ - https://commons.apache.org/proper/commons-lang/javadocs/api-3.9/ - https://commons.apache.org/proper/commons-codec/apidocs/ + https://hamcrest.org/JavaHamcrest/javadoc/2.2/ + https://www.slf4j.org/apidocs/ + https://guava.dev/releases/32.0.1-jre/api/docs/ + https://doc.akka.io/japi/akka/2.6/ + https://netty.io/4.1/api/ + https://commons.apache.org/proper/commons-lang/javadocs/api-release/ - https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/9.0.12/ - https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/7.0.12/ - https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/8.0.10/ + https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/13.0.11/ + https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/13.0.2/ + https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/13.0.1/ diff --git a/features/features-controller-experimental/pom.xml b/features/features-controller-experimental/pom.xml index f864c9db94..6401adb5fe 100644 --- a/features/features-controller-experimental/pom.xml +++ b/features/features-controller-experimental/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent feature-repo-parent - 9.0.12 + 13.0.11 org.opendaylight.controller features-controller-experimental - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT feature OpenDaylight :: Controller :: Experimental Features Controller Experimental Features @@ -35,12 +35,6 @@ - - org.opendaylight.controller - odl-controller-exp-netty-config - xml - features - 
org.opendaylight.controller odl-toaster diff --git a/features/features-controller-testing/pom.xml b/features/features-controller-testing/pom.xml index d562accfc9..fb095fe8a1 100644 --- a/features/features-controller-testing/pom.xml +++ b/features/features-controller-testing/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent feature-repo-parent - 9.0.12 + 13.0.11 org.opendaylight.controller features-controller-testing - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT feature OpenDaylight :: Controller :: Features to support CSIT testing Controller CSIT Features diff --git a/features/features-controller/pom.xml b/features/features-controller/pom.xml index c652fb26b1..64d72cf7c7 100644 --- a/features/features-controller/pom.xml +++ b/features/features-controller/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent feature-repo-parent - 9.0.12 + 13.0.11 org.opendaylight.controller features-controller - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT feature OpenDaylight :: Controller :: Features Controller Production Features diff --git a/features/odl-clustering-test-app/pom.xml b/features/odl-clustering-test-app/pom.xml index e1f76bf472..672ac82c36 100644 --- a/features/odl-clustering-test-app/pom.xml +++ b/features/odl-clustering-test-app/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-clustering-test-app/src/main/feature/feature.xml b/features/odl-clustering-test-app/src/main/feature/feature.xml index 7829413d47..69d3b61ee2 100644 --- a/features/odl-clustering-test-app/src/main/feature/feature.xml +++ b/features/odl-clustering-test-app/src/main/feature/feature.xml @@ -8,6 +8,6 @@ --> - odl-mdsal-model-rfc6991 + odl-mdsal-model-rfc6991 diff --git a/features/odl-controller-akka/pom.xml b/features/odl-controller-akka/pom.xml index 458ed4446b..b1f43ef15b 100644 --- a/features/odl-controller-akka/pom.xml +++ b/features/odl-controller-akka/pom.xml @@ -14,7 +14,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-controller-akka/src/main/history/dependencies.xml b/features/odl-controller-akka/src/main/history/dependencies.xml index e0f6329f4d..4e7493fb0b 100644 --- a/features/odl-controller-akka/src/main/history/dependencies.xml +++ b/features/odl-controller-akka/src/main/history/dependencies.xml @@ -2,14 +2,14 @@ odl-controller-scala - mvn:com.typesafe/config/1.4.0 - mvn:com.typesafe/ssl-config-core_2.13/0.4.2 - mvn:io.aeron/aeron-client/1.37.0 - mvn:io.aeron/aeron-driver/1.37.0 + mvn:com.typesafe/config/1.4.2 + mvn:com.typesafe/ssl-config-core_2.13/0.4.3 + mvn:io.aeron/aeron-client/1.38.1 + mvn:io.aeron/aeron-driver/1.38.1 mvn:io.netty/netty/3.10.6.Final - mvn:org.agrona/agrona/1.14.0 + mvn:org.agrona/agrona/1.15.2 mvn:org.opendaylight.controller/repackaged-akka/${project.version} - mvn:org.reactivestreams/reactive-streams/1.0.3 + mvn:org.reactivestreams/reactive-streams/1.0.4 wrap wrap:mvn:org.lmdbjava/lmdbjava/0.7.0 diff --git a/features/odl-controller-blueprint/pom.xml b/features/odl-controller-blueprint/pom.xml index 3c1ae262a9..b6bba0b97c 100644 --- a/features/odl-controller-blueprint/pom.xml +++ b/features/odl-controller-blueprint/pom.xml @@ -12,7 +12,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-controller-blueprint/src/main/feature/feature.xml b/features/odl-controller-blueprint/src/main/feature/feature.xml index 
c18573aa3a..82dfaca522 100644 --- a/features/odl-controller-blueprint/src/main/feature/feature.xml +++ b/features/odl-controller-blueprint/src/main/feature/feature.xml @@ -8,10 +8,10 @@ --> - odl-yangtools-codec - odl-mdsal-binding-api - odl-mdsal-binding-runtime - odl-mdsal-dom-api + odl-yangtools-codec + odl-mdsal-binding-api + odl-mdsal-binding-runtime + odl-mdsal-dom-api mvn:org.opendaylight.controller/blueprint/${project.version} diff --git a/features/odl-controller-broker-local/pom.xml b/features/odl-controller-broker-local/pom.xml index 7ed88b092c..9bb7bc3a64 100644 --- a/features/odl-controller-broker-local/pom.xml +++ b/features/odl-controller-broker-local/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-controller-broker-local/src/main/feature/feature.xml b/features/odl-controller-broker-local/src/main/feature/feature.xml index 9a765beb48..2068ab79f9 100644 --- a/features/odl-controller-broker-local/src/main/feature/feature.xml +++ b/features/odl-controller-broker-local/src/main/feature/feature.xml @@ -8,9 +8,9 @@ --> - odl-mdsal-dom - odl-mdsal-eos-binding - odl-mdsal-eos-dom - odl-mdsal-singleton-dom + odl-mdsal-dom + odl-mdsal-eos-binding + odl-mdsal-eos-dom + odl-mdsal-singleton-dom diff --git a/features/odl-controller-exp-netty-config/pom.xml b/features/odl-controller-exp-netty-config/pom.xml deleted file mode 100644 index 4a29734207..0000000000 --- a/features/odl-controller-exp-netty-config/pom.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.controller - single-feature-parent - 5.0.0-SNAPSHOT - ../single-feature-parent - - - odl-controller-exp-netty-config - feature - OpenDaylight :: Controller :: Experimental :: Netty Configuration - Common configuration for Netty resources - - - - org.opendaylight.odlparent - odl-netty-4 - xml - features - - - org.opendaylight.controller - netty-event-executor-config - - - org.opendaylight.controller - netty-threadgroup-config - - - org.opendaylight.controller - netty-timer-config - - - org.opendaylight.controller - threadpool-config-api - - - org.opendaylight.controller - threadpool-config-impl - - - diff --git a/features/odl-controller-exp-netty-config/src/main/feature/feature.xml b/features/odl-controller-exp-netty-config/src/main/feature/feature.xml deleted file mode 100644 index 2eac124f29..0000000000 --- a/features/odl-controller-exp-netty-config/src/main/feature/feature.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - odl-netty-4 - - diff --git a/features/odl-controller-mdsal-common/pom.xml b/features/odl-controller-mdsal-common/pom.xml index 06307baf38..be8cc89a13 100644 --- a/features/odl-controller-mdsal-common/pom.xml +++ b/features/odl-controller-mdsal-common/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-controller-mdsal-common/src/main/feature/feature.xml b/features/odl-controller-mdsal-common/src/main/feature/feature.xml index ffc34187e5..7ae191c7be 100644 --- a/features/odl-controller-mdsal-common/src/main/feature/feature.xml +++ b/features/odl-controller-mdsal-common/src/main/feature/feature.xml @@ -8,7 +8,7 @@ --> - odl-mdsal-common - odl-mdsal-binding-runtime + odl-mdsal-common + odl-mdsal-binding-runtime diff --git a/features/odl-controller-scala/pom.xml b/features/odl-controller-scala/pom.xml index 7bc24ca458..5653979564 100644 --- a/features/odl-controller-scala/pom.xml +++ 
b/features/odl-controller-scala/pom.xml @@ -14,7 +14,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-controller-scala/src/main/history/dependencies.xml b/features/odl-controller-scala/src/main/history/dependencies.xml index b4469d23d0..73764f6fec 100644 --- a/features/odl-controller-scala/src/main/history/dependencies.xml +++ b/features/odl-controller-scala/src/main/history/dependencies.xml @@ -3,7 +3,7 @@ mvn:org.scala-lang.modules/scala-java8-compat_2.13/1.0.2 mvn:org.scala-lang.modules/scala-parser-combinators_2.13/1.1.2 - mvn:org.scala-lang/scala-library/2.13.8 - mvn:org.scala-lang/scala-reflect/2.13.8 + mvn:org.scala-lang/scala-library/2.13.13 + mvn:org.scala-lang/scala-reflect/2.13.13 diff --git a/features/odl-jolokia/pom.xml b/features/odl-jolokia/pom.xml index 48dedfda35..9ea689a4a0 100644 --- a/features/odl-jolokia/pom.xml +++ b/features/odl-jolokia/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-mdsal-benchmark/pom.xml b/features/odl-mdsal-benchmark/pom.xml index 5945a4fb93..42b2d9e401 100644 --- a/features/odl-mdsal-benchmark/pom.xml +++ b/features/odl-mdsal-benchmark/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-mdsal-broker/pom.xml b/features/odl-mdsal-broker/pom.xml index 95740769ac..686971b49f 100644 --- a/features/odl-mdsal-broker/pom.xml +++ b/features/odl-mdsal-broker/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent @@ -47,11 +47,5 @@ xml features - - org.opendaylight.controller - odl-controller-blueprint - xml - features - diff --git a/features/odl-mdsal-broker/src/main/feature/feature.xml b/features/odl-mdsal-broker/src/main/feature/feature.xml index 11af9450e1..ffa724ca99 100644 --- a/features/odl-mdsal-broker/src/main/feature/feature.xml +++ b/features/odl-mdsal-broker/src/main/feature/feature.xml @@ -8,7 +8,7 @@ --> - odl-mdsal-singleton-dom - odl-mdsal-eos-binding + odl-mdsal-singleton-dom + odl-mdsal-eos-binding diff --git a/features/odl-mdsal-clustering-commons/pom.xml b/features/odl-mdsal-clustering-commons/pom.xml index a9c1831daa..0d98ae7098 100644 --- a/features/odl-mdsal-clustering-commons/pom.xml +++ b/features/odl-mdsal-clustering-commons/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent @@ -39,6 +39,12 @@ xml features + + org.opendaylight.odlparent + odl-netty-4 + xml + features + org.opendaylight.odlparent odl-servlet-api diff --git a/features/odl-mdsal-clustering-commons/src/main/feature/feature.xml b/features/odl-mdsal-clustering-commons/src/main/feature/feature.xml index 9a048e910a..7a41fc13bb 100644 --- a/features/odl-mdsal-clustering-commons/src/main/feature/feature.xml +++ b/features/odl-mdsal-clustering-commons/src/main/feature/feature.xml @@ -8,10 +8,11 @@ --> - odl-apache-commons-lang3 - odl-dropwizard-metrics - odl-servlet-api - odl-yangtools-data - odl-yangtools-codec + odl-apache-commons-lang3 + odl-dropwizard-metrics + odl-netty-4 + odl-servlet-api + odl-yangtools-data + odl-yangtools-codec diff --git a/features/odl-mdsal-distributed-datastore/pom.xml b/features/odl-mdsal-distributed-datastore/pom.xml index 62659c8464..35b1d52094 100644 --- 
a/features/odl-mdsal-distributed-datastore/pom.xml +++ b/features/odl-mdsal-distributed-datastore/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml b/features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml index 756bd7cf5f..bf7451b93d 100644 --- a/features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml +++ b/features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml @@ -8,11 +8,11 @@ --> - odl-apache-commons-text - odl-yangtools-codec - odl-mdsal-eos-dom - odl-mdsal-dom-broker - odl-mdsal-binding-dom-adapter + odl-apache-commons-text + odl-yangtools-codec + odl-mdsal-eos-dom + odl-mdsal-dom-broker + odl-mdsal-binding-dom-adapter mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf diff --git a/features/odl-mdsal-remoterpc-connector/pom.xml b/features/odl-mdsal-remoterpc-connector/pom.xml index a3cb6f00fe..68033905be 100644 --- a/features/odl-mdsal-remoterpc-connector/pom.xml +++ b/features/odl-mdsal-remoterpc-connector/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-toaster/pom.xml b/features/odl-toaster/pom.xml index 2fd4ef5f16..01bfd40440 100644 --- a/features/odl-toaster/pom.xml +++ b/features/odl-toaster/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../single-feature-parent diff --git a/features/odl-toaster/src/main/feature/feature.xml b/features/odl-toaster/src/main/feature/feature.xml index 07800a651b..9a6b3e79d1 100644 --- a/features/odl-toaster/src/main/feature/feature.xml +++ b/features/odl-toaster/src/main/feature/feature.xml @@ -8,6 +8,6 @@ --> - odl-mdsal-binding-runtime + odl-mdsal-binding-runtime diff --git a/features/pom.xml b/features/pom.xml index bc571e0eb0..97ade422e2 100644 --- a/features/pom.xml +++ b/features/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller features-aggregator - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -34,7 +34,7 @@ features-controller-experimental - odl-controller-exp-netty-config + odl-toaster features-controller-testing @@ -47,13 +47,12 @@ odl-controller-blueprint + odl-controller-broker-local odl-controller-mdsal-common odl-jolokia - odl-controller-broker-local odl-mdsal-broker odl-mdsal-clustering-commons odl-mdsal-distributed-datastore odl-mdsal-remoterpc-connector - odl-toaster diff --git a/features/single-feature-parent/pom.xml b/features/single-feature-parent/pom.xml index 820bd9497d..556d3acd76 100644 --- a/features/single-feature-parent/pom.xml +++ b/features/single-feature-parent/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent single-feature-parent - 9.0.12 + 13.0.11 org.opendaylight.controller single-feature-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -25,7 +25,7 @@ org.opendaylight.controller bundle-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom import diff --git a/jolokia/pom.xml b/jolokia/pom.xml index 6301333886..1d98663b1f 100644 --- a/jolokia/pom.xml +++ b/jolokia/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.odlparent odlparent - 9.0.12 + 13.0.11 org.opendaylight.controller odl-jolokia-osgi - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT jar diff --git a/karaf/pom.xml b/karaf/pom.xml index 0476bdf509..56869cc048 100644 --- a/karaf/pom.xml +++ b/karaf/pom.xml @@ 
-11,13 +11,13 @@ org.opendaylight.odlparent karaf4-parent - 9.0.12 + 13.0.11 org.opendaylight.controller controller-test-karaf - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom diff --git a/opendaylight/blueprint/pom.xml b/opendaylight/blueprint/pom.xml index ae6f8f7cb2..2b7bfeb106 100644 --- a/opendaylight/blueprint/pom.xml +++ b/opendaylight/blueprint/pom.xml @@ -12,7 +12,7 @@ org.opendaylight.odlparent bundle-parent - 9.0.12 + 13.0.11 @@ -20,21 +20,21 @@ blueprint bundle ${project.artifactId} - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT org.opendaylight.yangtools yangtools-artifacts - 7.0.12 + 13.0.2 pom import org.opendaylight.mdsal mdsal-artifacts - 8.0.10 + 13.0.1 pom import @@ -53,6 +53,11 @@ com.google.guava guava + + com.github.spotbugs + spotbugs-annotations + true + org.apache.aries.blueprint org.apache.aries.blueprint.core @@ -62,6 +67,46 @@ org.apache.aries org.apache.aries.util + + org.opendaylight.yangtools + concepts + + + org.opendaylight.yangtools + util + + + org.opendaylight.yangtools + yang-common + + + org.opendaylight.yangtools + yang-data-api + + + org.opendaylight.yangtools + yang-data-impl + + + org.opendaylight.yangtools + yang-data-codec-xml + + + org.opendaylight.yangtools + yang-model-api + + + org.opendaylight.yangtools + yang-model-util + + + org.opendaylight.mdsal + mdsal-common-api + + + org.opendaylight.mdsal + mdsal-dom-api + org.opendaylight.mdsal mdsal-dom-spi @@ -72,37 +117,48 @@ org.opendaylight.mdsal - mdsal-binding-dom-codec + mdsal-binding-dom-codec-api - org.opendaylight.yangtools - yang-data-codec-xml + org.opendaylight.mdsal + mdsal-binding-spec-util - org.osgi - osgi.core + org.opendaylight.mdsal + yang-binding org.osgi - osgi.cmpn + org.osgi.framework - org.slf4j - slf4j-api + org.osgi + org.osgi.service.cm org.osgi org.osgi.service.event - - com.google.truth - truth + org.osgi + org.osgi.util.tracker + org.opendaylight.mdsal mdsal-binding-test-model + + org.opendaylight.mdsal + mdsal-binding-dom-adapter + test + + + org.opendaylight.mdsal + mdsal-binding-dom-adapter + test-jar + test + org.opendaylight.mdsal mdsal-binding-test-utils diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java index 7ad8ddb4e3..55994ca1f0 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java @@ -7,28 +7,26 @@ */ package org.opendaylight.controller.blueprint; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Dictionary; import java.util.Enumeration; import java.util.HashSet; -import java.util.Hashtable; import java.util.List; +import java.util.Map; import org.apache.aries.blueprint.NamespaceHandler; import org.apache.aries.blueprint.services.BlueprintExtenderService; import org.apache.aries.quiesce.participant.QuiesceParticipant; import org.apache.aries.util.AriesFrameworkUtil; import org.eclipse.jdt.annotation.Nullable; -import org.gaul.modernizer_maven_annotations.SuppressModernizer; import org.opendaylight.controller.blueprint.ext.OpendaylightNamespaceHandler; import org.opendaylight.yangtools.util.xml.UntrustedXML; import org.osgi.framework.Bundle; import org.osgi.framework.BundleActivator; import 
org.osgi.framework.BundleContext; import org.osgi.framework.BundleEvent; +import org.osgi.framework.FrameworkUtil; import org.osgi.framework.ServiceReference; import org.osgi.framework.ServiceRegistration; import org.osgi.framework.SynchronousBundleListener; @@ -131,8 +129,6 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus quiesceParticipantTracker.open(); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private QuiesceParticipant onQuiesceParticipantAdded(final ServiceReference reference) { quiesceParticipant = reference.getBundle().getBundleContext().getService(reference); @@ -143,8 +139,6 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus return quiesceParticipant; } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private BlueprintExtenderService onBlueprintExtenderServiceAdded( final ServiceReference reference) { blueprintExtenderService = reference.getBundle().getBundleContext().getService(reference); @@ -163,20 +157,15 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus } private void registerNamespaceHandler(final BundleContext context) { - Dictionary props = emptyDict(); - props.put("osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0); - namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(), props); + namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(), + FrameworkUtil.asDictionary(Map.of( + "osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0))); } private void registerBlueprintEventHandler(final BundleContext context) { eventHandlerReg = context.registerService(BlueprintListener.class, this, null); } - @SuppressModernizer - private static Dictionary emptyDict() { - return new Hashtable<>(); - } - /** * Implemented from BundleActivator. */ @@ -274,13 +263,12 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus return !paths.isEmpty() ? paths : findBlueprintPaths(bundle, ODL_CUSTOM_BLUEPRINT_FILE_PATH); } - @SuppressWarnings({ "rawtypes", "unchecked" }) private static List findBlueprintPaths(final Bundle bundle, final String path) { Enumeration rntries = bundle.findEntries(path, BLUEPRINT_FLE_PATTERN, false); if (rntries == null) { - return Collections.emptyList(); + return List.of(); } else { - return Collections.list((Enumeration)rntries); + return List.copyOf(Collections.list(rntries)); } } diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java deleted file mode 100644 index 35f59f85c5..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import static java.util.Objects.requireNonNull; - -import com.google.common.base.MoreObjects; -import com.google.common.collect.ImmutableSet; -import java.util.Collection; -import java.util.Set; -import java.util.function.Predicate; -import org.apache.aries.blueprint.services.ExtendedBlueprintContainer; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener; -import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier; -import org.opendaylight.mdsal.dom.api.DOMRpcService; -import org.opendaylight.mdsal.dom.api.DOMSchemaService; -import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy; -import org.opendaylight.yangtools.concepts.ListenerRegistration; -import org.opendaylight.yangtools.yang.binding.RpcService; -import org.opendaylight.yangtools.yang.common.QName; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; -import org.osgi.service.blueprint.container.ComponentDefinitionException; - -abstract class AbstractInvokableServiceMetadata extends AbstractDependentComponentFactoryMetadata { - private final String interfaceName; - - private ListenerRegistration rpcListenerReg; - private RpcConsumerRegistry rpcRegistry; - private Class rpcInterface; - private Set rpcSchemaPaths; - - AbstractInvokableServiceMetadata(final String id, final String interfaceName) { - super(id); - this.interfaceName = requireNonNull(interfaceName); - } - - Class rpcInterface() { - return rpcInterface; - } - - @SuppressWarnings({ "checkstyle:IllegalCatch", "unchecked" }) - @Override - public final void init(final ExtendedBlueprintContainer container) { - super.init(container); - - final Class interfaceClass; - try { - interfaceClass = container().getBundleContext().getBundle().loadClass(interfaceName); - } catch (final Exception e) { - throw new ComponentDefinitionException(String.format("%s: Error obtaining interface class %s", - logName(), interfaceName), e); - } - - if (!RpcService.class.isAssignableFrom(interfaceClass)) { - throw new ComponentDefinitionException(String.format( - "%s: The specified interface %s is not an RpcService", logName(), interfaceName)); - } - - rpcInterface = (Class)interfaceClass; - } - - @Override - protected final void startTracking() { - // Request RpcProviderRegistry first ... - retrieveService("RpcConsumerRegistry", RpcConsumerRegistry.class, this::onRpcRegistry); - } - - private void onRpcRegistry(final Object service) { - log.debug("{}: Retrieved RpcProviderRegistry {}", logName(), service); - rpcRegistry = (RpcConsumerRegistry)service; - - // Now acquire SchemaService... 
- retrieveService("SchemaService", DOMSchemaService.class, this::onSchemaService); - } - - private void onSchemaService(final Object service) { - log.debug("{}: Retrieved SchemaService {}", logName(), service); - - // Now get the SchemaContext and trigger RPC resolution - retrievedSchemaContext(((DOMSchemaService)service).getGlobalContext()); - } - - private void retrievedSchemaContext(final SchemaContext schemaContext) { - log.debug("{}: retrievedSchemaContext", logName()); - - final Collection schemaPaths = RpcUtil.decomposeRpcService(rpcInterface, schemaContext, - rpcFilter()); - if (schemaPaths.isEmpty()) { - log.debug("{}: interface {} has no acceptable entries, assuming it is satisfied", logName(), rpcInterface); - setSatisfied(); - return; - } - - rpcSchemaPaths = ImmutableSet.copyOf(schemaPaths); - log.debug("{}: Got SchemaPaths: {}", logName(), rpcSchemaPaths); - - // First get the DOMRpcService OSGi service. This will be used to register a listener to be notified - // when the underlying DOM RPC service is available. - retrieveService("DOMRpcService", DOMRpcService.class, this::retrievedDOMRpcService); - } - - private void retrievedDOMRpcService(final Object service) { - log.debug("{}: retrievedDOMRpcService {}", logName(), service); - final DOMRpcService domRpcService = (DOMRpcService)service; - - setDependencyDesc("Available DOM RPC for binding RPC: " + rpcInterface); - rpcListenerReg = domRpcService.registerRpcListener(new DOMRpcAvailabilityListener() { - @Override - public void onRpcAvailable(final Collection rpcs) { - onRpcsAvailable(rpcs); - } - - @Override - public void onRpcUnavailable(final Collection rpcs) { - } - }); - } - - abstract Predicate rpcFilter(); - - @SuppressWarnings("checkstyle:IllegalCatch") - @Override - public final Object create() throws ComponentDefinitionException { - log.debug("{}: In create: interfaceName: {}", logName(), interfaceName); - - super.onCreate(); - - try { - RpcService rpcService = rpcRegistry.getRpcService(rpcInterface); - - log.debug("{}: create returning service {}", logName(), rpcService); - - return rpcService; - } catch (final RuntimeException e) { - throw new ComponentDefinitionException("Error getting RPC service for " + interfaceName, e); - } - } - - protected final void onRpcsAvailable(final Collection rpcs) { - for (DOMRpcIdentifier identifier : rpcs) { - if (rpcSchemaPaths.contains(identifier.getType())) { - log.debug("{}: onRpcsAvailable - found SchemaPath {}", logName(), identifier.getType()); - setSatisfied(); - break; - } - } - } - - @Override - public final void stopTracking() { - super.stopTracking(); - closeRpcListenerReg(); - } - - private void closeRpcListenerReg() { - if (rpcListenerReg != null) { - rpcListenerReg.close(); - rpcListenerReg = null; - } - } - - @Override - public final void destroy(final Object instance) { - super.destroy(instance); - closeRpcListenerReg(); - } - - @Override - public final String toString() { - return MoreObjects.toStringHelper(this).add("id", getId()).add("interfaceName", interfaceName).toString(); - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java deleted file mode 100644 index cb97fb00fd..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2017 Cisco Systems, Inc. and others. 
All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import com.google.common.collect.Collections2; -import com.google.common.collect.ImmutableSet; -import java.util.Collection; -import java.util.Set; -import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier; -import org.opendaylight.mdsal.dom.api.DOMRpcImplementationNotAvailableException; -import org.opendaylight.mdsal.dom.api.DOMRpcProviderService; -import org.opendaylight.mdsal.dom.api.DOMSchemaService; -import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy; -import org.opendaylight.yangtools.concepts.Registration; -import org.opendaylight.yangtools.util.concurrent.FluentFutures; -import org.opendaylight.yangtools.yang.binding.RpcService; -import org.opendaylight.yangtools.yang.common.QName; -import org.osgi.framework.Bundle; -import org.osgi.service.blueprint.container.ComponentDefinitionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Blueprint bean corresponding to the "action-provider" element that registers the promise to instantiate action - * instances with RpcProviderRegistry. - * - *

    - * This bean has two distinct facets: - * - if a reference bean is provided, it registers it with {@link RpcProviderService} - * - if a reference bean is not provided, it registers the corresponding no-op implementation with - * {@link DOMRpcProviderService} for all action (Routed RPC) elements in the provided interface - * - * @author Robert Varga - */ -public class ActionProviderBean { - static final String ACTION_PROVIDER = "action-provider"; - - private static final Logger LOG = LoggerFactory.getLogger(ActionProviderBean.class); - - private DOMRpcProviderService domRpcProvider; - private RpcProviderService bindingRpcProvider; - private DOMSchemaService schemaService; - private RpcService implementation; - private String interfaceName; - private Registration reg; - private Bundle bundle; - - public void setBundle(final Bundle bundle) { - this.bundle = bundle; - } - - public void setInterfaceName(final String interfaceName) { - this.interfaceName = interfaceName; - } - - public void setImplementation(final RpcService implementation) { - this.implementation = implementation; - } - - public void setDomRpcProvider(final DOMRpcProviderService rpcProviderService) { - this.domRpcProvider = rpcProviderService; - } - - public void setBindingRpcProvider(final RpcProviderService rpcProvider) { - this.bindingRpcProvider = rpcProvider; - } - - public void setSchemaService(final DOMSchemaService schemaService) { - this.schemaService = schemaService; - } - - public void init() { - // First resolve the interface class - final Class interfaceClass = getRpcClass(); - - LOG.debug("{}: resolved interface {} to {}", ACTION_PROVIDER, interfaceName, interfaceClass); - - if (implementation != null) { - registerImplementation(interfaceClass); - } else { - registerFallback(interfaceClass); - } - } - - @SuppressWarnings("checkstyle:IllegalCatch") - public void destroy() { - if (reg != null) { - try { - reg.close(); - } catch (final Exception e) { - LOG.warn("{}: error while unregistering", ACTION_PROVIDER, e); - } finally { - reg = null; - } - } - } - - @SuppressWarnings("unchecked") - private Class getRpcClass() { - final Class iface; - - try { - iface = bundle.loadClass(interfaceName); - } catch (final ClassNotFoundException e) { - throw new ComponentDefinitionException(String.format( - "The specified \"interface\" for %s \"%s\" does not refer to an available class", interfaceName, - ACTION_PROVIDER), e); - } - if (!RpcService.class.isAssignableFrom(iface)) { - throw new ComponentDefinitionException(String.format( - "The specified \"interface\" %s for \"%s\" is not an RpcService", interfaceName, ACTION_PROVIDER)); - } - - return (Class) iface; - } - - private void registerFallback(final Class interfaceClass) { - final Collection paths = RpcUtil.decomposeRpcService(interfaceClass, - schemaService.getGlobalContext(), RpcRoutingStrategy::isContextBasedRouted); - if (paths.isEmpty()) { - LOG.warn("{}: interface {} has no actions defined", ACTION_PROVIDER, interfaceClass); - return; - } - - final Set rpcs = ImmutableSet.copyOf(Collections2.transform(paths, DOMRpcIdentifier::create)); - reg = domRpcProvider.registerRpcImplementation( - (rpc, input) -> FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException( - "Action %s has no instance matching %s", rpc, input)), rpcs); - LOG.debug("Registered provider for {}", interfaceName); - } - - private void registerImplementation(final Class interfaceClass) { - if (!interfaceClass.isInstance(implementation)) { - throw new 
ComponentDefinitionException(String.format( - "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s", - interfaceName, ACTION_PROVIDER, implementation.getClass())); - } - - reg = bindingRpcProvider.registerRpcImplementation(interfaceClass, implementation); - LOG.debug("Registered implementation {} for {}", implementation, interfaceName); - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java deleted file mode 100644 index 5bb3f14d78..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import java.util.function.Predicate; -import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy; - -/** - * Factory metadata corresponding to the "action-service" element. It waits for a DOM promise of registration - * to appear in the {@link DOMRpcService} and then acquires a dynamic proxy via RpcProviderRegistry. - * - * @author Robert Varga - */ -final class ActionServiceMetadata extends AbstractInvokableServiceMetadata { - /* - * Implementation note: - * - * This implementation assumes Binding V1 semantics for actions, which means actions are packaged along with RPCs - * into a single interface. This has interesting implications on working with RpcServiceMetadata, which only - * handles the RPC side of the contract. - * - * Further interesting interactions stem from the fact that in DOM world each action is a separate entity, so the - * interface contract can let some actions to be invoked, while failing for others. This is a shortcoming of the - * Binding Specification and will be addressed in Binding V2 -- where each action is its own interface. 
- */ - ActionServiceMetadata(final String id, final String interfaceName) { - super(id, interfaceName); - } - - @Override - Predicate rpcFilter() { - return RpcRoutingStrategy::isContextBasedRouted; - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java index dd672e4110..67905aeaf8 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java @@ -15,23 +15,25 @@ import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.net.URISyntaxException; import java.util.Set; -import javax.xml.parsers.ParserConfigurationException; import javax.xml.stream.XMLStreamException; import javax.xml.transform.dom.DOMSource; import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections; import org.opendaylight.yangtools.yang.binding.DataObject; -import org.opendaylight.yangtools.yang.binding.Identifiable; -import org.opendaylight.yangtools.yang.binding.Identifier; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.opendaylight.yangtools.yang.binding.Key; +import org.opendaylight.yangtools.yang.binding.KeyAware; +import org.opendaylight.yangtools.yang.binding.contract.Naming; import org.opendaylight.yangtools.yang.common.QName; -import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; +import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; import org.opendaylight.yangtools.yang.data.api.schema.MapNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter; import org.opendaylight.yangtools.yang.data.codec.xml.XmlParserStream; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNormalizedNodeStreamWriter; -import org.opendaylight.yangtools.yang.data.impl.schema.NormalizedNodeResult; +import org.opendaylight.yangtools.yang.data.impl.schema.NormalizationResultHolder; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode; import org.opendaylight.yangtools.yang.model.api.DataSchemaNode; import org.opendaylight.yangtools.yang.model.api.ListSchemaNode; @@ -48,11 +50,9 @@ import org.xml.sax.SAXException; * @author Thomas Pantelis (originally; re-factored by Michael Vorburger.ch) */ public abstract class BindingContext { - private static String GET_KEY_METHOD = "key"; - public static BindingContext create(final String logName, final Class klass, final String appConfigListKeyValue) { - if (Identifiable.class.isAssignableFrom(klass)) { + if (KeyAware.class.isAssignableFrom(klass)) { // The binding class corresponds to a yang list. 
if (Strings.isNullOrEmpty(appConfigListKeyValue)) { throw new ComponentDefinitionException(String.format( @@ -75,12 +75,12 @@ public abstract class BindingContext { } public final InstanceIdentifier appConfigPath; - public final Class appConfigBindingClass; + public final Class appConfigBindingClass; public final Class schemaType; public final QName bindingQName; - private BindingContext(final Class appConfigBindingClass, - final InstanceIdentifier appConfigPath, final Class schemaType) { + private BindingContext(final Class appConfigBindingClass, final InstanceIdentifier appConfigPath, + final Class schemaType) { this.appConfigBindingClass = appConfigBindingClass; this.appConfigPath = appConfigPath; this.schemaType = schemaType; @@ -89,20 +89,14 @@ public abstract class BindingContext { } public NormalizedNode parseDataElement(final Element element, final SchemaTreeInference dataSchema) - throws XMLStreamException, IOException, ParserConfigurationException, SAXException, URISyntaxException { - final NormalizedNodeResult resultHolder = new NormalizedNodeResult(); + throws XMLStreamException, IOException, SAXException, URISyntaxException { + final NormalizationResultHolder resultHolder = new NormalizationResultHolder(); final NormalizedNodeStreamWriter writer = ImmutableNormalizedNodeStreamWriter.from(resultHolder); final XmlParserStream xmlParser = XmlParserStream.create(writer, dataSchema); xmlParser.traverse(new DOMSource(element)); - final NormalizedNode result = resultHolder.getResult(); - if (result instanceof MapNode) { - final MapNode mapNode = (MapNode) result; - final MapEntryNode mapEntryNode = mapNode.body().iterator().next(); - return mapEntryNode; - } - - return result; + final NormalizedNode result = resultHolder.getResult().data(); + return result instanceof MapNode mapNode ? mapNode.body().iterator().next() : result; } public abstract NormalizedNode newDefaultNode(SchemaTreeInference dataSchema); @@ -113,13 +107,13 @@ public abstract class BindingContext { private static class ContainerBindingContext extends BindingContext { @SuppressWarnings("unchecked") ContainerBindingContext(final Class appConfigBindingClass) { - super((Class) appConfigBindingClass, - InstanceIdentifier.create((Class) appConfigBindingClass), ContainerSchemaNode.class); + super(appConfigBindingClass, InstanceIdentifier.create((Class) appConfigBindingClass), + ContainerSchemaNode.class); } @Override - public NormalizedNode newDefaultNode(final SchemaTreeInference dataSchema) { - return ImmutableNodes.containerNode(bindingQName); + public ContainerNode newDefaultNode(final SchemaTreeInference dataSchema) { + return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(bindingQName)).build(); } } @@ -142,8 +136,8 @@ public abstract class BindingContext { final String listKeyValue) throws InstantiationException, IllegalAccessException, IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException { // We assume the yang list key type is string. 
- Identifier keyInstance = (Identifier) bindingClass.getMethod(GET_KEY_METHOD).getReturnType() - .getConstructor(String.class).newInstance(listKeyValue); + Key keyInstance = (Key) bindingClass.getMethod(Naming.KEY_AWARE_KEY_NAME) + .getReturnType().getConstructor(String.class).newInstance(listKeyValue); InstanceIdentifier appConfigPath = InstanceIdentifier.builder((Class)bindingClass, keyInstance).build(); return new ListBindingContext(bindingClass, appConfigPath, listKeyValue); } @@ -158,7 +152,10 @@ public abstract class BindingContext { checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass); QName listKeyQName = keys.iterator().next(); - return ImmutableNodes.mapEntryBuilder(bindingQName, listKeyQName, appConfigListKeyValue).build(); + return ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(NodeIdentifierWithPredicates.of(bindingQName, listKeyQName, appConfigListKeyValue)) + .withChild(ImmutableNodes.leafNode(listKeyQName, appConfigListKeyValue)) + .build(); } } } diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java index 2f0709f6af..8e8d98ff36 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java @@ -10,8 +10,8 @@ package org.opendaylight.controller.blueprint.ext; import com.google.common.base.Strings; import java.util.ArrayList; import java.util.Dictionary; -import java.util.Hashtable; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.aries.blueprint.ComponentDefinitionRegistry; @@ -20,13 +20,11 @@ import org.apache.aries.blueprint.ext.AbstractPropertyPlaceholder; import org.apache.aries.blueprint.mutable.MutableBeanMetadata; import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata; import org.apache.aries.util.AriesFrameworkUtil; -import org.gaul.modernizer_maven_annotations.SuppressModernizer; import org.opendaylight.controller.blueprint.BlueprintContainerRestartService; import org.osgi.framework.Bundle; import org.osgi.framework.Constants; +import org.osgi.framework.FrameworkUtil; import org.osgi.framework.ServiceRegistration; -import org.osgi.service.blueprint.reflect.BeanProperty; -import org.osgi.service.blueprint.reflect.ComponentMetadata; import org.osgi.service.blueprint.reflect.ValueMetadata; import org.osgi.service.cm.ManagedService; import org.slf4j.Logger; @@ -45,8 +43,8 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor private static final String CM_PERSISTENT_ID_PROPERTY = "persistentId"; private final List> managedServiceRegs = new ArrayList<>(); - private Bundle bundle; - private BlueprintContainerRestartService blueprintContainerRestartService; + private Bundle bundle = null; + private BlueprintContainerRestartService blueprintContainerRestartService = null; private boolean restartDependentsOnUpdates; private boolean useDefaultForReferenceTypes; @@ -55,7 +53,7 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor } public void setBlueprintContainerRestartService(final BlueprintContainerRestartService restartService) { - this.blueprintContainerRestartService = restartService; + blueprintContainerRestartService = 
restartService; } public void setRestartDependentsOnUpdates(final boolean restartDependentsOnUpdates) { @@ -67,21 +65,19 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor } public void destroy() { - for (ServiceRegistration reg: managedServiceRegs) { - AriesFrameworkUtil.safeUnregisterService(reg); - } + managedServiceRegs.forEach(AriesFrameworkUtil::safeUnregisterService); } @Override public void process(final ComponentDefinitionRegistry registry) { LOG.debug("{}: In process", logName()); - for (String name : registry.getComponentDefinitionNames()) { - ComponentMetadata component = registry.getComponentDefinition(name); - if (component instanceof MutableBeanMetadata) { - processMutableBeanMetadata((MutableBeanMetadata) component); - } else if (component instanceof MutableServiceReferenceMetadata) { - processServiceReferenceMetadata((MutableServiceReferenceMetadata)component); + for (var name : registry.getComponentDefinitionNames()) { + final var component = registry.getComponentDefinition(name); + if (component instanceof MutableBeanMetadata bean) { + processMutableBeanMetadata(bean); + } else if (component instanceof MutableServiceReferenceMetadata serviceRef) { + processServiceReferenceMetadata(serviceRef); } } } @@ -112,18 +108,15 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor LOG.debug("{}: Found PropertyPlaceholder bean: {}, runtime {}", logName(), bean.getId(), bean.getRuntimeClass()); - for (BeanProperty prop : bean.getProperties()) { + for (var prop : bean.getProperties()) { if (CM_PERSISTENT_ID_PROPERTY.equals(prop.getName())) { - if (prop.getValue() instanceof ValueMetadata) { - ValueMetadata persistentId = (ValueMetadata)prop.getValue(); - - LOG.debug("{}: Found {} property, value : {}", logName(), - CM_PERSISTENT_ID_PROPERTY, persistentId.getStringValue()); - + if (prop.getValue() instanceof ValueMetadata persistentId) { + LOG.debug("{}: Found {} property, value : {}", logName(), CM_PERSISTENT_ID_PROPERTY, + persistentId.getStringValue()); registerManagedService(persistentId.getStringValue()); } else { - LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata", - logName(), CM_PERSISTENT_ID_PROPERTY, prop.getValue()); + LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata", logName(), + CM_PERSISTENT_ID_PROPERTY, prop.getValue()); } break; @@ -132,11 +125,10 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor } } - @SuppressModernizer private void registerManagedService(final String persistentId) { // Register a ManagedService so we get updates from the ConfigAdmin when the cfg file corresponding // to the persistentId changes. 
- final ManagedService managedService = new ManagedService() { + final var managedService = new ManagedService() { private final AtomicBoolean initialUpdate = new AtomicBoolean(true); private volatile Dictionary previousProperties; @@ -156,11 +148,11 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor } }; - Dictionary props = new Hashtable<>(); - props.put(Constants.SERVICE_PID, persistentId); - props.put(Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName()); - props.put(Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION)); - managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService, props)); + managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService, + FrameworkUtil.asDictionary(Map.of( + Constants.SERVICE_PID, persistentId, + Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName(), + Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION))))); } private String logName() { diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java index c147492955..4dea3404f9 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java @@ -14,7 +14,6 @@ import java.io.InputStream; import java.net.URISyntaxException; import java.net.URL; import java.util.Optional; -import javax.xml.parsers.ParserConfigurationException; import javax.xml.stream.XMLStreamException; import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer; import org.opendaylight.mdsal.dom.api.DOMSchemaService; @@ -54,7 +53,7 @@ public class DataStoreAppConfigDefaultXMLReader { @FunctionalInterface public interface FallbackConfigProvider { NormalizedNode get(SchemaTreeInference dataSchema) - throws IOException, XMLStreamException, ParserConfigurationException, SAXException, URISyntaxException; + throws IOException, XMLStreamException, SAXException, URISyntaxException; } @FunctionalInterface @@ -93,8 +92,8 @@ public class DataStoreAppConfigDefaultXMLReader { return Resources.getResource(testClass, defaultAppConfigFileName); } - public T createDefaultInstance() throws ConfigXMLReaderException, ParserConfigurationException, XMLStreamException, - IOException, SAXException, URISyntaxException { + public T createDefaultInstance() throws ConfigXMLReaderException, XMLStreamException, IOException, SAXException, + URISyntaxException { return createDefaultInstance(dataSchema -> { throw new IllegalArgumentException( "Failed to read XML (not creating model from defaults as runtime would, for better clarity in tests)"); @@ -103,7 +102,7 @@ public class DataStoreAppConfigDefaultXMLReader { @SuppressWarnings("unchecked") public T createDefaultInstance(final FallbackConfigProvider fallback) throws ConfigXMLReaderException, - URISyntaxException, ParserConfigurationException, XMLStreamException, SAXException, IOException { + URISyntaxException, XMLStreamException, SAXException, IOException { YangInstanceIdentifier yangPath = bindingSerializer.toYangInstanceIdentifier(bindingContext.appConfigPath); LOG.debug("{}: Creating app config instance from path {}, Qname: {}", logName, yangPath, @@ 
-176,7 +175,7 @@ public class DataStoreAppConfigDefaultXMLReader { if (!optionalURL.isPresent()) { return null; } - URL url = optionalURL.get(); + URL url = optionalURL.orElseThrow(); try (InputStream is = url.openStream()) { Document root = UntrustedXML.newDocumentBuilder().parse(is); NormalizedNode dataNode = bindingContext.parseDataElement(root.getDocumentElement(), @@ -185,8 +184,7 @@ public class DataStoreAppConfigDefaultXMLReader { LOG.debug("{}: Parsed data node: {}", logName, dataNode); return dataNode; - } catch (final IOException | SAXException | XMLStreamException | ParserConfigurationException - | URISyntaxException e) { + } catch (final IOException | SAXException | XMLStreamException | URISyntaxException e) { String msg = String.format("%s: Could not read/parse app config %s", logName, url); LOG.error(msg, e); throw new ConfigXMLReaderException(msg, e); diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java index 5b20827c7f..35cdf03e4c 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java @@ -17,13 +17,11 @@ import java.util.Collection; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; -import javax.xml.parsers.ParserConfigurationException; import javax.xml.stream.XMLStreamException; import org.apache.aries.blueprint.services.ExtendedBlueprintContainer; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader.ConfigURLProvider; -import org.opendaylight.mdsal.binding.api.ClusteredDataTreeChangeListener; import org.opendaylight.mdsal.binding.api.DataBroker; import org.opendaylight.mdsal.binding.api.DataObjectModification; import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType; @@ -33,7 +31,8 @@ import org.opendaylight.mdsal.binding.api.ReadTransaction; import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMSchemaService; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; +import org.opendaylight.yangtools.yang.binding.ChildOf; import org.opendaylight.yangtools.yang.binding.DataObject; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference; @@ -70,7 +69,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor private final AtomicBoolean readingInitialAppConfig = new AtomicBoolean(true); private volatile BindingContext bindingContext; - private volatile ListenerRegistration appConfigChangeListenerReg; + private volatile Registration appConfigChangeListenerReg; private volatile DataObject currentAppConfig; // Note: the BindingNormalizedNodeSerializer interface is annotated as deprecated because there's an @@ -86,7 +85,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor this.defaultAppConfigFileName = defaultAppConfigFileName; this.appConfigBindingClassName = 
appConfigBindingClassName; this.appConfigListKeyValue = appConfigListKeyValue; - this.appConfigUpdateStrategy = updateStrategyValue; + appConfigUpdateStrategy = updateStrategyValue; } @Override @@ -97,10 +96,10 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor Class appConfigBindingClass; try { Class bindingClass = container.getBundleContext().getBundle().loadClass(appConfigBindingClassName); - if (!DataObject.class.isAssignableFrom(bindingClass)) { + if (!ChildOf.class.isAssignableFrom(bindingClass)) { throw new ComponentDefinitionException(String.format( "%s: Specified app config binding class %s does not extend %s", - logName(), appConfigBindingClassName, DataObject.class.getName())); + logName(), appConfigBindingClassName, ChildOf.class.getName())); } appConfigBindingClass = (Class) bindingClass; @@ -143,15 +142,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor setDependencyDesc("Initial app config " + bindingContext.appConfigBindingClass.getSimpleName()); - // We register a DTCL to get updates and also read the app config data from the data store. If - // the app config data is present then both the read and initial DTCN update will return it. If the - // the data isn't present, we won't get an initial DTCN update so the read will indicate the data - // isn't present. - - DataTreeIdentifier dataTreeId = DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION, - bindingContext.appConfigPath); - appConfigChangeListenerReg = dataBroker.registerDataTreeChangeListener(dataTreeId, - (ClusteredDataTreeChangeListener) this::onAppConfigChanged); + // We register a DTCL to get updates and also read the app config data from the data store. If the app config + // data is present then both the read and initial DTCN update will return it. If the the data isn't present, we + // will not get an initial DTCN update so the read will indicate the data is not present. + appConfigChangeListenerReg = dataBroker.registerTreeChangeListener( + DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, bindingContext.appConfigPath), + this::onAppConfigChanged); readInitialAppConfig(dataBroker); } @@ -187,12 +183,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor private void onAppConfigChanged(final Collection> changes) { for (DataTreeModification change: changes) { DataObjectModification changeRoot = change.getRootNode(); - ModificationType type = changeRoot.getModificationType(); + ModificationType type = changeRoot.modificationType(); LOG.debug("{}: onAppConfigChanged: {}, {}", logName(), type, change.getRootPath()); if (type == ModificationType.SUBTREE_MODIFIED || type == ModificationType.WRITE) { - DataObject newAppConfig = changeRoot.getDataAfter(); + DataObject newAppConfig = changeRoot.dataAfter(); LOG.debug("New app config instance: {}, previous: {}", newAppConfig, currentAppConfig); @@ -219,7 +215,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor if (result) { DataObject localAppConfig; if (possibleAppConfig.isPresent()) { - localAppConfig = possibleAppConfig.get(); + localAppConfig = possibleAppConfig.orElseThrow(); } else { // No app config data is present so create an empty instance via the bindingSerializer service. // This will also return default values for leafs that haven't been explicitly set. 
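The read-plus-listen pattern described in the hunk above can be summarized in isolation. The sketch below is illustrative only: AppConfigWatcher is a made-up class and T stands in for a generated configuration container; the DataBroker, DataTreeIdentifier and DataTreeModification calls are the same ones the updated code uses, and the initial datastore read mentioned in the comment is issued separately to cover the case where no data (and hence no initial change notification) exists.

    import java.util.Collection;
    import org.opendaylight.mdsal.binding.api.DataBroker;
    import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
    import org.opendaylight.mdsal.binding.api.DataTreeModification;
    import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.opendaylight.yangtools.yang.binding.DataObject;
    import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

    // Hypothetical watcher for a configuration subtree rooted at 'path'.
    final class AppConfigWatcher<T extends DataObject> {
        Registration watch(final DataBroker dataBroker, final InstanceIdentifier<T> path) {
            // Same call shape as the updated code above: register a tree change listener
            // on the CONFIGURATION datastore; an initial read is performed elsewhere.
            return dataBroker.registerTreeChangeListener(
                DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path),
                this::onChanged);
        }

        private void onChanged(final Collection<DataTreeModification<T>> changes) {
            for (var change : changes) {
                final var root = change.getRootNode();
                switch (root.modificationType()) {
                    case SUBTREE_MODIFIED:
                    case WRITE:
                        // root.dataAfter() holds the new configuration instance
                        break;
                    default:
                        // DELETE and other modification types
                        break;
                }
            }
        }
    }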
@@ -267,8 +263,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor } }); - } catch (final ConfigXMLReaderException | IOException | SAXException | XMLStreamException - | ParserConfigurationException | URISyntaxException e) { + } catch (ConfigXMLReaderException | IOException | SAXException | XMLStreamException | URISyntaxException e) { if (e.getCause() == null) { setFailureMessage(e.getMessage()); } else { @@ -279,7 +274,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor } private @Nullable NormalizedNode parsePossibleDefaultAppConfigElement(final SchemaTreeInference dataSchema) - throws URISyntaxException, IOException, ParserConfigurationException, SAXException, XMLStreamException { + throws URISyntaxException, IOException, SAXException, XMLStreamException { if (defaultAppConfigElement == null) { return null; } diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java deleted file mode 100644 index 74c2956621..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import org.opendaylight.mdsal.binding.api.NotificationService; -import org.opendaylight.yangtools.concepts.ListenerRegistration; -import org.opendaylight.yangtools.yang.binding.NotificationListener; -import org.osgi.framework.Bundle; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Blueprint bean corresponding to the "notification-listener" element that registers a NotificationListener - * with the NotificationService. 
- * - * @author Thomas Pantelis - */ -public class NotificationListenerBean { - private static final Logger LOG = LoggerFactory.getLogger(NotificationListenerBean.class); - static final String NOTIFICATION_LISTENER = "notification-listener"; - - private Bundle bundle; - private NotificationService notificationService; - private NotificationListener notificationListener; - private ListenerRegistration registration; - - public void setNotificationService(final NotificationService notificationService) { - this.notificationService = notificationService; - } - - public void setNotificationListener(final NotificationListener notificationListener) { - this.notificationListener = notificationListener; - } - - public void setBundle(final Bundle bundle) { - this.bundle = bundle; - } - - public void init() { - LOG.debug("{}: init - registering NotificationListener {}", bundle.getSymbolicName(), notificationListener); - - registration = notificationService.registerNotificationListener(notificationListener); - } - - public void destroy() { - if (registration != null) { - LOG.debug("{}: destroy - closing ListenerRegistration {}", bundle.getSymbolicName(), notificationListener); - registration.close(); - } else { - LOG.debug("{}: destroy - listener was not registered", bundle.getSymbolicName()); - } - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java index af26acae0a..371b7efecd 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java @@ -11,12 +11,10 @@ import com.google.common.base.Strings; import java.io.IOException; import java.io.StringReader; import java.net.URL; -import java.util.Collections; import java.util.Set; import org.apache.aries.blueprint.ComponentDefinitionRegistry; import org.apache.aries.blueprint.NamespaceHandler; import org.apache.aries.blueprint.ParserContext; -import org.apache.aries.blueprint.ext.ComponentFactoryMetadata; import org.apache.aries.blueprint.mutable.MutableBeanMetadata; import org.apache.aries.blueprint.mutable.MutableRefMetadata; import org.apache.aries.blueprint.mutable.MutableReferenceMetadata; @@ -24,10 +22,6 @@ import org.apache.aries.blueprint.mutable.MutableServiceMetadata; import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata; import org.apache.aries.blueprint.mutable.MutableValueMetadata; import org.opendaylight.controller.blueprint.BlueprintContainerRestartService; -import org.opendaylight.mdsal.binding.api.NotificationService; -import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.mdsal.dom.api.DOMRpcProviderService; -import org.opendaylight.mdsal.dom.api.DOMSchemaService; import org.opendaylight.yangtools.util.xml.UntrustedXML; import org.osgi.service.blueprint.container.ComponentDefinitionException; import org.osgi.service.blueprint.reflect.BeanMetadata; @@ -54,32 +48,20 @@ import org.xml.sax.SAXException; */ public final class OpendaylightNamespaceHandler implements NamespaceHandler { public static final String NAMESPACE_1_0_0 = "http://opendaylight.org/xmlns/blueprint/v1.0.0"; - static final String ROUTED_RPC_REG_CONVERTER_NAME = "org.opendaylight.blueprint.RoutedRpcRegConverter"; - static final String 
DOM_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.DOMRpcProviderService"; - static final String RPC_REGISTRY_NAME = "org.opendaylight.blueprint.RpcRegistry"; - static final String BINDING_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.RpcProviderService"; - static final String SCHEMA_SERVICE_NAME = "org.opendaylight.blueprint.SchemaService"; - static final String NOTIFICATION_SERVICE_NAME = "org.opendaylight.blueprint.NotificationService"; - static final String TYPE_ATTR = "type"; - static final String UPDATE_STRATEGY_ATTR = "update-strategy"; private static final Logger LOG = LoggerFactory.getLogger(OpendaylightNamespaceHandler.class); + private static final String TYPE_ATTR = "type"; + private static final String UPDATE_STRATEGY_ATTR = "update-strategy"; private static final String COMPONENT_PROCESSOR_NAME = ComponentProcessor.class.getName(); private static final String RESTART_DEPENDENTS_ON_UPDATES = "restart-dependents-on-updates"; private static final String USE_DEFAULT_FOR_REFERENCE_TYPES = "use-default-for-reference-types"; private static final String CLUSTERED_APP_CONFIG = "clustered-app-config"; - private static final String INTERFACE = "interface"; - private static final String REF_ATTR = "ref"; private static final String ID_ATTR = "id"; - private static final String RPC_SERVICE = "rpc-service"; - private static final String ACTION_SERVICE = "action-service"; - private static final String SPECIFIC_SERVICE_REF_LIST = "specific-reference-list"; - private static final String STATIC_REFERENCE = "static-reference"; @SuppressWarnings("rawtypes") @Override public Set getManagedClasses() { - return Collections.emptySet(); + return Set.of(); } @Override @@ -97,22 +79,8 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler { public Metadata parse(final Element element, final ParserContext context) { LOG.debug("In parse for {}", element); - if (nodeNameEquals(element, RpcImplementationBean.RPC_IMPLEMENTATION)) { - return parseRpcImplementation(element, context); - } else if (nodeNameEquals(element, RPC_SERVICE)) { - return parseRpcService(element, context); - } else if (nodeNameEquals(element, NotificationListenerBean.NOTIFICATION_LISTENER)) { - return parseNotificationListener(element, context); - } else if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) { + if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) { return parseClusteredAppConfig(element, context); - } else if (nodeNameEquals(element, SPECIFIC_SERVICE_REF_LIST)) { - return parseSpecificReferenceList(element, context); - } else if (nodeNameEquals(element, STATIC_REFERENCE)) { - return parseStaticReference(element, context); - } else if (nodeNameEquals(element, ACTION_SERVICE)) { - return parseActionService(element, context); - } else if (nodeNameEquals(element, ActionProviderBean.ACTION_PROVIDER)) { - return parseActionProvider(element, context); } throw new ComponentDefinitionException("Unsupported standalone element: " + element.getNodeName()); @@ -144,12 +112,10 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler { private static ComponentMetadata decorateServiceType(final Attr attr, final ComponentMetadata component, final ParserContext context) { - if (!(component instanceof MutableServiceMetadata)) { + if (!(component instanceof MutableServiceMetadata service)) { throw new ComponentDefinitionException("Expected an instanceof MutableServiceMetadata"); } - MutableServiceMetadata service = (MutableServiceMetadata)component; - 
LOG.debug("decorateServiceType for {} - adding type property {}", service.getId(), attr.getValue()); service.addServiceProperty(createValue(context, TYPE_ATTR), createValue(context, attr.getValue())); @@ -235,107 +201,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler { return metadata; } - private static Metadata parseActionProvider(final Element element, final ParserContext context) { - registerDomRpcProviderServiceRefBean(context); - registerBindingRpcProviderServiceRefBean(context); - registerSchemaServiceRefBean(context); - - MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), ActionProviderBean.class, - true, true); - addBlueprintBundleRefProperty(context, metadata); - metadata.addProperty("domRpcProvider", createRef(context, DOM_RPC_PROVIDER_SERVICE_NAME)); - metadata.addProperty("bindingRpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME)); - metadata.addProperty("schemaService", createRef(context, SCHEMA_SERVICE_NAME)); - metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE))); - - if (element.hasAttribute(REF_ATTR)) { - metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR))); - } - - LOG.debug("parseActionProvider returning {}", metadata); - return metadata; - } - - - private static Metadata parseRpcImplementation(final Element element, final ParserContext context) { - registerBindingRpcProviderServiceRefBean(context); - - MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), RpcImplementationBean.class, - true, true); - addBlueprintBundleRefProperty(context, metadata); - metadata.addProperty("rpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME)); - metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR))); - - if (element.hasAttribute(INTERFACE)) { - metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE))); - } - - LOG.debug("parseRpcImplementation returning {}", metadata); - return metadata; - } - - private static Metadata parseActionService(final Element element, final ParserContext context) { - ComponentFactoryMetadata metadata = new ActionServiceMetadata(getId(context, element), - element.getAttribute(INTERFACE)); - - LOG.debug("parseActionService returning {}", metadata); - - return metadata; - } - - private static Metadata parseRpcService(final Element element, final ParserContext context) { - ComponentFactoryMetadata metadata = new RpcServiceMetadata(getId(context, element), - element.getAttribute(INTERFACE)); - - LOG.debug("parseRpcService returning {}", metadata); - - return metadata; - } - - private static void registerDomRpcProviderServiceRefBean(final ParserContext context) { - registerRefBean(context, DOM_RPC_PROVIDER_SERVICE_NAME, DOMRpcProviderService.class); - } - - private static void registerBindingRpcProviderServiceRefBean(final ParserContext context) { - registerRefBean(context, BINDING_RPC_PROVIDER_SERVICE_NAME, RpcProviderService.class); - } - - private static void registerSchemaServiceRefBean(final ParserContext context) { - registerRefBean(context, SCHEMA_SERVICE_NAME, DOMSchemaService.class); - } - - private static void registerRefBean(final ParserContext context, final String name, final Class clazz) { - ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry(); - if (registry.getComponentDefinition(name) == null) { - MutableReferenceMetadata metadata = createServiceRef(context, 
clazz, null); - metadata.setId(name); - registry.registerComponentDefinition(metadata); - } - } - - private static Metadata parseNotificationListener(final Element element, final ParserContext context) { - registerNotificationServiceRefBean(context); - - MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), NotificationListenerBean.class, - true, true); - addBlueprintBundleRefProperty(context, metadata); - metadata.addProperty("notificationService", createRef(context, NOTIFICATION_SERVICE_NAME)); - metadata.addProperty("notificationListener", createRef(context, element.getAttribute(REF_ATTR))); - - LOG.debug("parseNotificationListener returning {}", metadata); - - return metadata; - } - - private static void registerNotificationServiceRefBean(final ParserContext context) { - ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry(); - if (registry.getComponentDefinition(NOTIFICATION_SERVICE_NAME) == null) { - MutableReferenceMetadata metadata = createServiceRef(context, NotificationService.class, null); - metadata.setId(NOTIFICATION_SERVICE_NAME); - registry.registerComponentDefinition(metadata); - } - } - private static Metadata parseClusteredAppConfig(final Element element, final ParserContext context) { LOG.debug("parseClusteredAppConfig"); @@ -383,24 +248,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler { } } - private static Metadata parseSpecificReferenceList(final Element element, final ParserContext context) { - ComponentFactoryMetadata metadata = new SpecificReferenceListMetadata(getId(context, element), - element.getAttribute(INTERFACE)); - - LOG.debug("parseSpecificReferenceList returning {}", metadata); - - return metadata; - } - - private static Metadata parseStaticReference(final Element element, final ParserContext context) { - ComponentFactoryMetadata metadata = new StaticReferenceMetadata(getId(context, element), - element.getAttribute(INTERFACE)); - - LOG.debug("parseStaticReference returning {}", metadata); - - return metadata; - } - private static Element parseXML(final String name, final String xml) { try { return UntrustedXML.newDocumentBuilder().parse(new InputSource(new StringReader(xml))).getDocumentElement(); diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java deleted file mode 100644 index 94d5b3b22f..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import com.google.common.base.Strings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.yangtools.concepts.ObjectRegistration; -import org.opendaylight.yangtools.yang.binding.RpcService; -import org.osgi.framework.Bundle; -import org.osgi.service.blueprint.container.ComponentDefinitionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Blueprint bean corresponding to the "rpc-implementation" element that registers an RPC implementation with - * the RpcProviderRegistry. - * - * @author Thomas Pantelis - */ -public class RpcImplementationBean { - private static final Logger LOG = LoggerFactory.getLogger(RpcImplementationBean.class); - static final String RPC_IMPLEMENTATION = "rpc-implementation"; - - private RpcProviderService rpcProvider; - private Bundle bundle; - private String interfaceName; - private RpcService implementation; - private final List> rpcRegistrations = new ArrayList<>(); - - public void setRpcProvider(final RpcProviderService rpcProvider) { - this.rpcProvider = rpcProvider; - } - - public void setBundle(final Bundle bundle) { - this.bundle = bundle; - } - - public void setInterfaceName(final String interfaceName) { - this.interfaceName = interfaceName; - } - - public void setImplementation(final RpcService implementation) { - this.implementation = implementation; - } - - @SuppressWarnings("checkstyle:IllegalCatch") - public void init() { - try { - List> rpcInterfaces = getImplementedRpcServiceInterfaces(interfaceName, - implementation.getClass(), bundle, RPC_IMPLEMENTATION); - - LOG.debug("{}: init - adding implementation {} for RpcService interface(s) {}", bundle.getSymbolicName(), - implementation, rpcInterfaces); - - for (Class rpcInterface : rpcInterfaces) { - rpcRegistrations.add(rpcProvider.registerRpcImplementation(rpcInterface, implementation)); - } - } catch (final ComponentDefinitionException e) { - throw e; - } catch (final Exception e) { - throw new ComponentDefinitionException(String.format( - "Error processing \"%s\" for %s", RPC_IMPLEMENTATION, implementation.getClass()), e); - } - } - - public void destroy() { - for (ObjectRegistration reg: rpcRegistrations) { - reg.close(); - } - } - - @SuppressWarnings("unchecked") - static List> getImplementedRpcServiceInterfaces(final String interfaceName, - final Class implementationClass, final Bundle bundle, final String logName) - throws ClassNotFoundException { - if (!Strings.isNullOrEmpty(interfaceName)) { - Class rpcInterface = bundle.loadClass(interfaceName); - - if (!rpcInterface.isAssignableFrom(implementationClass)) { - throw new ComponentDefinitionException(String.format( - "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s", - interfaceName, logName, implementationClass)); - } - - return Collections.singletonList((Class)rpcInterface); - } - - List> rpcInterfaces = new ArrayList<>(); - for (Class intface : implementationClass.getInterfaces()) { - if (RpcService.class.isAssignableFrom(intface)) { - rpcInterfaces.add((Class) intface); - } - } - - if (rpcInterfaces.isEmpty()) { - throw new ComponentDefinitionException(String.format( - "The 
\"ref\" instance %s for \"%s\" does not implemented any RpcService interfaces", - implementationClass, logName)); - } - - return rpcInterfaces; - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java deleted file mode 100644 index 4ab3867214..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import java.util.function.Predicate; -import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy; - -/** - * Factory metadata corresponding to the "rpc-service" element that gets an RPC service implementation from - * the RpcProviderRegistry and provides it to the Blueprint container. - * - * @author Thomas Pantelis - */ -final class RpcServiceMetadata extends AbstractInvokableServiceMetadata { - RpcServiceMetadata(final String id, final String interfaceName) { - super(id, interfaceName); - } - - @Override - Predicate rpcFilter() { - return s -> !s.isContextBasedRouted(); - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java deleted file mode 100644 index 331aac7e30..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.function.Predicate; -import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections; -import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy; -import org.opendaylight.yangtools.yang.binding.RpcService; -import org.opendaylight.yangtools.yang.common.QName; -import org.opendaylight.yangtools.yang.common.QNameModule; -import org.opendaylight.yangtools.yang.model.api.Module; -import org.opendaylight.yangtools.yang.model.api.RpcDefinition; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods for dealing with various aspects of RPCs and actions. 
- * - * @author Robert Varga - */ -final class RpcUtil { - private static final Logger LOG = LoggerFactory.getLogger(RpcUtil.class); - - private RpcUtil() { - throw new UnsupportedOperationException(); - } - - static Collection decomposeRpcService(final Class service, - final SchemaContext schemaContext, final Predicate filter) { - final QNameModule moduleName = BindingReflections.getQNameModule(service); - final Module module = schemaContext.findModule(moduleName).orElseThrow(() -> new IllegalArgumentException( - "Module not found in SchemaContext: " + moduleName + "; service: " + service)); - LOG.debug("Resolved service {} to module {}", service, module); - - final Collection rpcs = module.getRpcs(); - final Collection ret = new ArrayList<>(rpcs.size()); - for (RpcDefinition rpc : rpcs) { - final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpc); - if (filter.test(strategy)) { - ret.add(rpc.getQName()); - } - } - - return ret; - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java deleted file mode 100644 index 0412f00a7a..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import com.google.common.collect.ImmutableList; -import com.google.common.io.Resources; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.io.IOException; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import org.osgi.framework.Bundle; -import org.osgi.framework.BundleEvent; -import org.osgi.framework.ServiceReference; -import org.osgi.service.blueprint.container.ComponentDefinitionException; -import org.osgi.util.tracker.BundleTracker; -import org.osgi.util.tracker.BundleTrackerCustomizer; -import org.osgi.util.tracker.ServiceTracker; -import org.osgi.util.tracker.ServiceTrackerCustomizer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory metadata corresponding to the "specific-reference-list" element that obtains a specific list - * of service instances from the OSGi registry for a given interface. The specific list is learned by first - * extracting the list of expected service types by inspecting RESOLVED bundles for a resource file under - * META-INF/services with the same name as the given interface. The type(s) listed in the resource file - * must match the "type" property of the advertised service(s). In this manner, an app bundle announces the - * service type(s) that it will advertise so that this class knows which services to expect up front. Once - * all the expected services are obtained, the container is notified that all dependencies of this component - * factory are satisfied. 
- * - * @author Thomas Pantelis - */ -class SpecificReferenceListMetadata extends AbstractDependentComponentFactoryMetadata { - private static final Logger LOG = LoggerFactory.getLogger(SpecificReferenceListMetadata.class); - - private final String interfaceName; - private final String serviceResourcePath; - private final Collection expectedServiceTypes = new ConcurrentSkipListSet<>(); - private final Collection retrievedServiceTypes = new ConcurrentSkipListSet<>(); - private final Collection retrievedServices = Collections.synchronizedList(new ArrayList<>()); - private volatile BundleTracker bundleTracker; - private volatile ServiceTracker serviceTracker; - - SpecificReferenceListMetadata(final String id, final String interfaceName) { - super(id); - this.interfaceName = interfaceName; - serviceResourcePath = "META-INF/services/" + interfaceName; - } - - @Override - protected void startTracking() { - BundleTrackerCustomizer bundleListener = new BundleTrackerCustomizer<>() { - @Override - public Bundle addingBundle(final Bundle bundle, final BundleEvent event) { - bundleAdded(bundle); - return bundle; - } - - @Override - public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) { - } - - @Override - public void removedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) { - } - }; - - bundleTracker = new BundleTracker<>(container().getBundleContext(), Bundle.RESOLVED | Bundle.STARTING - | Bundle.STOPPING | Bundle.ACTIVE, bundleListener); - - // This will get the list of all current RESOLVED+ bundles. - bundleTracker.open(); - - if (expectedServiceTypes.isEmpty()) { - setSatisfied(); - return; - } - - ServiceTrackerCustomizer serviceListener = new ServiceTrackerCustomizer<>() { - @Override - public Object addingService(final ServiceReference reference) { - return serviceAdded(reference); - } - - @Override - public void modifiedService(final ServiceReference reference, final Object service) { - } - - @Override - public void removedService(final ServiceReference reference, final Object service) { - container().getBundleContext().ungetService(reference); - } - }; - - setDependencyDesc(interfaceName + " services with types " + expectedServiceTypes); - - serviceTracker = new ServiceTracker<>(container().getBundleContext(), interfaceName, serviceListener); - serviceTracker.open(); - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private void bundleAdded(final Bundle bundle) { - URL resource = bundle.getEntry(serviceResourcePath); - if (resource == null) { - return; - } - - LOG.debug("{}: Found {} resource in bundle {}", logName(), resource, bundle.getSymbolicName()); - - try { - for (String line : Resources.readLines(resource, StandardCharsets.UTF_8)) { - int ci = line.indexOf('#'); - if (ci >= 0) { - line = line.substring(0, ci); - } - - line = line.trim(); - if (line.isEmpty()) { - continue; - } - - String serviceType = line; - LOG.debug("{}: Retrieved service type {}", logName(), serviceType); - expectedServiceTypes.add(serviceType); - } - } catch (final IOException e) { - setFailure(String.format("%s: Error reading resource %s from bundle %s", logName(), resource, - bundle.getSymbolicName()), e); - } - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private Object serviceAdded(final ServiceReference reference) { - Object service = 
container().getBundleContext().getService(reference); - String serviceType = (String) reference.getProperty(OpendaylightNamespaceHandler.TYPE_ATTR); - - LOG.debug("{}: Service type {} added from bundle {}", logName(), serviceType, - reference.getBundle().getSymbolicName()); - - if (serviceType == null) { - LOG.error("{}: Missing OSGi service property '{}' for service interface {} in bundle {}", logName(), - OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName, reference.getBundle().getSymbolicName()); - return service; - } - - if (!expectedServiceTypes.contains(serviceType)) { - LOG.error("{}: OSGi service property '{}' for service interface {} in bundle {} was not found in the " - + "expected service types {} obtained via {} bundle resources. Is the bundle resource missing or " - + "the service type misspelled?", logName(), OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName, - reference.getBundle().getSymbolicName(), expectedServiceTypes, serviceResourcePath); - return service; - } - - // If already satisfied, meaning we got all initial services, then a new bundle must've been - // dynamically installed or a prior service's blueprint container was restarted, in which case we - // restart our container. - if (isSatisfied()) { - restartContainer(); - } else { - retrievedServiceTypes.add(serviceType); - retrievedServices.add(service); - - if (retrievedServiceTypes.equals(expectedServiceTypes)) { - LOG.debug("{}: Got all expected service types", logName()); - setSatisfied(); - } else { - Set remaining = new HashSet<>(expectedServiceTypes); - remaining.removeAll(retrievedServiceTypes); - setDependencyDesc(interfaceName + " services with types " + remaining); - } - } - - return service; - } - - @Override - public Object create() throws ComponentDefinitionException { - LOG.debug("{}: In create: interfaceName: {}", logName(), interfaceName); - - super.onCreate(); - - LOG.debug("{}: create returning service list {}", logName(), retrievedServices); - - synchronized (retrievedServices) { - return ImmutableList.copyOf(retrievedServices); - } - } - - @Override - public void destroy(final Object instance) { - super.destroy(instance); - - if (bundleTracker != null) { - bundleTracker.close(); - bundleTracker = null; - } - - if (serviceTracker != null) { - serviceTracker.close(); - serviceTracker = null; - } - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("SpecificReferenceListMetadata [interfaceName=").append(interfaceName) - .append(", serviceResourcePath=").append(serviceResourcePath).append("]"); - return builder.toString(); - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java deleted file mode 100644 index 97c04af56f..0000000000 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.blueprint.ext; - -import org.osgi.service.blueprint.container.ComponentDefinitionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory metadata corresponding to the "static-reference" element that obtains an OSGi service and - * returns the actual instance. This differs from the standard "reference" element that returns a dynamic - * proxy whose underlying service instance can come and go. - * - * @author Thomas Pantelis - */ -class StaticReferenceMetadata extends AbstractDependentComponentFactoryMetadata { - private static final Logger LOG = LoggerFactory.getLogger(StaticReferenceMetadata.class); - - private final String interfaceName; - private volatile Object retrievedService; - - StaticReferenceMetadata(final String id, final String interfaceName) { - super(id); - this.interfaceName = interfaceName; - } - - @Override - protected void startTracking() { - retrieveService(interfaceName, interfaceName, service -> { - retrievedService = service; - setSatisfied(); - }); - } - - @Override - public Object create() throws ComponentDefinitionException { - super.onCreate(); - - LOG.debug("{}: create returning service {}", logName(), retrievedService); - - return retrievedService; - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("StaticReferenceMetadata [interfaceName=").append(interfaceName).append("]"); - return builder.toString(); - } -} diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java index 525fc82c19..26246f9d08 100644 --- a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java +++ b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java @@ -29,10 +29,6 @@ import org.slf4j.LoggerFactory; class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe { private static final Logger LOG = LoggerFactory.getLogger(StaticServiceReferenceRecipe.class); - private static final SatisfactionListener NOOP_LISTENER = satisfiable -> { - // Intentional NOOP - }; - private volatile ServiceReference trackedServiceReference; private volatile Object trackedService; private Consumer serviceSatisfiedCallback; @@ -44,8 +40,10 @@ class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe { } void startTracking(final Consumer newServiceSatisfiedCallback) { - this.serviceSatisfiedCallback = newServiceSatisfiedCallback; - super.start(NOOP_LISTENER); + serviceSatisfiedCallback = newServiceSatisfiedCallback; + super.start(satisfiable -> { + // Intentional NOOP + }); } @SuppressWarnings("rawtypes") diff --git a/opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java b/opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java index 6407efa949..dedf8e1c59 100644 --- a/opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java +++ 
b/opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.blueprint.tests; -import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import org.junit.Test; import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader; @@ -21,26 +22,26 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll * @author Michael Vorburger.ch */ public class DataStoreAppConfigDefaultXMLReaderTest extends AbstractConcurrentDataBrokerTest { - @Test public void testConfigXML() throws Exception { - Lists lists = new DataStoreAppConfigDefaultXMLReader<>( - getClass(), "/opendaylight-sal-test-store-config.xml", - getDataBrokerTestCustomizer().getSchemaService(), - getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), - Lists.class).createDefaultInstance(); + Lists lists = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/opendaylight-sal-test-store-config.xml", + getDataBrokerTestCustomizer().getSchemaService(), + getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class) + .createDefaultInstance(); - UnorderedList element = lists.getUnorderedContainer().getUnorderedList().values().iterator().next(); - assertThat(element.getName()).isEqualTo("someName"); - assertThat(element.getValue()).isEqualTo("someValue"); + UnorderedList element = lists.nonnullUnorderedContainer().nonnullUnorderedList().values().iterator().next(); + assertEquals("someName", element.getName()); + assertEquals("someValue", element.getValue()); } - @Test(expected = IllegalArgumentException.class) + @Test public void testBadXMLName() throws Exception { - new DataStoreAppConfigDefaultXMLReader<>( - getClass(), "/badname.xml", - getDataBrokerTestCustomizer().getSchemaService(), - getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), - Lists.class).createDefaultInstance(); + final var reader = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/badname.xml", + getDataBrokerTestCustomizer().getSchemaService(), + getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class); + + final String message = assertThrows(IllegalArgumentException.class, reader::createDefaultInstance).getMessage(); + assertEquals("resource /badname.xml relative to " + DataStoreAppConfigDefaultXMLReaderTest.class.getName() + + " not found.", message); } } diff --git a/opendaylight/config/netty-event-executor-config/pom.xml b/opendaylight/config/netty-event-executor-config/pom.xml deleted file mode 100644 index 39d382f2cc..0000000000 --- a/opendaylight/config/netty-event-executor-config/pom.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.odlparent - bundle-parent - 9.0.12 - - - - org.opendaylight.controller - netty-event-executor-config - 0.16.0-SNAPSHOT - bundle - ${project.artifactId} - Configuration Wrapper around netty's event executor - - - - com.google.guava - guava - - - io.netty - netty-common - - - org.osgi - osgi.cmpn - - - diff --git a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java b/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java deleted file mode 100644 index b12fc67609..0000000000 --- 
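The test changes above follow a common JUnit 4.13 migration: replace @Test(expected = ...) with assertThrows so the exception message can be verified as well. A minimal self-contained example of the same shape (test name and message are invented):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class AssertThrowsStyleTest {
    @Test
    public void failureCarriesMessage() {
        final IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> {
            throw new IllegalArgumentException("resource /badname.xml not found");
        });
        // The old expected= style could only check the exception type, not its message.
        assertEquals("resource /badname.xml not found", ex.getMessage());
    }
}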
a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.eventexecutor; - -import com.google.common.reflect.AbstractInvocationHandler; -import com.google.common.reflect.Reflection; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.GlobalEventExecutor; -import io.netty.util.concurrent.ImmediateEventExecutor; -import java.lang.reflect.Method; -import java.util.concurrent.TimeUnit; - -public interface AutoCloseableEventExecutor extends EventExecutor, AutoCloseable { - - static AutoCloseableEventExecutor globalEventExecutor() { - return CloseableEventExecutorMixin.createCloseableProxy(GlobalEventExecutor.INSTANCE); - } - - static AutoCloseableEventExecutor immediateEventExecutor() { - return CloseableEventExecutorMixin.createCloseableProxy(ImmediateEventExecutor.INSTANCE); - } - - class CloseableEventExecutorMixin implements AutoCloseable { - public static final int DEFAULT_SHUTDOWN_SECONDS = 1; - private final EventExecutor eventExecutor; - - public CloseableEventExecutorMixin(final EventExecutor eventExecutor) { - this.eventExecutor = eventExecutor; - } - - @Override - @SuppressFBWarnings(value = "UC_USELESS_VOID_METHOD", justification = "False positive") - public void close() { - eventExecutor.shutdownGracefully(0, DEFAULT_SHUTDOWN_SECONDS, TimeUnit.SECONDS); - } - - static AutoCloseableEventExecutor createCloseableProxy(final EventExecutor eventExecutor) { - final CloseableEventExecutorMixin closeableEventExecutor = new CloseableEventExecutorMixin(eventExecutor); - return Reflection.newProxy(AutoCloseableEventExecutor.class, new AbstractInvocationHandler() { - @Override - protected Object handleInvocation(final Object proxy, final Method method, final Object[] args) - throws Throwable { - if (method.getName().equals("close")) { - closeableEventExecutor.close(); - return null; - } else { - return method.invoke(closeableEventExecutor.eventExecutor, args); - } - } - }); - } - } -} diff --git a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java b/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java deleted file mode 100644 index f5081eea7b..0000000000 --- a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. 
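The removed CloseableEventExecutorMixin relied on Guava's Reflection.newProxy plus AbstractInvocationHandler to graft a close() method onto an executor: close() is intercepted, everything else is delegated. A minimal sketch of the same proxy technique against a plain ExecutorService (the CloseableExecutor interface is invented for the example):

import com.google.common.reflect.AbstractInvocationHandler;
import com.google.common.reflect.Reflection;
import java.lang.reflect.Method;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;

interface CloseableExecutor extends Executor, AutoCloseable {
    @Override
    void close();
}

final class CloseableProxies {
    static CloseableExecutor closeableProxy(final ExecutorService delegate) {
        return Reflection.newProxy(CloseableExecutor.class, new AbstractInvocationHandler() {
            @Override
            protected Object handleInvocation(final Object proxy, final Method method, final Object[] args)
                    throws Throwable {
                if (method.getName().equals("close")) {
                    // Same idea as the mixin: translate close() into an orderly shutdown.
                    delegate.shutdown();
                    return null;
                }
                return method.invoke(delegate, args);
            }
        });
    }
}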
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.eventexecutor; - -import static io.netty.util.concurrent.GlobalEventExecutor.INSTANCE; - -import com.google.common.annotations.Beta; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ProgressivePromise; -import io.netty.util.concurrent.Promise; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.osgi.service.component.annotations.Activate; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.Deactivate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Beta -@Component(immediate = true, property = "type=global-event-executor") -public final class OSGiGlobalEventExecutor implements EventExecutor { - private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalEventExecutor.class); - - @Override - public boolean isShuttingDown() { - return INSTANCE.isShuttingDown(); - } - - @Override - public Future shutdownGracefully() { - return INSTANCE.shutdownGracefully(); - } - - @Override - public Future shutdownGracefully(final long quietPeriod, final long timeout, final TimeUnit unit) { - return INSTANCE.shutdownGracefully(quietPeriod, timeout, unit); - } - - @Override - public Future terminationFuture() { - return INSTANCE.terminationFuture(); - } - - @Override - @Deprecated - public void shutdown() { - INSTANCE.shutdown(); - } - - @Override - public List shutdownNow() { - return INSTANCE.shutdownNow(); - } - - @Override - public Iterator iterator() { - return INSTANCE.iterator(); - } - - @Override - public Future submit(final Runnable task) { - return INSTANCE.submit(task); - } - - @Override - public Future submit(final Runnable task, final T result) { - return INSTANCE.submit(task, result); - } - - @Override - public Future submit(final Callable task) { - return INSTANCE.submit(task); - } - - @Override - public ScheduledFuture schedule(final Runnable command, final long delay, final TimeUnit unit) { - return INSTANCE.schedule(command, delay, unit); - } - - @Override - public ScheduledFuture schedule(final Callable callable, final long delay, final TimeUnit unit) { - return INSTANCE.schedule(callable, delay, unit); - } - - @Override - public ScheduledFuture scheduleAtFixedRate(final Runnable command, final long initialDelay, final long period, - final TimeUnit unit) { - return INSTANCE.scheduleAtFixedRate(command, initialDelay, period, unit); - } - - @Override - public ScheduledFuture scheduleWithFixedDelay(final Runnable command, final long initialDelay, final long delay, - final TimeUnit unit) { - return INSTANCE.scheduleWithFixedDelay(command, initialDelay, delay, unit); - } - - @Override - public boolean isShutdown() { - return INSTANCE.isShutdown(); - } - - @Override - public boolean isTerminated() { - return INSTANCE.isTerminated(); - } - - @Override - public boolean awaitTermination(final long timeout, final TimeUnit unit) throws InterruptedException { 
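The component above simply publishes GlobalEventExecutor.INSTANCE as an EventExecutor service tagged with type=global-event-executor. Assuming the OSGi DS 1.4 constructor-injection style used elsewhere in this tree, a consumer could select it with a target filter roughly like this (the component name is invented):

import io.netty.util.concurrent.EventExecutor;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Reference;

@Component
public final class GlobalExecutorUser {
    private final EventExecutor executor;

    @Activate
    public GlobalExecutorUser(@Reference(target = "(type=global-event-executor)") final EventExecutor executor) {
        this.executor = executor;
    }

    void submitWork() {
        executor.execute(() -> System.out.println("running on the global event executor"));
    }
}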
- return INSTANCE.awaitTermination(timeout, unit); - } - - @Override - public List> invokeAll(final Collection> tasks) - throws InterruptedException { - return INSTANCE.invokeAll(tasks); - } - - @Override - public List> invokeAll(final Collection> tasks, - final long timeout, final TimeUnit unit) throws InterruptedException { - return INSTANCE.invokeAll(tasks, timeout, unit); - } - - @Override - public T invokeAny(final Collection> tasks) - throws InterruptedException, ExecutionException { - return INSTANCE.invokeAny(tasks); - } - - @Override - public T invokeAny(final Collection> tasks, final long timeout, final TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return INSTANCE.invokeAny(tasks, timeout, unit); - } - - @Override - public void execute(final Runnable command) { - INSTANCE.execute(command); - } - - @Override - public EventExecutor next() { - return INSTANCE.next(); - } - - @Override - public EventExecutorGroup parent() { - return INSTANCE.parent(); - } - - @Override - public boolean inEventLoop() { - return INSTANCE.inEventLoop(); - } - - @Override - public boolean inEventLoop(final Thread thread) { - return INSTANCE.inEventLoop(thread); - } - - @Override - public Promise newPromise() { - return INSTANCE.newPromise(); - } - - @Override - public ProgressivePromise newProgressivePromise() { - return INSTANCE.newProgressivePromise(); - } - - @Override - public Future newSucceededFuture(final V result) { - return INSTANCE.newSucceededFuture(result); - } - - @Override - public Future newFailedFuture(final Throwable cause) { - return INSTANCE.newFailedFuture(cause); - } - - @Activate - void activate() { - LOG.info("Global Event executor enabled"); - } - - @Deactivate - void deactivate() { - LOG.info("Global Event executor disabled"); - } - -} diff --git a/opendaylight/config/netty-threadgroup-config/pom.xml b/opendaylight/config/netty-threadgroup-config/pom.xml deleted file mode 100644 index 14a5c98c16..0000000000 --- a/opendaylight/config/netty-threadgroup-config/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.odlparent - bundle-parent - 9.0.12 - - - - org.opendaylight.controller - netty-threadgroup-config - 0.16.0-SNAPSHOT - bundle - ${project.artifactId} - Configuration Wrapper around netty's event group - - - - io.netty - netty-transport - - - org.osgi - osgi.cmpn - - - diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java deleted file mode 100644 index ca4bc29283..0000000000 --- a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.threadgroup; - -import io.netty.channel.nio.NioEventLoopGroup; -import java.util.concurrent.TimeUnit; - -abstract class AbstractGlobalGroup extends NioEventLoopGroup implements AutoCloseable { - AbstractGlobalGroup(final int threadCount) { - super(threadCount < 0 ? 
0 : threadCount); - } - - @Override - public final void close() { - shutdownGracefully(0, 1, TimeUnit.SECONDS); - } -} diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java deleted file mode 100644 index 174b44fdf2..0000000000 --- a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.threadgroup; - -import org.osgi.service.metatype.annotations.AttributeDefinition; -import org.osgi.service.metatype.annotations.ObjectClassDefinition; - -@ObjectClassDefinition(pid = "org.opendaylight.netty.threadgroup") -public @interface Configuration { - @AttributeDefinition(name = "global-boss-group-thread-count") - int bossThreadCount() default 0; - - @AttributeDefinition(name = "global-worker-group-thread-count") - int workerThreadCount() default 0; -} diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java deleted file mode 100644 index 5b461848e9..0000000000 --- a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.threadgroup; - -import io.netty.channel.EventLoopGroup; -import org.osgi.service.component.annotations.Activate; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.Deactivate; -import org.osgi.service.metatype.annotations.Designate; - -@Component(immediate = true, service = EventLoopGroup.class, property = "type=global-boss-group") -@Designate(ocd = Configuration.class) -public final class GlobalBossGroup extends AbstractGlobalGroup { - @Activate - public GlobalBossGroup(final Configuration configuration) { - super(configuration.bossThreadCount()); - } - - @Deactivate - void deactivate() { - close(); - } -} diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java deleted file mode 100644 index 4a9f46ed6b..0000000000 --- a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.threadgroup; - -import io.netty.channel.EventLoopGroup; -import org.osgi.service.component.annotations.Activate; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.Deactivate; -import org.osgi.service.metatype.annotations.Designate; - -@Component(immediate = true, service = EventLoopGroup.class, property = "type=global-worker-group") -@Designate(ocd = Configuration.class) -public final class GlobalWorkerGroup extends AbstractGlobalGroup { - @Activate - public GlobalWorkerGroup(final Configuration configuration) { - super(configuration.workerThreadCount()); - } - - @Deactivate - void deactivate() { - close(); - } -} diff --git a/opendaylight/config/netty-timer-config/pom.xml b/opendaylight/config/netty-timer-config/pom.xml deleted file mode 100644 index a740ba2ab9..0000000000 --- a/opendaylight/config/netty-timer-config/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.odlparent - bundle-parent - 9.0.12 - - - - org.opendaylight.controller - netty-timer-config - 0.16.0-SNAPSHOT - bundle - ${project.artifactId} - Configuration Wrapper around netty's timer - - - - io.netty - netty-common - - - org.osgi - osgi.cmpn - - - diff --git a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java b/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java deleted file mode 100644 index 7bc1352164..0000000000 --- a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java +++ 
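GlobalBossGroup and GlobalWorkerGroup above exist to feed Netty's usual accept/IO split. Purely for context (not part of the patch), this is the shape of a server bootstrap consuming such a pair; thread counts of 0 leave sizing to Netty, matching the Configuration defaults:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

final class BossWorkerSketch {
    static ServerBootstrap bootstrap(final EventLoopGroup boss, final EventLoopGroup worker) {
        return new ServerBootstrap()
            .group(boss, worker)
            .channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(final SocketChannel ch) {
                    // a real consumer would add its channel handlers here
                }
            });
    }

    public static void main(final String[] args) {
        final EventLoopGroup boss = new NioEventLoopGroup(1);
        final EventLoopGroup worker = new NioEventLoopGroup(0); // 0 = Netty's default sizing
        bootstrap(boss, worker);
        boss.shutdownGracefully();
        worker.shutdownGracefully();
    }
}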
/dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.timer; - -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.TimerTask; -import java.util.Set; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import org.eclipse.jdt.annotation.Nullable; - -public final class HashedWheelTimerCloseable implements AutoCloseable, Timer { - - private final Timer timer; - - private HashedWheelTimerCloseable(final Timer timer) { - this.timer = timer; - } - - @Override - public void close() { - stop(); - } - - @Override - public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) { - return this.timer.newTimeout(task, delay, unit); - } - - @Override - public Set stop() { - return this.timer.stop(); - } - - public static HashedWheelTimerCloseable newInstance(final @Nullable Long duration, - final @Nullable Integer ticksPerWheel) { - return newInstance(null, duration, ticksPerWheel); - } - - public static HashedWheelTimerCloseable newInstance(final @Nullable ThreadFactory threadFactory, - final @Nullable Long duration, final @Nullable Integer ticksPerWheel) { - TimeUnit unit = TimeUnit.MILLISECONDS; - if (!nullOrNonPositive(duration) && threadFactory == null && nullOrNonPositive(ticksPerWheel)) { - return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit)); - } - - if (!nullOrNonPositive(duration) && threadFactory == null && !nullOrNonPositive(ticksPerWheel)) { - return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit, ticksPerWheel)); - } - - if (nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) { - return new HashedWheelTimerCloseable(new HashedWheelTimer(threadFactory)); - } - - if (!nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) { - return new HashedWheelTimerCloseable( - new HashedWheelTimer(threadFactory, duration, unit)); - } - - if (!nullOrNonPositive(duration) && threadFactory != null && !nullOrNonPositive(ticksPerWheel)) { - return new HashedWheelTimerCloseable( - new HashedWheelTimer(threadFactory, duration, unit, ticksPerWheel)); - } - - return new HashedWheelTimerCloseable(new HashedWheelTimer()); - } - - private static boolean nullOrNonPositive(final Number num) { - return num == null || num.longValue() <= 0; - } -} diff --git a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java b/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java deleted file mode 100644 index 9613000792..0000000000 --- a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. 
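HashedWheelTimerCloseable.newInstance above only chooses between HashedWheelTimer constructors based on which of tick duration, ticks-per-wheel and thread factory were supplied. For reference, the direct Netty usage it wraps looks like this (the values are arbitrary illustration numbers):

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timer;
import java.util.concurrent.TimeUnit;

final class WheelTimerSketch {
    public static void main(final String[] args) throws InterruptedException {
        // 100 ms tick duration, 512 buckets on the wheel.
        final Timer timer = new HashedWheelTimer(100, TimeUnit.MILLISECONDS, 512);
        timer.newTimeout(timeout -> System.out.println("fired after ~1s"), 1, TimeUnit.SECONDS);
        Thread.sleep(1500);
        timer.stop(); // returns any timeouts that never fired
    }
}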
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.yang.netty.timer; - -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.TimerTask; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.osgi.service.component.annotations.Activate; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.Deactivate; -import org.osgi.service.metatype.annotations.AttributeDefinition; -import org.osgi.service.metatype.annotations.Designate; -import org.osgi.service.metatype.annotations.ObjectClassDefinition; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Component(immediate = true, configurationPid = "org.opendaylight.netty.timer", property = "type=global-timer") -@Designate(ocd = OSGiGlobalTimer.Config.class) -public final class OSGiGlobalTimer implements Timer { - @ObjectClassDefinition - public @interface Config { - @AttributeDefinition(name = "tick-duration") - long tickDuration() default 0; - @AttributeDefinition(name = "ticks-per-wheel") - int ticksPerWheel() default 0; - } - - private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalTimer.class); - - private Timer delegate; - - @Override - public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) { - return delegate.newTimeout(task, delay, unit); - } - - @Override - public Set stop() { - return delegate.stop(); - } - - @Activate - void activate(final Config config) { - delegate = HashedWheelTimerCloseable.newInstance(config.tickDuration(), config.ticksPerWheel()); - LOG.info("Global Netty timer started"); - } - - @Deactivate - void deactivate() { - delegate.stop(); - LOG.info("Global Netty timer stopped"); - } -} diff --git a/opendaylight/config/pom.xml b/opendaylight/config/pom.xml deleted file mode 100644 index f61ce5051e..0000000000 --- a/opendaylight/config/pom.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - 4.0.0 - - org.opendaylight.odlparent - odlparent-lite - 9.0.12 - - - - org.opendaylight.controller - config-aggregator - 0.16.0-SNAPSHOT - pom - - - true - true - - - - threadpool-config-api - threadpool-config-impl - netty-threadgroup-config - netty-event-executor-config - netty-timer-config - - diff --git a/opendaylight/config/threadpool-config-api/pom.xml b/opendaylight/config/threadpool-config-api/pom.xml deleted file mode 100644 index 81c069f4f5..0000000000 --- a/opendaylight/config/threadpool-config-api/pom.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.odlparent - bundle-parent - 9.0.12 - - - - org.opendaylight.controller - threadpool-config-api - 0.16.0-SNAPSHOT - bundle - ${project.artifactId} - diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java deleted file mode 100644 index 79ed26b2d2..0000000000 --- a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.config.threadpool; - -import java.util.concurrent.ScheduledExecutorService; - -/** - * Interface representing scheduled {@link ThreadPool}. - */ -public interface ScheduledThreadPool extends ThreadPool { - - @Override - ScheduledExecutorService getExecutor(); -} \ No newline at end of file diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java deleted file mode 100644 index 68ecb80fe3..0000000000 --- a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.config.threadpool; - -import java.util.concurrent.ExecutorService; - -/** - * Interface representing thread pool. - */ -public interface ThreadPool { - - ExecutorService getExecutor(); - - int getMaxThreadCount(); -} \ No newline at end of file diff --git a/opendaylight/config/threadpool-config-impl/pom.xml b/opendaylight/config/threadpool-config-impl/pom.xml deleted file mode 100644 index b506f8f058..0000000000 --- a/opendaylight/config/threadpool-config-impl/pom.xml +++ /dev/null @@ -1,40 +0,0 @@ - - - - 4.0.0 - - org.opendaylight.odlparent - bundle-parent - 9.0.12 - - - - org.opendaylight.controller - threadpool-config-impl - 0.16.0-SNAPSHOT - bundle - ${project.artifactId} - - - - - org.opendaylight.controller - controller-artifacts - 5.0.0-SNAPSHOT - pom - import - - - - - - - ${project.groupId} - threadpool-config-api - - - com.google.guava - guava - - - diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java deleted file mode 100644 index 2dad26490b..0000000000 --- a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.config.threadpool.util; - -import java.io.Closeable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import org.opendaylight.controller.config.threadpool.ThreadPool; - -/** - * Implementation of {@link ThreadPool} using fixed number of threads wraps - * {@link ExecutorService}. 
- */ -public class FixedThreadPoolWrapper implements ThreadPool, Closeable { - - private final ThreadPoolExecutor executor; - - public FixedThreadPoolWrapper(int threadCount, ThreadFactory factory) { - this.executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, factory); - executor.prestartAllCoreThreads(); - } - - @Override - public ExecutorService getExecutor() { - return Executors.unconfigurableExecutorService(executor); - } - - @Override - public void close() { - executor.shutdown(); - } - - @Override - public int getMaxThreadCount() { - return executor.getMaximumPoolSize(); - } - - public void setMaxThreadCount(int maxThreadCount) { - executor.setCorePoolSize(maxThreadCount); - executor.setMaximumPoolSize(maxThreadCount); - } -} diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java deleted file mode 100644 index 9949e36d37..0000000000 --- a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.threadpool.util; - -import java.io.Closeable; -import java.util.OptionalInt; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RejectedExecutionHandler; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import org.opendaylight.controller.config.threadpool.ThreadPool; - -/** - * Implementation of {@link ThreadPool} using flexible number of threads wraps - * {@link ExecutorService}. 
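FixedThreadPoolWrapper.getExecutor() above deliberately hands out Executors.unconfigurableExecutorService(executor) rather than the ThreadPoolExecutor itself, so only the wrapper can re-tune pool sizes. A small stand-alone demonstration of that effect (not project code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

final class UnconfigurableSketch {
    public static void main(final String[] args) {
        final ThreadPoolExecutor real = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
        final ExecutorService exposed = Executors.unconfigurableExecutorService(real);

        // Callers cannot cast the exposed view back to ThreadPoolExecutor to change sizing.
        System.out.println(exposed instanceof ThreadPoolExecutor); // false
        // The owner keeps the real instance and can still adjust it.
        real.setMaximumPoolSize(4);
        exposed.shutdown();
    }
}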
- */ -public class FlexibleThreadPoolWrapper implements ThreadPool, Closeable { - private final ThreadPoolExecutor executor; - - public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive, - final TimeUnit timeUnit, final ThreadFactory threadFactory) { - this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(OptionalInt.empty())); - } - - public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive, - final TimeUnit timeUnit, final ThreadFactory threadFactory, final OptionalInt queueCapacity) { - this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(queueCapacity)); - } - - private FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive, - final TimeUnit timeUnit, final ThreadFactory threadFactory, final BlockingQueue queue) { - - executor = new ThreadPoolExecutor(minThreadCount, maxThreadCount, keepAlive, timeUnit, - queue, threadFactory, new FlexibleRejectionHandler()); - executor.prestartAllCoreThreads(); - } - - /** - * Overriding the queue: - * ThreadPoolExecutor would not create new threads if the queue is not full, thus adding - * occurs in RejectedExecutionHandler. - * This impl saturates threadpool first, then queue. When both are full caller will get blocked. - */ - private static ForwardingBlockingQueue getQueue(final OptionalInt capacity) { - final BlockingQueue delegate = capacity.isPresent() ? new LinkedBlockingQueue<>(capacity.getAsInt()) - : new LinkedBlockingQueue<>(); - return new ForwardingBlockingQueue(delegate); - } - - @Override - public ExecutorService getExecutor() { - return Executors.unconfigurableExecutorService(executor); - } - - public int getMinThreadCount() { - return executor.getCorePoolSize(); - } - - public void setMinThreadCount(final int minThreadCount) { - executor.setCorePoolSize(minThreadCount); - } - - @Override - public int getMaxThreadCount() { - return executor.getMaximumPoolSize(); - } - - public void setMaxThreadCount(final int maxThreadCount) { - executor.setMaximumPoolSize(maxThreadCount); - } - - public long getKeepAliveMillis() { - return executor.getKeepAliveTime(TimeUnit.MILLISECONDS); - } - - public void setKeepAliveMillis(final long keepAliveMillis) { - executor.setKeepAliveTime(keepAliveMillis, TimeUnit.MILLISECONDS); - } - - public void setThreadFactory(final ThreadFactory threadFactory) { - executor.setThreadFactory(threadFactory); - } - - public void prestartAllCoreThreads() { - executor.prestartAllCoreThreads(); - } - - @Override - public void close() { - executor.shutdown(); - } - - /** - * if the max threads are met, then it will raise a rejectedExecution. We then push to the queue. 
- */ - private static class FlexibleRejectionHandler implements RejectedExecutionHandler { - @Override - @SuppressWarnings("checkstyle:parameterName") - public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) { - try { - executor.getQueue().put(r); - } catch (InterruptedException e) { - throw new RejectedExecutionException("Interrupted while waiting on the queue", e); - } - } - } - - private static class ForwardingBlockingQueue - extends com.google.common.util.concurrent.ForwardingBlockingQueue { - private final BlockingQueue delegate; - - ForwardingBlockingQueue(final BlockingQueue delegate) { - this.delegate = delegate; - } - - @Override - protected BlockingQueue delegate() { - return delegate; - } - - @Override - @SuppressWarnings("checkstyle:parameterName") - public boolean offer(final Runnable o) { - // ThreadPoolExecutor will spawn a new thread after core size is reached only - // if the queue.offer returns false. - return false; - } - } -} diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java deleted file mode 100644 index 0efa4824ae..0000000000 --- a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.config.threadpool.util; - -import static java.util.Objects.requireNonNull; - -import java.io.Closeable; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicLong; - -/** - * Implementation of {@link ThreadFactory}. This class is thread-safe. - */ -public class NamingThreadPoolFactory implements ThreadFactory, Closeable { - - private final ThreadGroup group; - private final String namePrefix; - private final AtomicLong threadName = new AtomicLong(); - - public NamingThreadPoolFactory(final String namePrefix) { - this.namePrefix = requireNonNull(namePrefix); - this.group = new ThreadGroup(namePrefix); - } - - @Override - @SuppressWarnings("checkstyle:parameterName") - public Thread newThread(final Runnable r) { - return new Thread(group, r, String.format("%s-%d", group.getName(), threadName.incrementAndGet())); - } - - @Override - public void close() { - } - - public String getNamePrefix() { - return namePrefix; - } -} diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java deleted file mode 100644 index 648bd82ab0..0000000000 --- a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. 
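The ForwardingBlockingQueue/FlexibleRejectionHandler pair above implements the "saturate the pool before the queue" trick the javadoc describes: offer() always refuses, so ThreadPoolExecutor keeps spawning threads up to the maximum, and the rejection handler then parks the task in the queue, blocking the caller only when both are full. A stand-alone restatement with a plain LinkedBlockingQueue (not the removed class itself):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class SaturatePoolFirst {
    static ThreadPoolExecutor newExecutor(final int core, final int max, final int queueCapacity) {
        final LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(queueCapacity) {
            @Override
            public boolean offer(final Runnable task) {
                // Refusing here makes ThreadPoolExecutor grow to max before queueing anything.
                return false;
            }
        };
        return new ThreadPoolExecutor(core, max, 30, TimeUnit.SECONDS, queue, (task, executor) -> {
            try {
                // Pool is saturated: actually enqueue now, blocking the caller if the queue is full.
                executor.getQueue().put(task);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
            }
        });
    }
}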
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.config.threadpool.util; - -import java.io.Closeable; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; -import org.opendaylight.controller.config.threadpool.ScheduledThreadPool; - -/** - * Implementation of {@link ScheduledThreadPool} wraps - * {@link ScheduledExecutorService}. - */ -public class ScheduledThreadPoolWrapper implements ScheduledThreadPool, Closeable { - - private final ScheduledThreadPoolExecutor executor; - private final int threadCount; - - public ScheduledThreadPoolWrapper(int threadCount, ThreadFactory factory) { - this.threadCount = threadCount; - this.executor = new ScheduledThreadPoolExecutor(threadCount, factory); - executor.prestartAllCoreThreads(); - } - - @Override - public ScheduledExecutorService getExecutor() { - return Executors.unconfigurableScheduledExecutorService(executor); - } - - @Override - public void close() { - executor.shutdown(); - } - - @Override - public int getMaxThreadCount() { - return threadCount; - } - -} diff --git a/opendaylight/md-sal/cds-access-api/pom.xml b/opendaylight/md-sal/cds-access-api/pom.xml index 3d53e0f019..fb0bc37268 100644 --- a/opendaylight/md-sal/cds-access-api/pom.xml +++ b/opendaylight/md-sal/cds-access-api/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -13,14 +13,18 @@ - org.opendaylight.controller - repackaged-akka + com.github.spotbugs + spotbugs-annotations + true com.google.guava guava - + + org.eclipse.jdt + org.eclipse.jdt.annotation + org.opendaylight.yangtools concepts @@ -37,28 +41,58 @@ org.opendaylight.yangtools yang-data-impl + + org.opendaylight.yangtools + yang-data-spi + org.opendaylight.yangtools yang-data-codec-binfmt + + org.opendaylight.yangtools + yang-data-tree-api + + + org.opendaylight.controller + repackaged-akka + + + org.scala-lang + scala-library + + + org.apache.commons + commons-lang3 + test + org.opendaylight.yangtools mockito-configuration - com.typesafe.akka - akka-testkit_2.13 + org.opendaylight.yangtools + yang-data-tree-ri + test - commons-lang - commons-lang - test + com.typesafe.akka + akka-testkit_2.13 + + + + maven-javadoc-plugin + 3.1.1 + + + + org.apache.felix diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java index cfaf477f8f..4658f3b754 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access; import static com.google.common.base.Preconditions.checkArgument; -import com.google.common.annotations.Beta; import com.google.common.annotations.VisibleForTesting; import java.io.DataInput; import java.io.DataOutput; @@ -22,10 +21,7 @@ import org.slf4j.LoggerFactory; /** * Enumeration of all ABI versions supported by this implementation of the client access API. 
- * - * @author Robert Varga */ -@Beta public enum ABIVersion implements WritableObject { // NOTE: enumeration values need to be sorted in ascending order of their version to keep Comparable working @@ -40,44 +36,20 @@ public enum ABIVersion implements WritableObject { } }, + // BORON was 5 + // NEON_SR2 was 6 + // SODIUM_SR1 was 7 + // MAGNESIUM was 8 + // CHLORINE_SR2 was 9 + /** - * Initial ABI version, as shipped with Boron Simultaneous release. - */ - // We seed the initial version to be the same as DataStoreVersions.BORON-VERSION for compatibility reasons. - BORON(5) { - @Override - public NormalizedNodeStreamVersion getStreamVersion() { - return NormalizedNodeStreamVersion.LITHIUM; - } - }, - /** - * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape - * are using {@link NormalizedNodeStreamVersion#NEON_SR2}, which improves encoding. - */ - NEON_SR2(6) { - @Override - public NormalizedNodeStreamVersion getStreamVersion() { - return NormalizedNodeStreamVersion.NEON_SR2; - } - }, - /** - * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape - * are using {@link NormalizedNodeStreamVersion#SODIUM_SR1}, which improves encoding. - */ - SODIUM_SR1(7) { - @Override - public NormalizedNodeStreamVersion getStreamVersion() { - return NormalizedNodeStreamVersion.SODIUM_SR1; - } - }, - /** - * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape - * are using {@link NormalizedNodeStreamVersion#MAGNESIUM}, which improves encoding. + * Oldest ABI version we support. The messages remain the same as {@code CHLORINE_SR2}, the serialization proxies in + * use are flat objects without any superclasses. Data encoding does not include augmentations as separate objects. */ - MAGNESIUM(8) { + POTASSIUM(10) { @Override public NormalizedNodeStreamVersion getStreamVersion() { - return NormalizedNodeStreamVersion.MAGNESIUM; + return NormalizedNodeStreamVersion.POTASSIUM; } }, @@ -117,7 +89,7 @@ public enum ABIVersion implements WritableObject { * @return Current {@link ABIVersion} */ public static @NonNull ABIVersion current() { - return SODIUM_SR1; + return POTASSIUM; } /** @@ -130,24 +102,22 @@ public enum ABIVersion implements WritableObject { * @throws PastVersionException if the specified integer identifies a past version which is no longer supported */ public static @NonNull ABIVersion valueOf(final short value) throws FutureVersionException, PastVersionException { - switch (Short.toUnsignedInt(value)) { - case 0: - case 1: - case 2: - case 3: - case 4: - throw new PastVersionException(value, BORON); - case 5: - return BORON; - case 6: - return NEON_SR2; - case 7: - return SODIUM_SR1; - case 8: - return MAGNESIUM; - default: - throw new FutureVersionException(value, MAGNESIUM); - } + return switch (Short.toUnsignedInt(value)) { + case 0, 1, 2, 3, 4, 6, 7, 8, 9 -> throw new PastVersionException(value, POTASSIUM); + case 10 -> POTASSIUM; + default -> throw new FutureVersionException(value, POTASSIUM); + }; + } + + /** + * Return {@code true} if this version is earier than some {@code other} version. 
+ * + * @param other Other {@link ABIVersion} + * @return {@code true} if {@code other is later} + * @throws NullPointerException if {@code other} is null + */ + public boolean lt(final @NonNull ABIVersion other) { + return compareTo(other) < 0; } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java index 0567ef1191..1555b59501 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java @@ -9,17 +9,15 @@ package org.opendaylight.controller.cluster.access; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import org.eclipse.jdt.annotation.NonNull; /** * Abstract base exception used for reporting version mismatches from {@link ABIVersion}. - * - * @author Robert Varga */ -@Beta public abstract class AbstractVersionException extends Exception { + @java.io.Serial private static final long serialVersionUID = 1L; + private final @NonNull ABIVersion closestVersion; private final int version; @@ -34,7 +32,7 @@ public abstract class AbstractVersionException extends Exception { * * @return Numeric version */ - public final int getVersion() { + public final int version() { return version; } @@ -43,8 +41,7 @@ public abstract class AbstractVersionException extends Exception { * * @return Closest supported {@link ABIVersion} */ - public final @NonNull ABIVersion getClosestVersion() { + public final @NonNull ABIVersion closestVersion() { return closestVersion; } - } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java index d5f132a7b8..f0ceaa4890 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java @@ -7,19 +7,15 @@ */ package org.opendaylight.controller.cluster.access; -import com.google.common.annotations.Beta; - /** * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too new to be supported * by the codebase. 
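With the switch in valueOf() reduced to POTASSIUM and the accessors renamed to version()/closestVersion(), a caller would typically negotiate by catching the two exceptions; lt() is the new comparison helper. A hedged sketch of such a caller (the class and method names here are invented):

import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.FutureVersionException;
import org.opendaylight.controller.cluster.access.PastVersionException;

final class VersionNegotiation {
    static ABIVersion negotiate(final short remote) {
        try {
            return ABIVersion.valueOf(remote);
        } catch (FutureVersionException e) {
            // Peer is newer than this codebase: fall back to the closest version both sides know.
            return e.closestVersion();
        } catch (PastVersionException e) {
            // Peer is older than anything still supported.
            throw new IllegalArgumentException("Unsupported peer version " + e.version(), e);
        }
    }

    static boolean needsCompatibilityPath(final ABIVersion negotiated) {
        return negotiated.lt(ABIVersion.current());
    }
}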
- * - * @author Robert Varga */ -@Beta public final class FutureVersionException extends AbstractVersionException { + @java.io.Serial private static final long serialVersionUID = 1L; - FutureVersionException(final short version, ABIVersion closest) { + FutureVersionException(final short version, final ABIVersion closest) { super("Version " + Short.toUnsignedInt(version) + " is too new", version, closest); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java index c8cbe54b3d..c333d3495e 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java @@ -7,16 +7,12 @@ */ package org.opendaylight.controller.cluster.access; -import com.google.common.annotations.Beta; - /** * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too old and no longer * supported by the codebase. - * - * @author Robert Varga */ -@Beta public final class PastVersionException extends AbstractVersionException { + @java.io.Serial private static final long serialVersionUID = 1L; PastVersionException(final short version, final ABIVersion closest) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java index 3898ee22b3..9e2998c5b2 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java @@ -8,7 +8,6 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * Request to abort a local transaction. Since local transactions do not introduce state on the backend until they * are ready, the purpose of this message is to inform the backend that a message identifier has been used. This is * not important for single transactions, but is critical to ensure transaction ordering within local histories. 
- * - * @author Robert Varga */ -@Beta public final class AbortLocalTransactionRequest extends AbstractLocalTransactionRequest { + @java.io.Serial private static final long serialVersionUID = 1L; public AbortLocalTransactionRequest(final @NonNull TransactionIdentifier identifier, diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java deleted file mode 100644 index f6ba2e7970..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; - -/** - * Abstract base class for serialization proxies associated with {@link LocalHistoryRequest}s. - * - * @author Robert Varga - * - * @param Message type - */ -abstract class AbstractLocalHistoryRequestProxy> - extends AbstractRequestProxy { - private static final long serialVersionUID = 1L; - - protected AbstractLocalHistoryRequestProxy() { - // For Externalizable - } - - AbstractLocalHistoryRequestProxy(final T request) { - super(request); - } - - @Override - protected final LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { - return LocalHistoryIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java index efc0e856b2..5831e65c11 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java @@ -22,6 +22,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier */ public abstract class AbstractLocalTransactionRequest> extends TransactionRequest { + @java.io.Serial private static final long serialVersionUID = 1L; AbstractLocalTransactionRequest(final TransactionIdentifier identifier, final long sequence, @@ -30,7 +31,7 @@ public abstract class AbstractLocalTransactionRequest externalizableProxy(final ABIVersion version) { + protected final SerialForm externalizableProxy(final ABIVersion version) { throw new UnsupportedOperationException("Local transaction request " + this + " should never be serialized"); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java 
b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java index fa53a599aa..2b4ee0e7e8 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java @@ -10,12 +10,15 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; /** * Abstract base class for {@link TransactionRequest}s accessing data as visible in the isolated context of a particular @@ -25,13 +28,33 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; * This class is visible outside of this package for the purpose of allowing common instanceof checks * and simplified codepaths. * - * @author Robert Varga - * * @param Message type */ -@Beta public abstract class AbstractReadPathTransactionRequest> extends AbstractReadTransactionRequest { + interface SerialForm> + extends AbstractReadTransactionRequest.SerialForm { + + @Override + default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence, + final ActorRef replyTo, final boolean snapshotOnly) throws IOException { + return readExternal(in, target, sequence, replyTo, snapshotOnly, + NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier()); + } + + @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence, + @NonNull ActorRef replyTo, boolean snapshotOnly, @NonNull YangInstanceIdentifier path) throws IOException; + + @Override + default void writeExternal(final ObjectOutput out, final T msg) throws IOException { + AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg); + try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) { + nnout.writeYangInstanceIdentifier(msg.getPath()); + } + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull YangInstanceIdentifier path; @@ -57,5 +80,5 @@ public abstract class AbstractReadPathTransactionRequest externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java deleted file mode 100644 index 2cd4ec9e73..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. 
All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion; - -/** - * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements - * the initial (Boron) serialization format. - * - * @author Robert Varga - * - * @param Message type - */ -abstract class AbstractReadPathTransactionRequestProxyV1> - extends AbstractReadTransactionRequestProxyV1 { - private static final long serialVersionUID = 1L; - - private YangInstanceIdentifier path; - private transient NormalizedNodeStreamVersion streamVersion; - - protected AbstractReadPathTransactionRequestProxyV1() { - // For Externalizable - } - - AbstractReadPathTransactionRequestProxyV1(final T request) { - super(request); - path = request.getPath(); - streamVersion = request.getVersion().getStreamVersion(); - } - - @Override - public final void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) { - nnout.writeYangInstanceIdentifier(path); - } - } - - @Override - public final void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException { - super.readExternal(in); - path = NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier(); - } - - @Override - protected final T createReadRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo, final boolean snapshotOnly) { - return createReadPathRequest(target, sequence, replyTo, path, snapshotOnly); - } - - abstract T createReadPathRequest(TransactionIdentifier target, long sequence, ActorRef replyTo, - YangInstanceIdentifier requestPath, boolean snapshotOnly); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java index 3fc4821edf..23fdd85140 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java @@ -8,8 +8,11 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import 
org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -20,13 +23,28 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * This class is visible outside of this package for the purpose of allowing common instanceof checks * and simplified codepaths. * - * @author Robert Varga - * * @param Message type */ -@Beta public abstract class AbstractReadTransactionRequest> extends TransactionRequest { + interface SerialForm> extends TransactionRequest.SerialForm { + @Override + default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence, + final ActorRef replyTo) throws IOException { + return readExternal(in, target, sequence, replyTo, in.readBoolean()); + } + + @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence, + @NonNull ActorRef replyTo, boolean snapshotOnly) throws IOException; + + @Override + default void writeExternal(final ObjectOutput out, final T msg) throws IOException { + TransactionRequest.SerialForm.super.writeExternal(out, msg); + out.writeBoolean(msg.isSnapshotOnly()); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final boolean snapshotOnly; @@ -52,5 +70,5 @@ public abstract class AbstractReadTransactionRequest externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java deleted file mode 100644 index 88820bd1f5..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements - * the initial (Boron) serialization format. 
- * - * @author Robert Varga - * - * @param Message type - */ -abstract class AbstractReadTransactionRequestProxyV1> - extends AbstractTransactionRequestProxy { - private static final long serialVersionUID = 1L; - private boolean snapshotOnly; - - protected AbstractReadTransactionRequestProxyV1() { - // For Externalizable - } - - AbstractReadTransactionRequestProxyV1(final T request) { - super(request); - snapshotOnly = request.isSnapshotOnly(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - out.writeBoolean(snapshotOnly); - } - - @Override - public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException { - super.readExternal(in); - snapshotOnly = in.readBoolean(); - } - - @Override - protected final T createRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) { - return createReadRequest(target, sequence, replyTo, snapshotOnly); - } - - @SuppressWarnings("checkstyle:hiddenField") - abstract T createReadRequest(TransactionIdentifier target, long sequence, ActorRef replyTo, boolean snapshotOnly); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java deleted file mode 100644 index 437d281e18..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Abstract base class for serialization proxies associated with {@link TransactionRequest}s. - * - * @author Robert Varga - * - * @param Message type - */ -abstract class AbstractTransactionRequestProxy> - extends AbstractRequestProxy { - private static final long serialVersionUID = 1L; - - protected AbstractTransactionRequestProxy() { - // For Externalizable - } - - AbstractTransactionRequestProxy(final T request) { - super(request); - } - - @Override - protected final TransactionIdentifier readTarget(final DataInput in) throws IOException { - return TransactionIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java deleted file mode 100644 index a1284b703f..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Abstract base class for serialization proxies associated with {@link TransactionSuccess}es. - * - * @author Robert Varga - * - * @param Message type - */ -abstract class AbstractTransactionSuccessProxy> - extends AbstractSuccessProxy { - private static final long serialVersionUID = 1L; - - protected AbstractTransactionSuccessProxy() { - // For Externalizable - } - - AbstractTransactionSuccessProxy(final T request) { - super(request); - } - - @Override - protected final TransactionIdentifier readTarget(final DataInput in) throws IOException { - return TransactionIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java new file mode 100644 index 0000000000..ea9c37e3fd --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ConnectClientFailure}. It implements the Chlorine SR2 serialization format. + */ +final class CCF implements ConnectClientFailure.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ConnectClientFailure message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CCF() { + // for Externalizable + } + + CCF(final ConnectClientFailure request) { + message = requireNonNull(request); + } + + @Override + public ConnectClientFailure message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ConnectClientFailure message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java new file mode 100644 index 0000000000..ace94d579c --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
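The CCF, CCR and CCS classes added above are the new Chlorine SR2 serial proxies: each one only stores its message and hands it back from readResolve(). For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the usual writeReplace()/readResolve() pairing such proxies rely on (the writeReplace side is not shown in this excerpt); the Message and Proxy names and fields are illustrative only and do not come from this patch.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Illustrative only: a message that is never serialized directly, but via a tiny proxy.
final class Message implements Serializable {
    private static final long serialVersionUID = 1L;
    final String target;
    final long sequence;

    Message(final String target, final long sequence) {
        this.target = target;
        this.sequence = sequence;
    }

    // Serialization always goes through the proxy ...
    private Object writeReplace() {
        return new Proxy(this);
    }
}

// ... and deserializing the proxy resolves back to the message.
final class Proxy implements Externalizable {
    private static final long serialVersionUID = 1L;
    private Message message;

    public Proxy() {
        // for Externalizable
    }

    Proxy(final Message message) {
        this.message = message;
    }

    @Override
    public void writeExternal(final ObjectOutput out) throws IOException {
        out.writeUTF(message.target);
        out.writeLong(message.sequence);
    }

    @Override
    public void readExternal(final ObjectInput in) throws IOException {
        message = new Message(in.readUTF(), in.readLong());
    }

    private Object readResolve() {
        return message;
    }
}

public final class ProxyDemo {
    public static void main(final String[] args) throws Exception {
        final var bos = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bos)) {
            oos.writeObject(new Message("member-1-frontend", 0));
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            final var copy = (Message) ois.readObject();
            System.out.println(copy.target + " seq=" + copy.sequence);
        }
    }
}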
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the Chlorine SR2 serialization format. + */ +final class CCR implements ConnectClientRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ConnectClientRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CCR() { + // for Externalizable + } + + CCR(final ConnectClientRequest request) { + message = requireNonNull(request); + } + + @Override + public ConnectClientRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ConnectClientRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java new file mode 100644 index 0000000000..ea425e5d2d --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the Chlorine SR2 serialization format. + */ +final class CCS implements ConnectClientSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ConnectClientSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CCS() { + // for Externalizable + } + + CCS(final ConnectClientSuccess request) { + message = requireNonNull(request); + } + + @Override + public ConnectClientSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ConnectClientSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java new file mode 100644 index 0000000000..da3fd132d1 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class CHR implements CreateLocalHistoryRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private CreateLocalHistoryRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CHR() { + // For Externalizable + } + + CHR(final CreateLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public CreateLocalHistoryRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final CreateLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java index ece4720564..67b1a40408 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; /** @@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; * been closed, either via a successful commit or abort (which is indicated via {@link #isSuccessful()}. This can * happen if the corresponding journal record is replicated, but the message to the frontend gets lost and the backed * leader moved before the frontend retried the corresponding request. 
- * - * @author Robert Varga */ -@Beta public final class ClosedTransactionException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; private final boolean successful; diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java index adef0c31bc..7a3f771b47 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java @@ -10,29 +10,26 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; import java.util.Optional; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * Request to commit a local transaction. Since local transactions do not introduce state on the backend until they * are ready, this message carries a complete set of modifications. - * - * @author Robert Varga */ -@Beta public final class CommitLocalTransactionRequest extends AbstractLocalTransactionRequest { + @java.io.Serial private static final long serialVersionUID = 1L; - @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class " - + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class " - + "aren't serialized. 
FindBugs does not recognize this.") private final DataTreeModification mod; private final Exception delayedFailure; private final boolean coordinated; @@ -77,4 +74,19 @@ public final class CommitLocalTransactionRequest return super.addToStringAttributes(toStringHelper).add("coordinated", coordinated) .add("delayedError", delayedFailure); } + + @java.io.Serial + private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException { + throwNSE(); + } + + @java.io.Serial + private void readObjectNoData() throws ObjectStreamException { + throwNSE(); + } + + @java.io.Serial + private void writeObject(final ObjectOutputStream stream) throws IOException { + throwNSE(); + } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java index 46b460ac0d..1157d1b6f8 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java @@ -7,20 +7,31 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RequestFailure; /** * A {@link RequestFailure} reported when {@link ConnectClientRequest} fails. - * - * @author Robert Varga */ -@Beta public final class ConnectClientFailure extends RequestFailure { + interface SerialForm extends RequestFailure.SerialForm { + @Override + default ClientIdentifier readTarget(final DataInput in) throws IOException { + return ClientIdentifier.readFrom(in); + } + + @Override + default ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence, + final RequestException cause) { + return new ConnectClientFailure(target, sequence, cause); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; ConnectClientFailure(final ClientIdentifier target, final long sequence, final RequestException cause) { @@ -32,9 +43,8 @@ public final class ConnectClientFailure extends RequestFailure externalizableProxy( - final ABIVersion version) { - return new ConnectClientFailureProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new CCF(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java deleted file mode 100644 index 55efb283c0..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
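CommitLocalTransactionRequest above swaps the SuppressFBWarnings annotation for explicit readObject(), readObjectNoData() and writeObject() hooks that call throwNSE(), so any attempt at default Java serialization of this local-only message fails fast. A minimal sketch of that guard, using a hypothetical class and a plain NotSerializableException in place of the request hierarchy's throwNSE() helper:

import java.io.IOException;
import java.io.NotSerializableException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;

// Illustrative only: a class whose instances must never travel through default serialization.
final class LocalOnlyRequest implements Serializable {
    private static final long serialVersionUID = 1L;

    // Each hook rejects default Java serialization outright.
    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
        throw new NotSerializableException(LocalOnlyRequest.class.getName());
    }

    private void readObjectNoData() throws ObjectStreamException {
        throw new NotSerializableException(LocalOnlyRequest.class.getName());
    }

    private void writeObject(final ObjectOutputStream stream) throws IOException {
        throw new NotSerializableException(LocalOnlyRequest.class.getName());
    }
}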
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; -import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; -import org.opendaylight.controller.cluster.access.concepts.RequestException; - -/** - * Serialization proxy for use with {@link ConnectClientFailure}. This class implements initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ConnectClientFailureProxyV1 extends AbstractRequestFailureProxy { - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public ConnectClientFailureProxyV1() { - // For Externalizable - } - - ConnectClientFailureProxyV1(final ConnectClientFailure failure) { - super(failure); - } - - @Override - protected ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence, - final RequestException cause) { - return new ConnectClientFailure(target, sequence, cause); - } - - @Override - protected ClientIdentifier readTarget(final DataInput in) throws IOException { - return ClientIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java index ba86035e92..953fafefa8 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java @@ -10,10 +10,12 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.DataInput; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.Request; import org.opendaylight.controller.cluster.access.concepts.RequestException; @@ -26,11 +28,30 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; * *
    * It also includes request stream sequencing information. - * - * @author Robert Varga */ -@Beta public final class ConnectClientRequest extends Request { + interface SerialForm extends Request.SerialForm { + @Override + default ConnectClientRequest readExternal(final ObjectInput in, final ClientIdentifier target, + final long sequence, final ActorRef replyTo) throws IOException { + return new ConnectClientRequest(target, sequence, replyTo, ABIVersion.inexactReadFrom(in), + ABIVersion.inexactReadFrom(in)); + } + + @Override + default ClientIdentifier readTarget(final DataInput in) throws IOException { + return ClientIdentifier.readFrom(in); + } + + @Override + default void writeExternal(final ObjectOutput out, final ConnectClientRequest msg) throws IOException { + Request.SerialForm.super.writeExternal(out, msg); + msg.getMinVersion().writeTo(out); + msg.getMaxVersion().writeTo(out); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final ABIVersion minVersion; @@ -50,8 +71,8 @@ public final class ConnectClientRequest extends Request externalizableProxy( - final ABIVersion version) { - return new ConnectClientRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new CCR(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java deleted file mode 100644 index da3a601db7..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import java.io.DataInput; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; -import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; - -/** - * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ConnectClientRequestProxyV1 extends AbstractRequestProxy { - private ABIVersion minVersion; - private ABIVersion maxVersion; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
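The nested SerialForm interfaces introduced in this patch keep the wire format in default methods that chain to their parent interface, for example AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg) before appending the snapshot-only flag, so the concrete proxy classes carry no per-field state. A compilable sketch of that default-method chaining, with hypothetical BaseForm/ReadForm names rather than anything from this patch:

import java.io.IOException;
import java.io.ObjectOutput;

// Illustrative only: layered default methods, where the subtype's form chains
// the parent's writeExternal via Interface.super and then adds its own field.
interface Msg {
    long sequence();
}

interface ReadMsg extends Msg {
    boolean snapshotOnly();
}

interface BaseForm<T extends Msg> {
    default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
        out.writeLong(msg.sequence());           // common envelope field
    }
}

interface ReadForm<T extends ReadMsg> extends BaseForm<T> {
    @Override
    default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
        BaseForm.super.writeExternal(out, msg);  // write the common part first
        out.writeBoolean(msg.snapshotOnly());    // then the subtype's own field
    }
}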
- @SuppressWarnings("checkstyle:RedundantModifier") - public ConnectClientRequestProxyV1() { - // for Externalizable - } - - ConnectClientRequestProxyV1(final ConnectClientRequest request) { - super(request); - this.minVersion = request.getMinVersion(); - this.maxVersion = request.getMaxVersion(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - minVersion.writeTo(out); - maxVersion.writeTo(out); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - minVersion = ABIVersion.inexactReadFrom(in); - maxVersion = ABIVersion.inexactReadFrom(in); - } - - @Override - protected ConnectClientRequest createRequest(final ClientIdentifier target, final long sequence, - final ActorRef replyTo) { - return new ConnectClientRequest(target, sequence, replyTo, minVersion, maxVersion); - } - - @Override - protected ClientIdentifier readTarget(final DataInput in) throws IOException { - return ClientIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java index 43fdb3c3c2..ad0e3624e1 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java @@ -12,39 +12,84 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; import akka.actor.ActorSelection; -import com.google.common.annotations.Beta; +import akka.serialization.JavaSerializer; +import akka.serialization.Serialization; import com.google.common.base.MoreObjects.ToStringHelper; import com.google.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.DataInput; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; +import java.util.ArrayList; import java.util.List; import java.util.Optional; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.RequestSuccess; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; /** * Successful reply to an {@link ConnectClientRequest}. Client actor which initiated this connection should use * the version reported via {@link #getVersion()} of this message to communicate with this backend. Should this backend * fail, the client can try accessing the provided alternates. 
- * - * @author Robert Varga */ -@Beta public final class ConnectClientSuccess extends RequestSuccess { - private static final long serialVersionUID = 1L; + interface SerialForm extends RequestSuccess.SerialForm { + @Override + default ClientIdentifier readTarget(final DataInput in) throws IOException { + return ClientIdentifier.readFrom(in); + } + + @Override + default ConnectClientSuccess readExternal(final ObjectInput in, final ClientIdentifier target, + final long sequence) throws IOException, ClassNotFoundException { + final var backend = JavaSerializer.currentSystem().value().provider() + .resolveActorRef((String) in.readObject()); + final var maxMessages = in.readInt(); + + final int alternatesSize = in.readInt(); + final var alternates = new ArrayList(alternatesSize); + for (int i = 0; i < alternatesSize; ++i) { + alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject())); + } + + return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null); + } + + @Override + default void writeExternal(final ObjectOutput out, final ConnectClientSuccess msg) throws IOException { + out.writeObject(Serialization.serializedActorPath(msg.backend)); + out.writeInt(msg.maxMessages); + + out.writeInt(msg.alternates.size()); + for (ActorSelection b : msg.alternates) { + out.writeObject(b.toSerializationFormat()); + } + + // We are ignoring the DataTree, it is not serializable anyway + } + } - @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class " - + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class " - + "aren't serialized. FindBugs does not recognize this.") - private final @NonNull List alternates; + @java.io.Serial + private static final long serialVersionUID = 1L; - @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See justification above.") + private final @NonNull ImmutableList alternates; private final ReadOnlyDataTree dataTree; private final @NonNull ActorRef backend; private final int maxMessages; + private ConnectClientSuccess(final ConnectClientSuccess success, final ABIVersion version) { + super(success, version); + alternates = success.alternates; + dataTree = success.dataTree; + backend = success.backend; + maxMessages = success.maxMessages; + } + ConnectClientSuccess(final ClientIdentifier target, final long sequence, final ActorRef backend, final List alternates, final int maxMessages, final ReadOnlyDataTree dataTree) { super(target, sequence); @@ -83,13 +128,13 @@ public final class ConnectClientSuccess extends RequestSuccess { - private static final long serialVersionUID = 1L; - - private List alternates; - private ActorRef backend; - private int maxMessages; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public ConnectClientSuccessProxyV1() { - // For Externalizable - } - - ConnectClientSuccessProxyV1(final ConnectClientSuccess success) { - super(success); - this.alternates = success.getAlternates(); - this.backend = success.getBackend(); - this.maxMessages = success.getMaxMessages(); - // We are ignoring the DataTree, it is not serializable anyway - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - - out.writeObject(Serialization.serializedActorPath(backend)); - out.writeInt(maxMessages); - - out.writeInt(alternates.size()); - for (ActorSelection b : alternates) { - out.writeObject(b.toSerializationFormat()); - } - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - - backend = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject()); - maxMessages = in.readInt(); - - final int alternatesSize = in.readInt(); - alternates = new ArrayList<>(alternatesSize); - for (int i = 0; i < alternatesSize; ++i) { - alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject())); - } - } - - @Override - protected ConnectClientSuccess createSuccess(final ClientIdentifier target, final long sequence) { - return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null); - } - - @Override - protected ClientIdentifier readTarget(final DataInput in) throws IOException { - return ClientIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java index 01a110d046..b627bafa43 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java @@ -8,17 +8,23 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; /** * Request to create a new local history. 
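ConnectClientSuccess.SerialForm above encodes the alternates as an int count followed by one serialized actor path per entry, and rebuilds them with ActorSelection.apply() on read. The same length-prefixed round trip is sketched below, reduced to plain strings so the example needs no Akka classes; all names here are illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.List;

// Illustrative only: write a size, then one entry per element; read them back the same way.
public final class ListCodecDemo {
    static void write(final ObjectOutputStream out, final List<String> paths) throws IOException {
        out.writeInt(paths.size());
        for (String path : paths) {
            out.writeObject(path);
        }
    }

    static List<String> read(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        final int size = in.readInt();
        final var paths = new ArrayList<String>(size);
        for (int i = 0; i < size; ++i) {
            paths.add((String) in.readObject());
        }
        return paths;
    }

    public static void main(final String[] args) throws Exception {
        final var bos = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bos)) {
            write(oos, List.of("akka://cluster/user/shard-a", "akka://cluster/user/shard-b"));
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            System.out.println(read(ois));
        }
    }
}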
- * - * @author Robert Varga */ -@Beta public final class CreateLocalHistoryRequest extends LocalHistoryRequest { + interface SerialForm extends LocalHistoryRequest.SerialForm { + @Override + default CreateLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target, + final long sequence, final ActorRef replyTo) { + return new CreateLocalHistoryRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; public CreateLocalHistoryRequest(final LocalHistoryIdentifier target, final ActorRef replyTo) { @@ -34,9 +40,8 @@ public final class CreateLocalHistoryRequest extends LocalHistoryRequest externalizableProxy( - final ABIVersion version) { - return new CreateLocalHistoryRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new CHR(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java deleted file mode 100644 index b61c9f5309..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; - -/** - * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class CreateLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public CreateLocalHistoryRequestProxyV1() { - // For Externalizable - } - - CreateLocalHistoryRequestProxyV1(final CreateLocalHistoryRequest request) { - super(request); - } - - @Override - protected CreateLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence, - final ActorRef replyTo) { - return new CreateLocalHistoryRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java new file mode 100644 index 0000000000..ebd0f02aaa --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class DHR implements DestroyLocalHistoryRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private DestroyLocalHistoryRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public DHR() { + // for Externalizable + } + + DHR(final DestroyLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public DestroyLocalHistoryRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final DestroyLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java index 7df84763c5..a91eb6971c 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import com.google.common.collect.RangeSet; import com.google.common.primitives.UnsignedLong; import org.opendaylight.controller.cluster.access.concepts.RequestException; @@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * A {@link RequestException} indicating that the backend has received a request to create a history which has already * been retired. 
- * - * @author Robert Varga */ -@Beta public final class DeadHistoryException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public DeadHistoryException(final RangeSet purgedHistories) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java index fee439984a..0f259c1a94 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import com.google.common.collect.ImmutableRangeSet; import com.google.common.collect.RangeSet; import com.google.common.primitives.UnsignedLong; @@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * A {@link RequestException} indicating that the backend has received a request to create a transaction which has * already been purged. - * - * @author Robert Varga */ -@Beta public final class DeadTransactionException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; private final RangeSet purgedIdentifiers; diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java index 375128318c..5add5eb054 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java @@ -8,17 +8,23 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; /** * Request to destroy a local history. 
- * - * @author Robert Varga */ -@Beta public final class DestroyLocalHistoryRequest extends LocalHistoryRequest { + interface SerialForm extends LocalHistoryRequest.SerialForm { + @Override + default DestroyLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target, + final long sequence, final ActorRef replyTo) { + return new DestroyLocalHistoryRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; public DestroyLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, @@ -31,9 +37,8 @@ public final class DestroyLocalHistoryRequest extends LocalHistoryRequest externalizableProxy( - final ABIVersion version) { - return new DestroyLocalHistoryRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new DHR(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java deleted file mode 100644 index 0ebd69054d..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; - -/** - * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class DestroyLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public DestroyLocalHistoryRequestProxyV1() { - // For Externalizable - } - - DestroyLocalHistoryRequestProxyV1(final DestroyLocalHistoryRequest request) { - super(request); - } - - @Override - protected DestroyLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence, - final ActorRef replyTo) { - return new DestroyLocalHistoryRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java new file mode 100644 index 0000000000..26964e474a --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
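A recurring detail in this patch is the @java.io.Serial annotation placed on serialVersionUID and on the serialization hooks such as readResolve() and readObject(). A tiny sketch of where the annotation applies, on a hypothetical class that is not part of the patch:

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serial;
import java.io.Serializable;

// Illustrative only: @java.io.Serial (Java 14+) marks serialization-related
// members so tooling can flag signature mistakes; it has no effect at runtime.
final class Marked implements Serializable {
    @Serial
    private static final long serialVersionUID = 1L;

    @Serial
    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
        stream.defaultReadObject();
    }
}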
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class ETR implements ExistsTransactionRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ExistsTransactionRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public ETR() { + // for Externalizable + } + + ETR(final ExistsTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public ExistsTransactionRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ExistsTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java new file mode 100644 index 0000000000..ad8564b33a --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class ETS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ExistsTransactionSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public ETS() { + // for Externalizable + } + + ETS(final ExistsTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public ExistsTransactionSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ExistsTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public void writeExternal(final ObjectOutput out, final ExistsTransactionSuccess msg) throws IOException { + out.writeBoolean(msg.getExists()); + } + + @Override + public ExistsTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) throws IOException { + return new ExistsTransactionSuccess(target, sequence, in.readBoolean()); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java index ab2316c28e..06c2797ca4 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java @@ -8,7 +8,8 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.IOException; +import java.io.ObjectInput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -16,15 +17,22 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** * A transaction request to query if a particular path exists in the current view of a particular transaction. 
- * - * @author Robert Varga */ -@Beta public final class ExistsTransactionRequest extends AbstractReadPathTransactionRequest { + interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm { + @Override + default ExistsTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path) + throws IOException { + return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; public ExistsTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence, - final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) { + final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) { super(identifier, sequence, replyTo, path, snapshotOnly); } @@ -38,7 +46,7 @@ public final class ExistsTransactionRequest extends AbstractReadPathTransactionR } @Override - protected ExistsTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new ExistsTransactionRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new ETR(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java deleted file mode 100644 index 2429947155..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; - -/** - * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ExistsTransactionRequestProxyV1 extends - AbstractReadPathTransactionRequestProxyV1 { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public ExistsTransactionRequestProxyV1() { - // For Externalizable - } - - ExistsTransactionRequestProxyV1(final ExistsTransactionRequest request) { - super(request); - } - - @Override - ExistsTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) { - return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java index 8a1704de76..72dee3aefa 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -15,14 +14,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier /** * Successful reply to an {@link ExistsTransactionRequest}. It indicates presence of requested data via * {@link #getExists()}. - * - * @author Robert Varga */ -@Beta public final class ExistsTransactionSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private final boolean exists; + private ExistsTransactionSuccess(final ExistsTransactionSuccess success, final ABIVersion version) { + super(success, version); + exists = success.exists; + } + public ExistsTransactionSuccess(final TransactionIdentifier target, final long sequence, final boolean exists) { super(target, sequence); this.exists = exists; @@ -33,13 +36,13 @@ public final class ExistsTransactionSuccess extends TransactionSuccess { - private static final long serialVersionUID = 1L; - private boolean exists; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public ExistsTransactionSuccessProxyV1() { - // For Externalizable - } - - ExistsTransactionSuccessProxyV1(final ExistsTransactionSuccess request) { - super(request); - this.exists = request.getExists(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - out.writeBoolean(exists); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - exists = in.readBoolean(); - } - - @Override - protected ExistsTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new ExistsTransactionSuccess(target, sequence, exists); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java new file mode 100644 index 0000000000..68e9b09b48 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the Chlorine SR2 serialization format. + */ +final class HF implements LocalHistoryFailure.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private LocalHistoryFailure message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public HF() { + // for Externalizable + } + + HF(final LocalHistoryFailure message) { + this.message = requireNonNull(message); + } + + @Override + public LocalHistoryFailure message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final LocalHistoryFailure message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java new file mode 100644 index 0000000000..4ab0ff5ce0 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link LocalHistorySuccess}. It implements the Chlorine SR2 serialization format. 
+ */ +final class HS implements LocalHistorySuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private LocalHistorySuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public HS() { + // for Externalizable + } + + HS(final LocalHistorySuccess message) { + this.message = requireNonNull(message); + } + + @Override + public LocalHistorySuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final LocalHistorySuccess message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java new file mode 100644 index 0000000000..ef76f5ea19 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link IncrementTransactionSequenceRequest}. It implements the Chlorine SR2 + * serialization format. + */ +final class ITSR implements IncrementTransactionSequenceRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private IncrementTransactionSequenceRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public ITSR() { + // for Externalizable + } + + ITSR(final IncrementTransactionSequenceRequest message) { + this.message = requireNonNull(message); + } + + @Override + public IncrementTransactionSequenceRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final IncrementTransactionSequenceRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java new file mode 100644 index 0000000000..7252d58a61 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the Chlorine SR2 + * serialization format. + */ +final class ITSS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private IncrementTransactionSequenceSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public ITSS() { + // for Externalizable + } + + ITSS(final IncrementTransactionSequenceSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public IncrementTransactionSequenceSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final IncrementTransactionSequenceSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public IncrementTransactionSequenceSuccess readExternal(final ObjectInput it, final TransactionIdentifier target, + final long sequence) { + return new IncrementTransactionSequenceSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java index ffc0a68b89..5695860e17 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java @@ -7,28 +7,55 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static com.google.common.base.Preconditions.checkArgument; + import akka.actor.ActorRef; -import com.google.common.base.Preconditions; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.yangtools.concepts.WritableObjects; /** * A blank transaction request. This is used to provide backfill requests in converted retransmit scenarios, such as * when a initial request to a transaction (such as a {@link ReadTransactionRequest}) is satisfied by the backend * before the need to replay the transaction to a different remote backend. 
- * - * @author Robert Varga */ public final class IncrementTransactionSequenceRequest extends AbstractReadTransactionRequest { + interface SerialForm extends AbstractReadTransactionRequest.SerialForm { + @Override + default void writeExternal(final ObjectOutput out, final IncrementTransactionSequenceRequest msg) + throws IOException { + AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg); + WritableObjects.writeLong(out, msg.getIncrement()); + } + + @Override + default IncrementTransactionSequenceRequest readExternal(final ObjectInput in, + final TransactionIdentifier target, final long sequence, final ActorRef replyTo, + final boolean snapshotOnly) throws IOException { + return new IncrementTransactionSequenceRequest(target, sequence, replyTo, snapshotOnly, + WritableObjects.readLong(in)); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final long increment; + public IncrementTransactionSequenceRequest(final IncrementTransactionSequenceRequest request, + final ABIVersion version) { + super(request, version); + increment = request.increment; + } + public IncrementTransactionSequenceRequest(final TransactionIdentifier identifier, final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final long increment) { super(identifier, sequence, replyTo, snapshotOnly); - Preconditions.checkArgument(increment >= 0); + checkArgument(increment >= 0, "Unexpected increment %s", increment); this.increment = increment; } @@ -42,12 +69,12 @@ public final class IncrementTransactionSequenceRequest extends } @Override - protected IncrementTransactionSequenceRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new IncrementTransactionSequenceRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new ITSR(this); } @Override protected IncrementTransactionSequenceRequest cloneAsVersion(final ABIVersion targetVersion) { - return this; + return new IncrementTransactionSequenceRequest(this, targetVersion); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java deleted file mode 100644 index da1659e1fe..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others. All rights reserved. 
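IncrementTransactionSequenceRequest.SerialForm above carries the increment with WritableObjects.writeLong()/readLong(), the same compact long encoding the deleted proxy used. A tiny stand-alone round trip of just those two calls (the surrounding request header is omitted):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.opendaylight.yangtools.concepts.WritableObjects;

final class IncrementWireDemo {
    public static void main(final String[] args) throws Exception {
        final var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            // Same helper the SerialForm uses to append the increment after the
            // common request header: a compact, variable-length long encoding.
            WritableObjects.writeLong(out, 42L);
        }
        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(WritableObjects.readLong(in)); // prints 42
        }
    }
}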
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.concepts.WritableObjects; - -final class IncrementTransactionSequenceRequestProxyV1 - extends AbstractReadTransactionRequestProxyV1 { - private long increment; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public IncrementTransactionSequenceRequestProxyV1() { - // For Externalizable - } - - IncrementTransactionSequenceRequestProxyV1(final IncrementTransactionSequenceRequest request) { - super(request); - this.increment = request.getIncrement(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - WritableObjects.writeLong(out, increment); - } - - @Override - public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException { - super.readExternal(in); - increment = WritableObjects.readLong(in); - } - - @Override - IncrementTransactionSequenceRequest createReadRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyToActor, final boolean snapshotOnly) { - return new IncrementTransactionSequenceRequest(target, sequence, replyToActor, snapshotOnly, increment); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java index 80f4a0d5aa..4f27f76563 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java @@ -7,30 +7,32 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * Successful reply to an {@link IncrementTransactionSequenceRequest}. 
- * - * @author Robert Varga */ -@Beta public final class IncrementTransactionSequenceSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private IncrementTransactionSequenceSuccess(final IncrementTransactionSequenceSuccess success, + final ABIVersion version) { + super(success, version); + } + public IncrementTransactionSequenceSuccess(final TransactionIdentifier target, final long sequence) { super(target, sequence); } @Override - protected IncrementTransactionSequenceSuccessProxyV1 externalizableProxy(final ABIVersion version) { - return new IncrementTransactionSequenceSuccessProxyV1(this); + protected ITSS externalizableProxy(final ABIVersion version) { + return new ITSS(this); } @Override protected IncrementTransactionSequenceSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new IncrementTransactionSequenceSuccess(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java deleted file mode 100644 index a99faabcff..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class IncrementTransactionSequenceSuccessProxyV1 - extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
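A recurring change in these messages is that cloneAsVersion() no longer returns this; it rebuilds the message through a private copy constructor so the result is bound to the requested ABI version. A sketch of that pattern with hypothetical types:

enum DemoVersion {
    V1, V2
}

final class DemoSuccess {
    private final String target;
    private final long sequence;
    private final DemoVersion version;

    DemoSuccess(final String target, final long sequence, final DemoVersion version) {
        this.target = target;
        this.sequence = sequence;
        this.version = version;
    }

    private DemoSuccess(final DemoSuccess success, final DemoVersion version) {
        this(success.target, success.sequence, version);
    }

    DemoVersion getVersion() {
        return version;
    }

    DemoSuccess cloneAsVersion(final DemoVersion targetVersion) {
        // Always rebuild, so the returned message carries targetVersion even
        // when the payload itself is unchanged.
        return new DemoSuccess(this, targetVersion);
    }
}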
- @SuppressWarnings("checkstyle:RedundantModifier") - public IncrementTransactionSequenceSuccessProxyV1() { - // For Externalizable - } - - IncrementTransactionSequenceSuccessProxyV1(final IncrementTransactionSequenceSuccess request) { - super(request); - } - - @Override - protected IncrementTransactionSequenceSuccess createSuccess(final TransactionIdentifier target, - final long sequence) { - return new IncrementTransactionSequenceSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java index 4fd69c24ce..fc24d8aeda 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.RequestException; @@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure; /** * Generic {@link RequestFailure} involving a {@link LocalHistoryRequest}. - * - * @author Robert Varga */ -@Beta public final class LocalHistoryFailure extends RequestFailure { + interface SerialForm extends RequestFailure.SerialForm { + @Override + default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { + return LocalHistoryIdentifier.readFrom(in); + } + + @Override + default LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence, + final RequestException cause) { + return new LocalHistoryFailure(target, sequence, cause); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private LocalHistoryFailure(final LocalHistoryFailure failure, final ABIVersion version) { + super(failure, version); + } + LocalHistoryFailure(final LocalHistoryIdentifier target, final long sequence, final RequestException cause) { super(target, sequence, cause); } @Override - protected LocalHistoryFailure cloneAsVersion(final ABIVersion version) { - return this; + protected LocalHistoryFailure cloneAsVersion(final ABIVersion targetVersion) { + return new LocalHistoryFailure(this, targetVersion); } @Override - protected LocalHistoryFailureProxyV1 externalizableProxy(final ABIVersion version) { - return new LocalHistoryFailureProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new HF(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java deleted file mode 100644 index 0d3a687a41..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.controller.cluster.access.concepts.RequestException; - -/** - * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class LocalHistoryFailureProxyV1 extends - AbstractRequestFailureProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public LocalHistoryFailureProxyV1() { - // For Externalizable - } - - LocalHistoryFailureProxyV1(final LocalHistoryFailure failure) { - super(failure); - } - - @Override - protected LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence, - final RequestException cause) { - return new LocalHistoryFailure(target, sequence, cause); - } - - @Override - protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { - return LocalHistoryIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java index 33d04850fb..c304384fd8 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java @@ -8,8 +8,9 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.Preconditions; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.Request; @@ -19,12 +20,17 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; * Abstract base class for {@link Request}s involving specific local history. This class is visible outside of this * package solely for the ability to perform a unified instanceof check. 
* - * @author Robert Varga - * * @param Message type */ -@Beta public abstract class LocalHistoryRequest> extends Request { + interface SerialForm> extends Request.SerialForm { + @Override + default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { + return LocalHistoryIdentifier.readFrom(in); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; LocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) { @@ -42,5 +48,5 @@ public abstract class LocalHistoryRequest> exte } @Override - protected abstract AbstractLocalHistoryRequestProxy externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java index 3b8ed35816..7c0e1865c1 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java @@ -7,37 +7,48 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.RequestSuccess; /** * Success class for {@link RequestSuccess}es involving a specific local history. 
- * - * @author Robert Varga */ -@Beta public final class LocalHistorySuccess extends RequestSuccess { - private static final long serialVersionUID = 1L; + interface SerialForm extends RequestSuccess.SerialForm { + @Override + default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { + return LocalHistoryIdentifier.readFrom(in); + } - public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) { - super(target, sequence); + @Override + default LocalHistorySuccess readExternal(final ObjectInput it, final LocalHistoryIdentifier target, + final long sequence) { + return new LocalHistorySuccess(target, sequence); + } } + @java.io.Serial + private static final long serialVersionUID = 1L; + private LocalHistorySuccess(final LocalHistorySuccess success, final ABIVersion version) { super(success, version); } + public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) { + super(target, sequence); + } + @Override protected LocalHistorySuccess cloneAsVersion(final ABIVersion version) { return new LocalHistorySuccess(this, version); } @Override - protected AbstractSuccessProxy externalizableProxy( - final ABIVersion version) { - return new LocalHistorySuccessProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new HS(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java deleted file mode 100644 index 97a7a1c8d6..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; - -/** - * Serialization proxy associated with {@link LocalHistorySuccess}. - * - * @author Robert Varga - */ -final class LocalHistorySuccessProxyV1 extends AbstractSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public LocalHistorySuccessProxyV1() { - // For Externalizable - } - - LocalHistorySuccessProxyV1(final LocalHistorySuccess success) { - super(success); - } - - @Override - protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException { - return LocalHistoryIdentifier.readFrom(in); - } - - @Override - protected LocalHistorySuccess createSuccess(final LocalHistoryIdentifier target, final long sequence) { - return new LocalHistorySuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java new file mode 100644 index 0000000000..a0c5acf8b0 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ModifyTransactionRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class MTR implements ModifyTransactionRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ModifyTransactionRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public MTR() { + // for Externalizable + } + + MTR(final ModifyTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public ModifyTransactionRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ModifyTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java new file mode 100644 index 0000000000..ee7e8768ea --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class MTS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ModifyTransactionSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public MTS() { + // for Externalizable + } + + MTS(final ModifyTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public ModifyTransactionSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ModifyTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public ModifyTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) throws IOException { + return new ModifyTransactionSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java index 39b577cef2..b1ddd38930 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java @@ -8,33 +8,83 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; import com.google.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; +import java.util.ArrayList; import java.util.List; import java.util.Optional; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.SliceableMessage; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; +import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter; /** * A transaction request to apply a particular set of operations on top of the current transaction. This message is * used to also finish a transaction by specifying a {@link PersistenceProtocol}. 
- * - * @author Robert Varga */ -@Beta public final class ModifyTransactionRequest extends TransactionRequest implements SliceableMessage { + interface SerialForm extends TransactionRequest.SerialForm { + + + @Override + default ModifyTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) throws IOException { + + final var protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in)); + final int size = in.readInt(); + final List modifications; + if (size != 0) { + modifications = new ArrayList<>(size); + final var nnin = NormalizedNodeDataInput.newDataInput(in); + final var writer = ReusableImmutableNormalizedNodeStreamWriter.create(); + for (int i = 0; i < size; ++i) { + modifications.add(TransactionModification.readFrom(nnin, writer)); + } + } else { + modifications = ImmutableList.of(); + } + + return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null)); + } + + @Override + default void writeExternal(final ObjectOutput out, final ModifyTransactionRequest msg) throws IOException { + TransactionRequest.SerialForm.super.writeExternal(out, msg); + + out.writeByte(PersistenceProtocol.byteValue(msg.getPersistenceProtocol().orElse(null))); + + final var modifications = msg.getModifications(); + out.writeInt(modifications.size()); + if (!modifications.isEmpty()) { + try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) { + for (var op : modifications) { + op.writeTo(nnout); + } + } + } + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; - @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class " - + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class " - + "aren't serialized. FindBugs does not recognize this.") private final List modifications; private final PersistenceProtocol protocol; + private ModifyTransactionRequest(final ModifyTransactionRequest request, final ABIVersion version) { + super(request, version); + modifications = request.modifications; + protocol = request.protocol; + } + ModifyTransactionRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo, final List modifications, final PersistenceProtocol protocol) { super(target, sequence, replyTo); @@ -57,12 +107,27 @@ public final class ModifyTransactionRequest extends TransactionRequest, - Identifiable { +public final class ModifyTransactionRequestBuilder implements Identifiable { private final List modifications = new ArrayList<>(1); - private final TransactionIdentifier identifier; + private final @NonNull TransactionIdentifier identifier; private final ActorRef replyTo; private PersistenceProtocol protocol; @@ -82,8 +76,7 @@ public final class ModifyTransactionRequestBuilder implements Builder { - private static final long serialVersionUID = 1L; - - private List modifications; - private Optional protocol; - private transient NormalizedNodeStreamVersion streamVersion; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
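ModifyTransactionRequest.SerialForm above frames its payload as a persistence-protocol byte, a modification count, and then the modifications streamed as NormalizedNode data. A simplified sketch of just that framing, with plain strings standing in for the NormalizedNode stream and names that are not part of this patch:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class ModifyFramingDemo {
    // Stand-in for the decoded result: protocol byte plus the modifications.
    record Decoded(byte protocol, List<String> modifications) {
    }

    static void write(final DataOutput out, final byte protocol, final List<String> modifications)
            throws IOException {
        out.writeByte(protocol);                  // 0 encodes "no persistence protocol"
        out.writeInt(modifications.size());
        for (var modification : modifications) {  // the real code streams NormalizedNode data here
            out.writeUTF(modification);
        }
    }

    static Decoded read(final DataInput in) throws IOException {
        final byte protocol = in.readByte();
        final int size = in.readInt();
        // The real code short-circuits to an immutable empty list when size is 0.
        final var modifications = new ArrayList<String>(size);
        for (int i = 0; i < size; ++i) {
            modifications.add(in.readUTF());
        }
        return new Decoded(protocol, modifications);
    }
}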
- @SuppressWarnings("checkstyle:RedundantModifier") - public ModifyTransactionRequestProxyV1() { - // For Externalizable - } - - ModifyTransactionRequestProxyV1(final ModifyTransactionRequest request) { - super(request); - this.modifications = requireNonNull(request.getModifications()); - this.protocol = request.getPersistenceProtocol(); - this.streamVersion = request.getVersion().getStreamVersion(); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - - protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in)); - - final int size = in.readInt(); - if (size != 0) { - modifications = new ArrayList<>(size); - final NormalizedNodeDataInput nnin = NormalizedNodeDataInput.newDataInput(in); - final ReusableImmutableNormalizedNodeStreamWriter writer = - ReusableImmutableNormalizedNodeStreamWriter.create(); - for (int i = 0; i < size; ++i) { - modifications.add(TransactionModification.readFrom(nnin, writer)); - } - } else { - modifications = ImmutableList.of(); - } - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - - out.writeByte(PersistenceProtocol.byteValue(protocol.orElse(null))); - out.writeInt(modifications.size()); - if (!modifications.isEmpty()) { - try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) { - for (TransactionModification op : modifications) { - op.writeTo(nnout); - } - } - } - } - - @Override - protected ModifyTransactionRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo) { - return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null)); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java index c4dd20d6c9..38adf787b9 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java @@ -7,17 +7,14 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * Response to a {@link ModifyTransactionRequest} which does not have a {@link PersistenceProtocol}. 
- * - * @author Robert Varga */ -@Beta public final class ModifyTransactionSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; public ModifyTransactionSuccess(final TransactionIdentifier identifier, final long sequence) { @@ -29,8 +26,8 @@ public final class ModifyTransactionSuccess extends TransactionSuccess externalizableProxy(final ABIVersion version) { - return new ModifyTransactionSuccessProxyV1(this); + protected MTS externalizableProxy(final ABIVersion version) { + return new MTS(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java deleted file mode 100644 index 0efff09259..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ModifyTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public ModifyTransactionSuccessProxyV1() { - // For Externalizable - } - - ModifyTransactionSuccessProxyV1(final ModifyTransactionSuccess success) { - super(success); - } - - @Override - protected ModifyTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new ModifyTransactionSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java index 0864cd0cf0..c4353c37fd 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java @@ -8,17 +8,14 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * General error raised when the recipient of a Request is not the correct backend to talk to. This typically * means that the backend processing has moved and the frontend needs to run rediscovery and retry the request. 
- * - * @author Robert Varga */ -@Beta public final class NotLeaderException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public NotLeaderException(final ActorRef me) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java index cd110d66b6..0c908078eb 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java @@ -7,17 +7,14 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * A {@link RequestException} indicating that the backend has received a Request whose sequence does not match the * next expected sequence for the target. This is a hard error, as it indicates a Request is missing in the stream. - * - * @author Robert Varga */ -@Beta public final class OutOfOrderRequestException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public OutOfOrderRequestException(final long expectedRequest) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java index ad3dd8d700..b39e09a6a4 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java @@ -7,18 +7,15 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * A {@link RequestException} indicating that the backend has received a RequestEnvelope whose sequence does not match * the next expected sequence. This can happen during leader transitions, when a part of the stream is rejected because * the backend is not the leader and it transitions to being a leader with old stream messages still being present. - * - * @author Robert Varga */ -@Beta public final class OutOfSequenceEnvelopeException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public OutOfSequenceEnvelopeException(final long expectedEnvelope) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java new file mode 100644 index 0000000000..e2b3959e64 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class PHR implements PurgeLocalHistoryRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private PurgeLocalHistoryRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public PHR() { + // for Externalizable + } + + PHR(final PurgeLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public PurgeLocalHistoryRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final PurgeLocalHistoryRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java index be58b05b1f..82fca03087 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java @@ -7,19 +7,16 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.yangtools.concepts.WritableObject; /** * Enumeration of transaction persistence protocols. These govern which protocol is executed between the frontend * and backend to drive persistence of a particular transaction. - * - * @author Robert Varga */ -@Beta public enum PersistenceProtocol implements WritableObject { /** * Abort protocol. The transaction has been aborted on the frontend and its effects should not be visible @@ -77,20 +74,14 @@ public enum PersistenceProtocol implements WritableObject { return finish == null ? 
0 : finish.byteValue(); } - static PersistenceProtocol valueOf(final byte value) { - switch (value) { - case 0: - return null; - case 1: - return ABORT; - case 2: - return SIMPLE; - case 3: - return THREE_PHASE; - case 4: - return READY; - default: - throw new IllegalArgumentException("Unhandled byte value " + value); - } + static @Nullable PersistenceProtocol valueOf(final byte value) { + return switch (value) { + case 0 -> null; + case 1 -> ABORT; + case 2 -> SIMPLE; + case 3 -> THREE_PHASE; + case 4 -> READY; + default -> throw new IllegalArgumentException("Unhandled byte value " + value); + }; } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java index ecbd749dd1..c9dc5dc1e7 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java @@ -8,18 +8,24 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; /** * Request to purge a local history. This request is sent by the client once it receives a successful reply to * {@link DestroyLocalHistoryRequest} and indicates it has removed all state attached to a particular local history. - * - * @author Robert Varga */ -@Beta public final class PurgeLocalHistoryRequest extends LocalHistoryRequest { + interface SerialForm extends LocalHistoryRequest.SerialForm { + @Override + default PurgeLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target, + final long sequence, final ActorRef replyTo) { + return new PurgeLocalHistoryRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; public PurgeLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) { @@ -31,8 +37,8 @@ public final class PurgeLocalHistoryRequest extends LocalHistoryRequest externalizableProxy(final ABIVersion version) { - return new PurgeLocalHistoryRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new PHR(this); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java deleted file mode 100644 index 11c344f539..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
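PersistenceProtocol.valueOf() above becomes a switch expression over the wire byte, with 0 reserved for an absent protocol. A compact sketch of the same nullable-enum byte codec using a hypothetical enum (the real byteValue() is a per-constant method rather than ordinal arithmetic):

enum DemoProtocol {
    ABORT, SIMPLE, THREE_PHASE, READY;

    // 0 encodes "no protocol"; the constants map to 1..4.
    static byte byteValue(final DemoProtocol protocol) {
        return protocol == null ? 0 : (byte) (protocol.ordinal() + 1);
    }

    // Returns null for 0, mirroring the @Nullable contract in the patch.
    static DemoProtocol valueOf(final byte value) {
        return switch (value) {
            case 0 -> null;
            case 1 -> ABORT;
            case 2 -> SIMPLE;
            case 3 -> THREE_PHASE;
            case 4 -> READY;
            default -> throw new IllegalArgumentException("Unhandled byte value " + value);
        };
    }
}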
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; - -/** - * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class PurgeLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public PurgeLocalHistoryRequestProxyV1() { - // For Externalizable - } - - PurgeLocalHistoryRequestProxyV1(final PurgeLocalHistoryRequest request) { - super(request); - } - - @Override - protected PurgeLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence, - final ActorRef replyTo) { - return new PurgeLocalHistoryRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java new file mode 100644 index 0000000000..e342c1806f --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class RTR implements ReadTransactionRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ReadTransactionRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public RTR() { + // for Externalizable + } + + RTR(final ReadTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public ReadTransactionRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ReadTransactionRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java new file mode 100644 index 0000000000..2c808349ea --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. 
All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Optional; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; + +/** + * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the Chlorine SR2 serialization + * format. + */ +final class RTS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ReadTransactionSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public RTS() { + // for Externalizable + } + + RTS(final ReadTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public ReadTransactionSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final ReadTransactionSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public ReadTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) throws IOException { + final Optional data; + if (in.readBoolean()) { + data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode()); + } else { + data = Optional.empty(); + } + return new ReadTransactionSuccess(target, sequence, data); + } + + @Override + public void writeExternal(final ObjectOutput out, final ReadTransactionSuccess msg) throws IOException { + TransactionSuccess.SerialForm.super.writeExternal(out, msg); + + final var data = msg.getData(); + if (data.isPresent()) { + out.writeBoolean(true); + try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) { + nnout.writeNormalizedNode(data.orElseThrow()); + } + } else { + out.writeBoolean(false); + } + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java index 92caa59b7a..292496b7b4 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java @@ -8,7 +8,8 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.IOException; +import java.io.ObjectInput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -16,11 +17,18 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** * A transaction request to read a particular path 
exists in the current view of a particular transaction. - * - * @author Robert Varga */ -@Beta public final class ReadTransactionRequest extends AbstractReadPathTransactionRequest { + interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm { + @Override + default ReadTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path) + throws IOException { + return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; public ReadTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence, @@ -38,7 +46,7 @@ public final class ReadTransactionRequest extends AbstractReadPathTransactionReq } @Override - protected ReadTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new ReadTransactionRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new RTR(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java deleted file mode 100644 index a83b6bcaac..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; - -/** - * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ReadTransactionRequestProxyV1 extends AbstractReadPathTransactionRequestProxyV1 { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public ReadTransactionRequestProxyV1() { - // For Externalizable - } - - ReadTransactionRequestProxyV1(final ReadTransactionRequest request) { - super(request); - } - - @Override - ReadTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) { - return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java index 1b3410ee3c..a03766e9da 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java @@ -9,8 +9,10 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; import java.util.Optional; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.SliceableMessage; @@ -20,16 +22,19 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; /** * Successful reply to an {@link ReadTransactionRequest}. It indicates presence of requested data via * {@link #getData()}. 
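[Editor's note] The Javadoc above describes the key property of ReadTransactionSuccess: the read result is optional and its presence is reported via getData(). On the wire, the new RTS proxy introduced earlier in this patch encodes that as a boolean presence flag followed by the NormalizedNode in the negotiated stream version, replacing the equivalent logic in the deleted ReadTransactionSuccessProxyV1. A minimal sketch of that framing, lifted from the writeExternal()/readExternal() bodies shown above; the helper class and method names are illustrative only, and in the real code the stream version comes from the message's ABIVersion rather than being passed in directly:

```java
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Optional;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;

// Illustrative mirror of RTS.writeExternal()/readExternal(): a boolean presence
// marker, then (only if present) the node serialized in the given stream version.
final class OptionalNodeCodec {
    private OptionalNodeCodec() {
        // utility class
    }

    static void write(final ObjectOutput out, final NormalizedNodeStreamVersion version,
            final Optional<NormalizedNode> data) throws IOException {
        if (data.isPresent()) {
            out.writeBoolean(true);
            try (var nnout = version.newDataOutput(out)) {
                nnout.writeNormalizedNode(data.orElseThrow());
            }
        } else {
            out.writeBoolean(false);
        }
    }

    static Optional<NormalizedNode> read(final ObjectInput in) throws IOException {
        return in.readBoolean()
            ? Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode())
            : Optional.empty();
    }
}
```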
- * - * @author Robert Varga */ -@Beta -@SuppressFBWarnings("SE_BAD_FIELD") public final class ReadTransactionSuccess extends TransactionSuccess implements SliceableMessage { + @java.io.Serial private static final long serialVersionUID = 1L; + private final Optional data; + private ReadTransactionSuccess(final ReadTransactionSuccess request, final ABIVersion version) { + super(request, version); + data = request.data; + } + public ReadTransactionSuccess(final TransactionIdentifier identifier, final long sequence, final Optional data) { super(identifier, sequence); @@ -41,12 +46,27 @@ public final class ReadTransactionSuccess extends TransactionSuccess externalizableProxy(final ABIVersion version) { - return new ReadTransactionSuccessProxyV1(this); + protected RTS externalizableProxy(final ABIVersion version) { + return new RTS(this); } @Override protected ReadTransactionSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new ReadTransactionSuccess(this, version); + } + + @java.io.Serial + private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException { + throwNSE(); + } + + @java.io.Serial + private void readObjectNoData() throws ObjectStreamException { + throwNSE(); + } + + @java.io.Serial + private void writeObject(final ObjectOutputStream stream) throws IOException { + throwNSE(); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java deleted file mode 100644 index d442f36739..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.util.Optional; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; -import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion; - -/** - * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class ReadTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - private Optional data; - private transient NormalizedNodeStreamVersion streamVersion; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
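[Editor's note] ReadTransactionSuccess above also gains readObject()/readObjectNoData()/writeObject() methods that delegate to throwNSE(), so the message can only travel through its externalizable proxy. A hedged sketch of that guard pattern in isolation: GuardedMessage and its nested Proxy are invented names, and throwNSE() is a local stand-in here, assumed to simply throw NotSerializableException as its name suggests (in the patch it is provided by a base class):

```java
import java.io.IOException;
import java.io.NotSerializableException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;

// Hypothetical message: all serialization is forced through the proxy below;
// touching the message with default Java serialization fails fast.
class GuardedMessage implements Serializable {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private final String payload;

    GuardedMessage(final String payload) {
        this.payload = payload;
    }

    String payload() {
        return payload;
    }

    @java.io.Serial
    Object writeReplace() {
        return new Proxy(payload);
    }

    private void throwNSE() throws NotSerializableException {
        throw new NotSerializableException(getClass().getName());
    }

    @java.io.Serial
    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
        throwNSE();
    }

    @java.io.Serial
    private void readObjectNoData() throws ObjectStreamException {
        throwNSE();
    }

    @java.io.Serial
    private void writeObject(final ObjectOutputStream stream) throws IOException {
        throwNSE();
    }

    // Minimal serialization proxy: carries only the state, recreates the message on read.
    private record Proxy(String payload) implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        @java.io.Serial
        Object readResolve() {
            return new GuardedMessage(payload);
        }
    }
}
```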
- @SuppressWarnings("checkstyle:RedundantModifier") - public ReadTransactionSuccessProxyV1() { - // For Externalizable - } - - ReadTransactionSuccessProxyV1(final ReadTransactionSuccess request) { - super(request); - this.data = request.getData(); - this.streamVersion = request.getVersion().getStreamVersion(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - - if (data.isPresent()) { - out.writeBoolean(true); - try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) { - nnout.writeNormalizedNode(data.get()); - } - } else { - out.writeBoolean(false); - } - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - - if (in.readBoolean()) { - data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode()); - } else { - data = Optional.empty(); - } - } - - @Override - protected ReadTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new ReadTransactionSuccess(target, sequence, data); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java new file mode 100644 index 0000000000..aa529eab68 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link SkipTransactionsRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class STR implements SkipTransactionsRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private SkipTransactionsRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public STR() { + // for Externalizable + } + + STR(final SkipTransactionsRequest message) { + this.message = requireNonNull(message); + } + + @Override + public SkipTransactionsRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final SkipTransactionsRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java new file mode 100644 index 0000000000..54897090f5 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the Chlorine SR2 serialization + * format. + */ +final class STS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private SkipTransactionsResponse message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public STS() { + // for Externalizable + } + + STS(final SkipTransactionsResponse message) { + this.message = requireNonNull(message); + } + + @Override + public SkipTransactionsResponse message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final SkipTransactionsResponse message) { + this.message = requireNonNull(message); + } + + @Override + public SkipTransactionsResponse readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) { + return new SkipTransactionsResponse(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java index dd5faa8e87..a2c037f784 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java @@ -8,15 +8,18 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; import com.google.common.collect.ImmutableList; import com.google.common.primitives.UnsignedLong; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.Collection; import java.util.List; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.yangtools.concepts.WritableObjects; /** * Request to skip a number of {@link TransactionIdentifier}s within a {code local history}. This request is essentially @@ -27,8 +30,51 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * This request is sent by the frontend to inform the backend that a set of {@link TransactionIdentifier}s are * explicitly retired and are guaranteed to never be used by the frontend. 
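[Editor's note] The serial form in the next hunk stores the retired transaction identifiers compactly: the list of UnsignedLong values is written in pairs via WritableObjects.writeLongs(), which packs two longs under a single shared header byte, and a leading odd element is written on its own with writeLong(). A standalone sketch mirroring that readExternal()/writeExternal() logic (the codec class name is invented; the WritableObjects calls are exactly those used in the patch):

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;

import com.google.common.collect.ImmutableList;
import com.google.common.primitives.UnsignedLong;
import org.opendaylight.yangtools.concepts.WritableObjects;

// Illustrative mirror of SkipTransactionsRequest.SerialForm: values go out in
// pairs sharing one header byte; an odd-sized list writes its first value alone.
final class UnsignedLongListCodec {
    private UnsignedLongListCodec() {
        // utility class
    }

    static void write(final DataOutput out, final List<UnsignedLong> others) throws IOException {
        final int size = others.size();
        out.writeInt(size);

        int idx = 0;
        if (size % 2 != 0) {
            WritableObjects.writeLong(out, others.get(0).longValue());
            idx = 1;
        }
        for (; idx < size; idx += 2) {
            WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
        }
    }

    static List<UnsignedLong> read(final DataInput in) throws IOException {
        final int size = in.readInt();
        final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);

        int idx = 0;
        if (size % 2 != 0) {
            builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
            idx = 1;
        }
        for (; idx < size; idx += 2) {
            final byte hdr = WritableObjects.readLongHeader(in);
            builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
            builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
        }
        return builder.build();
    }
}
```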
*/ -@Beta public final class SkipTransactionsRequest extends TransactionRequest { + interface SerialForm extends TransactionRequest.SerialForm { + @Override + default SkipTransactionsRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) throws IOException { + final int size = in.readInt(); + final var builder = ImmutableList.builderWithExpectedSize(size); + int idx; + if (size % 2 != 0) { + builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in))); + idx = 1; + } else { + idx = 0; + } + for (; idx < size; idx += 2) { + final byte hdr = WritableObjects.readLongHeader(in); + builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr))); + builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr))); + } + + return new SkipTransactionsRequest(target, sequence, replyTo, builder.build()); + } + + @Override + default void writeExternal(final ObjectOutput out, final SkipTransactionsRequest msg) throws IOException { + TransactionRequest.SerialForm.super.writeExternal(out, msg); + + final var others = msg.others; + final int size = others.size(); + out.writeInt(size); + + int idx; + if (size % 2 != 0) { + WritableObjects.writeLong(out, others.get(0).longValue()); + idx = 1; + } else { + idx = 0; + } + for (; idx < size; idx += 2) { + WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue()); + } + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; // Note: UnsignedLong is arbitrary, yang.common.Uint64 would work just as well, we really want an immutable @@ -41,6 +87,11 @@ public final class SkipTransactionsRequest extends TransactionRequest { - private List others; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public SkipTransactionsRequestV1() { - // For Externalizable - } - - SkipTransactionsRequestV1(final SkipTransactionsRequest request) { - super(request); - others = request.getOthers(); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - - final int size = in.readInt(); - final var builder = ImmutableList.builderWithExpectedSize(size); - int idx; - if (size % 2 != 0) { - builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in))); - idx = 1; - } else { - idx = 0; - } - for (; idx < size; idx += 2) { - final byte hdr = WritableObjects.readLongHeader(in); - builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr))); - builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr))); - } - others = builder.build(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - - final int size = others.size(); - out.writeInt(size); - - int idx; - if (size % 2 != 0) { - WritableObjects.writeLong(out, others.get(0).longValue()); - idx = 1; - } else { - idx = 0; - } - for (; idx < size; idx += 2) { - WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue()); - } - } - - @Override - protected SkipTransactionsRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyToActor) { - return new SkipTransactionsRequest(target, sequence, replyToActor, others); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java index b62af7e7dc..9f3d54d9a6 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java @@ -7,29 +7,32 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * Successful reply to a {@link SkipTransactionsRequest}. 
*/ -@Beta +// FIXME: rename to SkipTransactionsSuccess public final class SkipTransactionsResponse extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private SkipTransactionsResponse(final SkipTransactionsResponse success, final ABIVersion version) { + super(success, version); + } + public SkipTransactionsResponse(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy( - final ABIVersion version) { - return new SkipTransactionsResponseProxyV1(this); + protected STS externalizableProxy(final ABIVersion version) { + return new STS(this); } @Override protected SkipTransactionsResponse cloneAsVersion(final ABIVersion version) { - return this; + return new SkipTransactionsResponse(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java deleted file mode 100644 index 9bc93f9497..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the initial (Phosphorus SR1) - * serialization format. - */ -final class SkipTransactionsResponseProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public SkipTransactionsResponseProxyV1() { - // For Externalizable - } - - SkipTransactionsResponseProxyV1(final SkipTransactionsResponse success) { - super(success); - } - - @Override - protected SkipTransactionsResponse createSuccess(final TransactionIdentifier target, final long sequence) { - return new SkipTransactionsResponse(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java new file mode 100644 index 0000000000..98f63b75ca --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
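[Editor's note] The SkipTransactionsResponse hunk above shows a change that recurs through the rest of this patch: cloneAsVersion() no longer returns `this` but a private copy-constructor clone bound to the requested version, so the proxy later chosen for it matches the negotiated serialization format. A hedged, self-contained sketch of that shape; DemoVersion and DemoSuccess are invented stand-ins, not the real ABIVersion/TransactionSuccess types:

```java
// Invented names throughout; only the shape of the change is restated here.
enum DemoVersion { OLD_FORMAT, NEW_FORMAT }

final class DemoSuccess {
    private final String target;
    private final long sequence;
    private final DemoVersion version;

    DemoSuccess(final String target, final long sequence, final DemoVersion version) {
        this.target = target;
        this.sequence = sequence;
        this.version = version;
    }

    // Mirrors the private copy constructors added to SkipTransactionsResponse,
    // TransactionAbortSuccess and friends.
    private DemoSuccess(final DemoSuccess original, final DemoVersion version) {
        this(original.target, original.sequence, version);
    }

    DemoVersion version() {
        return version;
    }

    DemoSuccess cloneAsVersion(final DemoVersion newVersion) {
        // Previously `return this;` -- the copy keeps identifier and sequence
        // but records the version the peer actually understands.
        return new DemoSuccess(this, newVersion);
    }
}
```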
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class TAR implements TransactionAbortRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionAbortRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TAR() { + // for Externalizable + } + + TAR(final TransactionAbortRequest message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionAbortRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionAbortRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java new file mode 100644 index 0000000000..daeee07046 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class TAS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionAbortSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TAS() { + // for Externalizable + } + + TAS(final TransactionAbortSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionAbortSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionAbortSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionAbortSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) { + return new TransactionAbortSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java new file mode 100644 index 0000000000..22a8a84a3a --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the Chlorine SR2 serialization + * format. + */ +final class TCCS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionCanCommitSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TCCS() { + // for Externalizable + } + + TCCS(final TransactionCanCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionCanCommitSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionCanCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionCanCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) { + return new TransactionCanCommitSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java new file mode 100644 index 0000000000..7f897d82f7 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the Chlorine SR2 serialization + * format. + */ +final class TCS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionCommitSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TCS() { + // for Externalizable + } + + TCS(final TransactionCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionCommitSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) { + return new TransactionCommitSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java new file mode 100644 index 0000000000..01c2733f9f --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class TDCR implements TransactionDoCommitRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionDoCommitRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TDCR() { + // for Externalizable + } + + TDCR(final TransactionDoCommitRequest message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionDoCommitRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionDoCommitRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java new file mode 100644 index 0000000000..6e26fc329d --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link TransactionFailure}. It implements the Chlorine SR2 serialization format. + */ +final class TF implements TransactionFailure.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionFailure message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TF() { + // for Externalizable + } + + TF(final TransactionFailure message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionFailure message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionFailure message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java new file mode 100644 index 0000000000..0bf4ae5ea9 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class TPCR implements TransactionPreCommitRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionPreCommitRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TPCR() { + // for Externalizable + } + + TPCR(final TransactionPreCommitRequest message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPreCommitRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionPreCommitRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java new file mode 100644 index 0000000000..a64efa1f95 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the Chlorine SR2 serialization + * format. + */ +final class TPCS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionPreCommitSuccess message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TPCS() { + // for Externalizable + } + + TPCS(final TransactionPreCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPreCommitSuccess message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionPreCommitSuccess message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPreCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) throws IOException { + return new TransactionPreCommitSuccess(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java new file mode 100644 index 0000000000..a80e1f6675 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +/** + * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the Chlorine SR2 serialization + * format. + */ +final class TPR implements TransactionPurgeRequest.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionPurgeRequest message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TPR() { + // for Externalizable + } + + TPR(final TransactionPurgeRequest message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPurgeRequest message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionPurgeRequest message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java new file mode 100644 index 0000000000..1b2f94a74e --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.commands; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; + +/** + * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the Chlorine SR2 serialization + * format. 
+ */ +final class TPS implements TransactionSuccess.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionPurgeResponse message; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TPS() { + // for Externalizable + } + + TPS(final TransactionPurgeResponse message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPurgeResponse message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final TransactionPurgeResponse message) { + this.message = requireNonNull(message); + } + + @Override + public TransactionPurgeResponse readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence) { + return new TransactionPurgeResponse(target, sequence); + } + + @Override + public Object readResolve() { + return message(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java index b8499cc2a2..c9238ab9af 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java @@ -8,30 +8,40 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * A transaction request to perform the abort step of the three-phase commit protocol. 
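[Editor's note] All of these request/success classes reach the wire through the proxy indirection shown in this patch. A hedged round-trip sketch using plain Java serialization: it assumes the common message base class routes serialization through externalizableProxy() via writeReplace(), consistent with the proxies' readResolve() methods above, and it assumes an initialized Akka serialization context for the embedded ActorRef; target, sequence and replyTo are caller-supplied:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import akka.actor.ActorRef;
import org.opendaylight.controller.cluster.access.commands.TransactionAbortRequest;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;

final class AbortRequestRoundTrip {
    private AbortRequestRoundTrip() {
        // utility class
    }

    static byte[] serialize(final TransactionIdentifier target, final long sequence, final ActorRef replyTo)
            throws IOException {
        final var request = new TransactionAbortRequest(target, sequence, replyTo);
        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            // writeReplace() on the message is assumed to substitute the compact TAR proxy form.
            oos.writeObject(request);
        }
        return bytes.toByteArray();
    }

    static TransactionAbortRequest deserialize(final byte[] bytes) throws IOException, ClassNotFoundException {
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            // readResolve() on the proxy hands back the rebuilt TransactionAbortRequest.
            return (TransactionAbortRequest) ois.readObject();
        }
    }
}
```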
- * - * @author Robert Varga */ -@Beta public final class TransactionAbortRequest extends TransactionRequest { + interface SerialForm extends TransactionRequest.SerialForm { + @Override + default TransactionAbortRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) { + return new TransactionAbortRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionAbortRequest(final TransactionAbortRequest request, final ABIVersion version) { + super(request, version); + } + public TransactionAbortRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) { super(target, sequence, replyTo); } @Override - protected TransactionAbortRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new TransactionAbortRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new TAR(this); } @Override protected TransactionAbortRequest cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionAbortRequest(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java deleted file mode 100644 index 3e67dfe83b..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class TransactionAbortRequestProxyV1 extends AbstractTransactionRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionAbortRequestProxyV1() { - // For Externalizable - } - - TransactionAbortRequestProxyV1(final TransactionAbortRequest request) { - super(request); - } - - @Override - protected TransactionAbortRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo) { - return new TransactionAbortRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java index 69c6dddd8f..db92890b1b 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java @@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * @author Robert Varga */ public final class TransactionAbortSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionAbortSuccess(final TransactionAbortSuccess success, final ABIVersion version) { + super(success, version); + } + public TransactionAbortSuccess(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy(final ABIVersion version) { - return new TransactionAbortSuccessProxyV1(this); + protected TAS externalizableProxy(final ABIVersion version) { + return new TAS(this); } @Override protected TransactionAbortSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionAbortSuccess(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java deleted file mode 100644 index c9de3b9703..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionAbortSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionAbortSuccessProxyV1() { - // For Externalizable - } - - TransactionAbortSuccessProxyV1(final TransactionAbortSuccess success) { - super(success); - } - - @Override - protected TransactionAbortSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new TransactionAbortSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java index e6149bd5da..55c5cdb2d6 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java @@ -17,20 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * @author Robert Varga */ public final class TransactionCanCommitSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionCanCommitSuccess(final TransactionCanCommitSuccess success, final ABIVersion version) { + super(success, version); + } + public TransactionCanCommitSuccess(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy( - final ABIVersion version) { - return new TransactionCanCommitSuccessProxyV1(this); + protected TCCS externalizableProxy(final ABIVersion version) { + return new TCCS(this); } @Override protected TransactionCanCommitSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionCanCommitSuccess(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java deleted file mode 100644 index b41ec2998f..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionCanCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionCanCommitSuccessProxyV1() { - // For Externalizable - } - - TransactionCanCommitSuccessProxyV1(final TransactionCanCommitSuccess success) { - super(success); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - } - - @Override - protected TransactionCanCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new TransactionCanCommitSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java index 6b28244484..1fc06da9c9 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java @@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * @author Robert Varga */ public final class TransactionCommitSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionCommitSuccess(final TransactionCommitSuccess success, final ABIVersion version) { + super(success, version); + } + public TransactionCommitSuccess(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy(final ABIVersion version) { - return new TransactionCommitSuccessProxyV1(this); + protected TCS externalizableProxy(final ABIVersion version) { + return new TCS(this); } @Override protected TransactionCommitSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionCommitSuccess(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java deleted file mode 100644 index f23da7bac2..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the initial (Boron) - * serialization format. 
- * - * @author Robert Varga - */ -final class TransactionCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionCommitSuccessProxyV1() { - // For Externalizable - } - - TransactionCommitSuccessProxyV1(final TransactionCommitSuccess success) { - super(success); - } - - @Override - protected TransactionCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new TransactionCommitSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java index 9f0cb81876..d6eb297bb0 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import java.io.IOException; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; @@ -17,10 +16,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutpu /** * A {@link TransactionModification} which has a data component. - * - * @author Robert Varga */ -@Beta public abstract class TransactionDataModification extends TransactionModification { private final NormalizedNode data; diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java index 62acdbbb76..00c9bd8259 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java @@ -7,15 +7,11 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** * Delete a particular path. 
- * - * @author Robert Varga */ -@Beta public final class TransactionDelete extends TransactionModification { public TransactionDelete(final YangInstanceIdentifier path) { super(path); diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java index 955c268008..ad7ffad146 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java @@ -8,30 +8,40 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * A transaction request to perform the final, doCommit, step of the three-phase commit protocol. - * - * @author Robert Varga */ -@Beta public final class TransactionDoCommitRequest extends TransactionRequest { + interface SerialForm extends TransactionRequest.SerialForm { + @Override + default TransactionDoCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) { + return new TransactionDoCommitRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionDoCommitRequest(final TransactionDoCommitRequest request, final ABIVersion version) { + super(request, version); + } + public TransactionDoCommitRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) { super(target, sequence, replyTo); } @Override - protected TransactionDoCommitRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new TransactionDoCommitRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new TDCR(this); } @Override protected TransactionDoCommitRequest cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionDoCommitRequest(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java deleted file mode 100644 index fcb63fd0f5..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionDoCommitRequest}. 
It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class TransactionDoCommitRequestProxyV1 extends AbstractTransactionRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionDoCommitRequestProxyV1() { - // For Externalizable - } - - TransactionDoCommitRequestProxyV1(final TransactionDoCommitRequest request) { - super(request); - } - - @Override - protected TransactionDoCommitRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo) { - return new TransactionDoCommitRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java index e0b6a59987..288a90ee3c 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RequestFailure; @@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier /** * Generic {@link RequestFailure} involving a {@link TransactionRequest}. 
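
Note: the ProxyV1 classes being deleted here all carried the same Externalizable boilerplate. The replacement idiom, visible in TransactionDoCommitRequest.SerialForm and its siblings, moves that plumbing into interface default methods; the concrete proxy only stores the message, exposes it to the defaults, and resolves back to it on read. Below is a self-contained sketch of the idiom with illustrative names (Msg, MsgProxy), not the real classes, and a deliberately trivial payload.

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    // Illustrative only: an Externalizable "serial form" expressed as an interface with
    // default methods, so each concrete proxy is reduced to storage plus readResolve().
    final class Msg implements Serializable {
        private static final long serialVersionUID = 1L;

        final long sequence;

        Msg(final long sequence) {
            this.sequence = sequence;
        }

        interface SerialForm extends Externalizable {
            Msg message();

            void setMessage(Msg message);

            @Override
            default void writeExternal(final ObjectOutput out) throws IOException {
                out.writeLong(message().sequence);
            }

            @Override
            default void readExternal(final ObjectInput in) throws IOException {
                setMessage(new Msg(in.readLong()));
            }
        }

        private Object writeReplace() {
            // Swap in the proxy for the serialized representation
            return new MsgProxy(this);
        }
    }

    final class MsgProxy implements Msg.SerialForm {
        private static final long serialVersionUID = 1L;

        private Msg message;

        public MsgProxy() {
            // public no-arg constructor required for Externalizable
        }

        MsgProxy(final Msg message) {
            this.message = message;
        }

        @Override
        public Msg message() {
            return message;
        }

        @Override
        public void setMessage(final Msg message) {
            this.message = message;
        }

        private Object readResolve() {
            // Hand the reconstructed message back to the caller
            return message;
        }
    }
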
- * - * @author Robert Varga */ -@Beta public final class TransactionFailure extends RequestFailure { + interface SerialForm extends RequestFailure.SerialForm { + @Override + default TransactionIdentifier readTarget(final DataInput in) throws IOException { + return TransactionIdentifier.readFrom(in); + } + + @Override + default TransactionFailure createFailure(final TransactionIdentifier target, final long sequence, + final RequestException cause) { + return new TransactionFailure(target, sequence, cause); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionFailure(final TransactionFailure failure, final ABIVersion version) { + super(failure, version); + } + TransactionFailure(final TransactionIdentifier target, final long sequence, final RequestException cause) { super(target, sequence, cause); } @Override protected TransactionFailure cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionFailure(this, version); } @Override - protected TransactionFailureProxyV1 externalizableProxy(final ABIVersion version) { - return new TransactionFailureProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new TF(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java deleted file mode 100644 index d3b1dd7cf7..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import java.io.DataInput; -import java.io.IOException; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; -import org.opendaylight.controller.cluster.access.concepts.RequestException; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionFailure}. It implements the initial (Boron) serialization - * format. - * - * @author Robert Varga - */ -final class TransactionFailureProxyV1 extends AbstractRequestFailureProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionFailureProxyV1() { - // For Externalizable - } - - TransactionFailureProxyV1(final TransactionFailure failure) { - super(failure); - } - - @Override - protected TransactionFailure createFailure(final TransactionIdentifier target, final long sequence, - final RequestException cause) { - return new TransactionFailure(target, sequence, cause); - } - - @Override - protected TransactionIdentifier readTarget(final DataInput in) throws IOException { - return TransactionIdentifier.readFrom(in); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java index bf2580f44b..2784687d28 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java @@ -7,16 +7,12 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; /** * Merge a {@link NormalizedNode} tree onto a specific path. - * - * @author Robert Varga */ -@Beta public final class TransactionMerge extends TransactionDataModification { public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode data) { super(path, data); diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java index a4e0194373..96bea87d46 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.commands; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects; import java.io.IOException; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -23,10 +22,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutpu * {@link #readFrom(NormalizedNodeDataInput, ReusableStreamReceiver)} methods for explicit serialization. The reason for * this is that they are usually transmitted in bulk, hence it is advantageous to reuse * a {@link NormalizedNodeDataOutput} instance to achieve better compression. 
- * - * @author Robert Varga */ -@Beta public abstract class TransactionModification { static final byte TYPE_DELETE = 1; static final byte TYPE_MERGE = 2; @@ -57,15 +53,11 @@ public abstract class TransactionModification { static TransactionModification readFrom(final NormalizedNodeDataInput in, final ReusableStreamReceiver writer) throws IOException { final byte type = in.readByte(); - switch (type) { - case TYPE_DELETE: - return new TransactionDelete(in.readYangInstanceIdentifier()); - case TYPE_MERGE: - return new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer)); - case TYPE_WRITE: - return new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer)); - default: - throw new IllegalArgumentException("Unhandled type " + type); - } + return switch (type) { + case TYPE_DELETE -> new TransactionDelete(in.readYangInstanceIdentifier()); + case TYPE_MERGE -> new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer)); + case TYPE_WRITE -> new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer)); + default -> throw new IllegalArgumentException("Unhandled type " + type); + }; } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java index 226557d124..3172842f76 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java @@ -8,31 +8,41 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; /** * A transaction request to perform the second, preCommit, step of the three-phase commit protocol. 
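
Note: readFrom() above now uses a switch expression, so the unhandled-type case is part of the expression itself and each branch yields a value directly. The same decode shape in isolation, using illustrative record types and DataInput.readUTF() in place of the real NormalizedNode stream calls:

    import java.io.DataInput;
    import java.io.IOException;

    // Illustrative stand-in for TransactionModification.readFrom(): decode a type byte
    // with a switch expression instead of a switch statement.
    sealed interface Modification permits Delete, Merge, Write { }

    record Delete(String path) implements Modification { }

    record Merge(String path, String data) implements Modification { }

    record Write(String path, String data) implements Modification { }

    final class ModificationCodec {
        static final byte TYPE_DELETE = 1;
        static final byte TYPE_MERGE = 2;
        static final byte TYPE_WRITE = 3;

        private ModificationCodec() {
        }

        static Modification readFrom(final DataInput in) throws IOException {
            final byte type = in.readByte();
            return switch (type) {
                case TYPE_DELETE -> new Delete(in.readUTF());
                case TYPE_MERGE -> new Merge(in.readUTF(), in.readUTF());
                case TYPE_WRITE -> new Write(in.readUTF(), in.readUTF());
                default -> throw new IllegalArgumentException("Unhandled type " + type);
            };
        }
    }
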
- * - * @author Robert Varga */ -@Beta public final class TransactionPreCommitRequest extends TransactionRequest { + interface SerialForm extends TransactionRequest.SerialForm { + @Override + default TransactionPreCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) { + return new TransactionPreCommitRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionPreCommitRequest(final TransactionPreCommitRequest request, final ABIVersion version) { + super(request, version); + } + public TransactionPreCommitRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) { super(target, sequence, replyTo); } @Override - protected TransactionPreCommitRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new TransactionPreCommitRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new TPCR(this); } @Override protected TransactionPreCommitRequest cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionPreCommitRequest(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java deleted file mode 100644 index bf044e1a15..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionPreCommitRequestProxyV1 extends AbstractTransactionRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionPreCommitRequestProxyV1() { - // For Externalizable - } - - TransactionPreCommitRequestProxyV1(final TransactionPreCommitRequest request) { - super(request); - } - - @Override - protected TransactionPreCommitRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo) { - return new TransactionPreCommitRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java index 716f37804c..695d261512 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java @@ -16,20 +16,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * @author Robert Varga */ public final class TransactionPreCommitSuccess extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionPreCommitSuccess(final TransactionPreCommitSuccess success, final ABIVersion version) { + super(success, version); + } + public TransactionPreCommitSuccess(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy( - final ABIVersion version) { - return new TransactionPreCommitSuccessProxyV1(this); + protected TPCS externalizableProxy(final ABIVersion version) { + return new TPCS(this); } @Override protected TransactionPreCommitSuccess cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionPreCommitSuccess(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java deleted file mode 100644 index 17b1b8da0d..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionPreCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionPreCommitSuccessProxyV1() { - // For Externalizable - } - - TransactionPreCommitSuccessProxyV1(final TransactionPreCommitSuccess success) { - super(success); - } - - @Override - protected TransactionPreCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) { - return new TransactionPreCommitSuccess(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java index a0fab70188..757c8134a2 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java @@ -8,7 +8,7 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.ObjectInput; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -16,24 +16,34 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * A transaction request to perform the final transaction transition, which is purging it from the protocol view, * meaning the frontend has no further knowledge of the transaction. The backend is free to purge any state related * to the transaction and responds with a {@link TransactionPurgeResponse}. - * - * @author Robert Varga */ -@Beta public final class TransactionPurgeRequest extends TransactionRequest { + interface SerialForm extends TransactionRequest.SerialForm { + @Override + default TransactionPurgeRequest readExternal(final ObjectInput in, final TransactionIdentifier target, + final long sequence, final ActorRef replyTo) { + return new TransactionPurgeRequest(target, sequence, replyTo); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionPurgeRequest(final TransactionPurgeRequest request, final ABIVersion version) { + super(request, version); + } + public TransactionPurgeRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) { super(target, sequence, replyTo); } @Override - protected TransactionPurgeRequestProxyV1 externalizableProxy(final ABIVersion version) { - return new TransactionPurgeRequestProxyV1(this); + protected SerialForm externalizableProxy(final ABIVersion version) { + return new TPR(this); } @Override protected TransactionPurgeRequest cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionPurgeRequest(this, version); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java deleted file mode 100644 index ee56b4c81a..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2017 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionPurgeRequestProxyV1 extends AbstractTransactionRequestProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionPurgeRequestProxyV1() { - // For Externalizable - } - - TransactionPurgeRequestProxyV1(final TransactionPurgeRequest request) { - super(request); - } - - @Override - protected TransactionPurgeRequest createRequest(final TransactionIdentifier target, final long sequence, - final ActorRef replyTo) { - return new TransactionPurgeRequest(target, sequence, replyTo); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java index 54710143bc..558e414d12 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java @@ -12,24 +12,27 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier /** * Successful reply to a {@link TransactionPurgeRequest}. - * - * @author Robert Varga */ +// FIXME: rename to TransactionPurgeSuccess public final class TransactionPurgeResponse extends TransactionSuccess { + @java.io.Serial private static final long serialVersionUID = 1L; + private TransactionPurgeResponse(final TransactionPurgeResponse success, final ABIVersion version) { + super(success, version); + } + public TransactionPurgeResponse(final TransactionIdentifier identifier, final long sequence) { super(identifier, sequence); } @Override - protected AbstractTransactionSuccessProxy externalizableProxy( - final ABIVersion version) { - return new TransactionPurgeResponseProxyV1(this); + protected TPS externalizableProxy(final ABIVersion version) { + return new TPS(this); } @Override protected TransactionPurgeResponse cloneAsVersion(final ABIVersion version) { - return this; + return new TransactionPurgeResponse(this, version); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java deleted file mode 100644 index d15d7292b5..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2017 Cisco Systems, Inc. and others. 
All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.commands; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; - -/** - * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the initial (Boron) - * serialization format. - * - * @author Robert Varga - */ -final class TransactionPurgeResponseProxyV1 extends AbstractTransactionSuccessProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public TransactionPurgeResponseProxyV1() { - // For Externalizable - } - - TransactionPurgeResponseProxyV1(final TransactionPurgeResponse success) { - super(success); - } - - @Override - protected TransactionPurgeResponse createSuccess(final TransactionIdentifier target, final long sequence) { - return new TransactionPurgeResponse(target, sequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java index 4dcf6ea93c..15d98f9150 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java @@ -8,7 +8,8 @@ package org.opendaylight.controller.cluster.access.commands; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.Request; import org.opendaylight.controller.cluster.access.concepts.RequestException; @@ -18,12 +19,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * Abstract base class for {@link Request}s involving specific transaction. This class is visible outside of this * package solely for the ability to perform a unified instanceof check. 
* - * @author Robert Varga - * * @param Message type */ -@Beta public abstract class TransactionRequest> extends Request { + protected interface SerialForm> + extends Request.SerialForm { + @Override + default TransactionIdentifier readTarget(final DataInput in) throws IOException { + return TransactionIdentifier.readFrom(in); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; TransactionRequest(final TransactionIdentifier identifier, final long sequence, final ActorRef replyTo) { @@ -40,5 +47,5 @@ public abstract class TransactionRequest> extend } @Override - protected abstract AbstractTransactionRequestProxy externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java index 636a2e741b..689b4d5ee1 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; +import java.io.DataInput; +import java.io.IOException; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.RequestSuccess; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -16,13 +17,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * Abstract base class for {@link RequestSuccess}es involving specific transaction. This class is visible outside of * this package solely for the ability to perform a unified instanceof check. 
* - * @author Robert Varga - * * @param Message type */ -@Beta public abstract class TransactionSuccess> extends RequestSuccess { + interface SerialForm> extends RequestSuccess.SerialForm { + @Override + default TransactionIdentifier readTarget(final DataInput in) throws IOException { + return TransactionIdentifier.readFrom(in); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; TransactionSuccess(final TransactionIdentifier identifier, final long sequence) { @@ -34,5 +40,5 @@ public abstract class TransactionSuccess> } @Override - protected abstract AbstractTransactionSuccessProxy externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java index 4960c4ad98..af1acbe57e 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java @@ -7,16 +7,12 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; /** * Modification to write (and replace) a subtree at specified path with another subtree. - * - * @author Robert Varga */ -@Beta public final class TransactionWrite extends TransactionDataModification { public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode data) { super(path, data); diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java index 196c60c0d8..c688df3c90 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java @@ -7,17 +7,14 @@ */ package org.opendaylight.controller.cluster.access.commands; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; /** * A {@link RequestException} indicating that the backend has received a request referencing an unknown history. This * typically happens when the linear history ID is newer than the highest observed {@link CreateLocalHistoryRequest}. - * - * @author Robert Varga */ -@Beta public final class UnknownHistoryException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public UnknownHistoryException(final Long lastSeenHistory) { @@ -25,7 +22,7 @@ public final class UnknownHistoryException extends RequestException { } private static String historyToString(final Long history) { - return history == null ? "null" : Long.toUnsignedString(history.longValue()); + return history == null ? 
"null" : Long.toUnsignedString(history); } @Override diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java deleted file mode 100644 index 71a731adde..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.yangtools.concepts.WritableObjects; - -abstract class AbstractEnvelopeProxy> implements Externalizable { - private static final long serialVersionUID = 1L; - - private T message; - private long sessionId; - private long txSequence; - - AbstractEnvelopeProxy() { - // for Externalizable - } - - AbstractEnvelopeProxy(final Envelope envelope) { - message = envelope.getMessage(); - txSequence = envelope.getTxSequence(); - sessionId = envelope.getSessionId(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - WritableObjects.writeLongs(out, sessionId, txSequence); - out.writeObject(message); - } - - @SuppressWarnings("unchecked") - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - final byte header = WritableObjects.readLongHeader(in); - sessionId = WritableObjects.readFirstLong(in, header); - txSequence = WritableObjects.readSecondLong(in, header); - message = (T) in.readObject(); - } - - @SuppressWarnings("checkstyle:hiddenField") - abstract Envelope createEnvelope(T wrappedNessage, long sessionId, long txSequence); - - final Object readResolve() { - return createEnvelope(message, sessionId, txSequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java deleted file mode 100644 index 0367527a5e..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import static com.google.common.base.Verify.verifyNotNull; - -import java.io.DataInput; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.concepts.WritableIdentifier; -import org.opendaylight.yangtools.concepts.WritableObjects; - -/** - * Abstract Externalizable proxy for use with {@link Message} subclasses. - * - * @author Robert Varga - * - * @param Target identifier type - * @param Message class - */ -abstract class AbstractMessageProxy> implements Externalizable { - private static final long serialVersionUID = 1L; - private T target; - private long sequence; - - protected AbstractMessageProxy() { - // For Externalizable - } - - AbstractMessageProxy(final @NonNull C message) { - this.target = message.getTarget(); - this.sequence = message.getSequence(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - target.writeTo(out); - WritableObjects.writeLong(out, sequence); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - target = verifyNotNull(readTarget(in)); - sequence = WritableObjects.readLong(in); - } - - protected final Object readResolve() { - return verifyNotNull(createMessage(target, sequence)); - } - - protected abstract @NonNull T readTarget(@NonNull DataInput in) throws IOException; - - abstract @NonNull C createMessage(@NonNull T msgTarget, long msgSequence); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java deleted file mode 100644 index e35936d53b..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import com.google.common.annotations.Beta; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.concepts.WritableIdentifier; - -/** - * Abstract Externalizable proxy for use with {@link RequestFailure} subclasses. 
- * - * @author Robert Varga - * - * @param Target identifier type - */ -@Beta -public abstract class AbstractRequestFailureProxy> - extends AbstractResponseProxy { - private static final long serialVersionUID = 1L; - private RequestException cause; - - protected AbstractRequestFailureProxy() { - // For Externalizable - } - - protected AbstractRequestFailureProxy(final @NonNull C failure) { - super(failure); - this.cause = failure.getCause(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - out.writeObject(cause); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - cause = (RequestException) in.readObject(); - } - - @Override - final C createResponse(final T target, final long sequence) { - return createFailure(target, sequence, cause); - } - - protected abstract @NonNull C createFailure(@NonNull T target, long sequence, - @NonNull RequestException failureCause); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java deleted file mode 100644 index 183766f310..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import akka.actor.ActorRef; -import akka.serialization.JavaSerializer; -import akka.serialization.Serialization; -import com.google.common.annotations.Beta; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.concepts.WritableIdentifier; - -/** - * Abstract Externalizable proxy for use with {@link Request} subclasses. 
- * - * @author Robert Varga - * - * @param Target identifier type - */ -@Beta -public abstract class AbstractRequestProxy> - extends AbstractMessageProxy { - private static final long serialVersionUID = 1L; - private ActorRef replyTo; - - protected AbstractRequestProxy() { - // For Externalizable - } - - protected AbstractRequestProxy(final @NonNull C request) { - super(request); - this.replyTo = request.getReplyTo(); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - out.writeObject(Serialization.serializedActorPath(replyTo)); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - replyTo = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject()); - } - - @Override - final C createMessage(final T target, final long sequence) { - return createRequest(target, sequence, replyTo); - } - - protected abstract @NonNull C createRequest(@NonNull T target, long sequence, @NonNull ActorRef replyToActor); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java deleted file mode 100644 index 1e873b4bdf..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import org.opendaylight.yangtools.concepts.WritableObjects; - -abstract class AbstractResponseEnvelopeProxy> extends AbstractEnvelopeProxy { - private static final long serialVersionUID = 1L; - - private long executionTimeNanos; - - AbstractResponseEnvelopeProxy() { - // for Externalizable - } - - AbstractResponseEnvelopeProxy(final ResponseEnvelope envelope) { - super(envelope); - this.executionTimeNanos = envelope.getExecutionTimeNanos(); - } - - @Override - public final void writeExternal(final ObjectOutput out) throws IOException { - super.writeExternal(out); - WritableObjects.writeLong(out, executionTimeNanos); - } - - @Override - public final void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - super.readExternal(in); - executionTimeNanos = WritableObjects.readLong(in); - } - - @Override - final ResponseEnvelope createEnvelope(final T message, final long sessionId, final long txSequence) { - return createEnvelope(message, sessionId, txSequence, executionTimeNanos); - } - - @SuppressWarnings("checkstyle:hiddenField") - abstract ResponseEnvelope createEnvelope(T message, long sessionId, long txSequence, long executionTimeNanos); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java deleted file 
mode 100644 index c9edfdb05f..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.concepts.WritableIdentifier; - -/** - * Abstract Externalizable proxy class to use with {@link Response} subclasses. - * - * @author Robert Varga - * - * @param Target identifier type - * @param Message class - */ -abstract class AbstractResponseProxy> - extends AbstractMessageProxy { - private static final long serialVersionUID = 1L; - - protected AbstractResponseProxy() { - // for Externalizable - } - - AbstractResponseProxy(final @NonNull C response) { - super(response); - } - - @Override - final C createMessage(final T target, final long sequence) { - return createResponse(target, sequence); - } - - abstract @NonNull C createResponse(@NonNull T target, long sequence); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java deleted file mode 100644 index ecf792e1d9..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -import com.google.common.annotations.Beta; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.concepts.WritableIdentifier; - -/** - * Abstract Externalizable proxy for use with {@link RequestSuccess} subclasses. - * - * @author Robert Varga - * - * @param Target identifier type - */ -@Beta -public abstract class AbstractSuccessProxy> - extends AbstractResponseProxy { - private static final long serialVersionUID = 1L; - - protected AbstractSuccessProxy() { - // For Externalizable - } - - protected AbstractSuccessProxy(final @NonNull C success) { - super(success); - } - - @Override - final C createResponse(final T target, final long sequence) { - return createSuccess(target, sequence); - } - - protected abstract @NonNull C createSuccess(@NonNull T target, long sequence); -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java new file mode 100644 index 0000000000..e88764dbed --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link ClientIdentifier}. + */ +final class CI implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ClientIdentifier identifier; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CI() { + // for Externalizable + } + + CI(final ClientIdentifier identifier) { + this.identifier = requireNonNull(identifier); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + identifier = new ClientIdentifier(FrontendIdentifier.readFrom(in), WritableObjects.readLong(in)); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + identifier.getFrontendId().writeTo(out); + WritableObjects.writeLong(out, identifier.getGeneration()); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(identifier); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java index c317ac31b6..42701539a6 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java @@ -9,14 +9,10 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024.ClientGeneration; import org.opendaylight.yangtools.concepts.WritableIdentifier; @@ -26,45 +22,9 @@ import org.opendaylight.yangtools.yang.common.Uint64; /** * A cluster-wide unique identifier of a frontend instance. This identifier discerns between individual incarnations * of a particular frontend. - * - * @author Robert Varga */ -@Beta public final class ClientIdentifier implements WritableIdentifier { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private FrontendIdentifier frontendId; - private long generation; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // Needed for Externalizable - } - - Proxy(final FrontendIdentifier frontendId, final long generation) { - this.frontendId = requireNonNull(frontendId); - this.generation = generation; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - frontendId.writeTo(out); - WritableObjects.writeLong(out, generation); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - frontendId = FrontendIdentifier.readFrom(in); - generation = WritableObjects.readLong(in); - } - - private Object readResolve() { - return new ClientIdentifier(frontendId, generation); - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull FrontendIdentifier frontendId; @@ -110,24 +70,20 @@ public final class ClientIdentifier implements WritableIdentifier { @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof ClientIdentifier)) { - return false; - } - - final ClientIdentifier other = (ClientIdentifier) obj; - return generation == other.generation && frontendId.equals(other.frontendId); + return this == obj || obj instanceof ClientIdentifier other && generation == other.generation + && frontendId.equals(other.frontendId); } @Override public String toString() { - return MoreObjects.toStringHelper(ClientIdentifier.class).add("frontend", frontendId) - .add("generation", Long.toUnsignedString(generation)).toString(); + return MoreObjects.toStringHelper(ClientIdentifier.class) + .add("frontend", frontendId) + .add("generation", Long.toUnsignedString(generation)) + .toString(); } + @java.io.Serial private Object writeReplace() { - return new Proxy(frontendId, generation); + return new CI(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java index c6123d6fc3..eed7b53741 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java @@ -10,11 +10,49 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.io.Serializable; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.concepts.Immutable; +import org.opendaylight.yangtools.concepts.WritableObjects; public abstract class Envelope> implements Immutable, Serializable { + interface SerialForm, E extends Envelope> extends Externalizable { + + @NonNull E envelope(); + + void setEnvelope(@NonNull E envelope); + + @java.io.Serial + Object readResolve(); + + @Override + default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + final byte header = WritableObjects.readLongHeader(in); + final var sessionId = WritableObjects.readFirstLong(in, header); + final var txSequence = WritableObjects.readSecondLong(in, header); + @SuppressWarnings("unchecked") + final var message = (T) in.readObject(); + setEnvelope(readExternal(in, sessionId, txSequence, message)); + } + + E 
readExternal(ObjectInput in, long sessionId, long txSequence, T message) throws IOException; + + @Override + default void writeExternal(final ObjectOutput out) throws IOException { + writeExternal(out, envelope()); + } + + default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException { + WritableObjects.writeLongs(out, envelope.getSessionId(), envelope.getTxSequence()); + out.writeObject(envelope.getMessage()); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull T message; @@ -60,9 +98,10 @@ public abstract class Envelope> implements Immutable, Se .add("txSequence", Long.toHexString(txSequence)).add("message", message).toString(); } + @java.io.Serial final Object writeReplace() { return createProxy(); } - abstract AbstractEnvelopeProxy createProxy(); + abstract @NonNull SerialForm createProxy(); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java new file mode 100644 index 0000000000..3038437fcd --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; + +/** + * Serialization proxy for {@link FailureEnvelope}. + */ +final class FE implements ResponseEnvelope.SerialForm, FailureEnvelope> { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private FailureEnvelope envelope; + + @SuppressWarnings("checkstyle:RedundantModifier") + public FE() { + // for Externalizable + } + + FE(final FailureEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public FailureEnvelope envelope() { + return verifyNotNull(envelope); + } + + @Override + public void setEnvelope(final FailureEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public FailureEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence, + final RequestFailure message, final long executionTimeNanos) { + return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos); + } + + @Override + public Object readResolve() { + return envelope(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java new file mode 100644 index 0000000000..1a3e72b831 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + +/** + * Serialization proxy for {@link FrontendIdentifier}. + */ +final class FI implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private FrontendIdentifier identifier; + + @SuppressWarnings("checkstyle:RedundantModifier") + public FI() { + // for Externalizable + } + + FI(final FrontendIdentifier identifier) { + this.identifier = requireNonNull(identifier); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + identifier = new FrontendIdentifier(MemberName.readFrom(in), FrontendType.readFrom(in)); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + identifier.getMemberName().writeTo(out); + identifier.getClientType().writeTo(out); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(identifier); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java new file mode 100644 index 0000000000..9e900f7e40 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.charset.StandardCharsets; + +/** + * Serialization proxy for {@link FrontendType}. 
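For reference, FT (and the analogous MN proxy below) carries nothing but the identifier's pre-serialized UTF-8 bytes, prefixed with their length. A minimal, self-contained sketch of that wire format, using a plain DataOutputStream/DataInputStream pair rather than the actual Externalizable streams:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Illustrative only: mirrors FT.writeExternal()/readExternal(), which write an int length
// followed by the raw bytes and read them back with readFully().
final class LengthPrefixedBytesExample {
    public static void main(final String[] args) throws IOException {
        final byte[] serialized = "frontend-type".getBytes(StandardCharsets.UTF_8);

        // write: length prefix, then the bytes themselves
        final var bos = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bos)) {
            out.writeInt(serialized.length);
            out.write(serialized);
        }

        // read: allocate the declared length and fill it completely
        try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            final byte[] read = new byte[in.readInt()];
            in.readFully(read);
            System.out.println(new String(read, StandardCharsets.UTF_8));
        }
    }
}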
+ */ +final class FT implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private byte[] serialized; + + @SuppressWarnings("checkstyle:RedundantModifier") + public FT() { + // for Externalizable + } + + FT(final byte[] serialized) { + this.serialized = requireNonNull(serialized); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeInt(serialized.length); + out.write(serialized); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + serialized = new byte[in.readInt()]; + in.readFully(serialized); + } + + @java.io.Serial + private Object readResolve() { + // TODO: consider caching instances here + return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java index 1f641eb181..5342d05f5e 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java @@ -8,6 +8,7 @@ package org.opendaylight.controller.cluster.access.concepts; public final class FailureEnvelope extends ResponseEnvelope> { + @java.io.Serial private static final long serialVersionUID = 1L; public FailureEnvelope(final RequestFailure message, final long sessionId, final long txSequence, @@ -16,7 +17,7 @@ public final class FailureEnvelope extends ResponseEnvelope } @Override - FailureEnvelopeProxy createProxy() { - return new FailureEnvelopeProxy(this); + FE createProxy() { + return new FE(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java deleted file mode 100644 index adc50e1eae..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -final class FailureEnvelopeProxy extends AbstractResponseEnvelopeProxy> { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to be - // able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public FailureEnvelopeProxy() { - // for Externalizable - } - - FailureEnvelopeProxy(final FailureEnvelope envelope) { - super(envelope); - } - - @Override - ResponseEnvelope> createEnvelope(final RequestFailure message, final long sessionId, - final long txSequence, final long executionTimeNanos) { - return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java index 10abac6d58..76aad38da7 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java @@ -9,59 +9,20 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.util.Objects; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.concepts.WritableIdentifier; /** * A cluster-wide unique identifier of a frontend type located at a cluster member. - * - * @author Robert Varga */ -@Beta public final class FrontendIdentifier implements WritableIdentifier { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private MemberName memberName; - private FrontendType clientType; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // Needed for Externalizable - } - - Proxy(final MemberName memberName, final FrontendType clientType) { - this.memberName = requireNonNull(memberName); - this.clientType = requireNonNull(clientType); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - memberName.writeTo(out); - clientType.writeTo(out); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - memberName = MemberName.readFrom(in); - clientType = FrontendType.readFrom(in); - } - - private Object readResolve() { - return new FrontendIdentifier(memberName, clientType); - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; + private final MemberName memberName; private final FrontendType clientType; @@ -75,8 +36,8 @@ public final class FrontendIdentifier implements WritableIdentifier { } public static @NonNull FrontendIdentifier readFrom(final DataInput in) throws IOException { - final MemberName memberName = MemberName.readFrom(in); - final FrontendType clientType = FrontendType.readFrom(in); + final var memberName = MemberName.readFrom(in); + final var clientType = FrontendType.readFrom(in); return new FrontendIdentifier(memberName, clientType); } @@ -101,15 +62,8 @@ public final class FrontendIdentifier implements WritableIdentifier { @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof FrontendIdentifier)) { - return false; - } - - final FrontendIdentifier other = (FrontendIdentifier) obj; - return memberName.equals(other.memberName) && clientType.equals(other.clientType); + return this == obj || obj instanceof FrontendIdentifier other && memberName.equals(other.memberName) + && clientType.equals(other.clientType); } public @NonNull String toPersistentId() { @@ -121,7 +75,8 @@ public final class FrontendIdentifier implements WritableIdentifier { return toPersistentId(); } + @java.io.Serial private Object writeReplace() { - return new Proxy(memberName, clientType); + return new FI(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java index 2a2a5b2b30..6619181613 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java @@ -11,16 +11,12 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects; import com.google.common.base.Strings; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.nio.charset.StandardCharsets; import java.util.regex.Pattern; import org.eclipse.jdt.annotation.NonNull; @@ -31,47 +27,12 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; * An {@link Identifier} identifying a data store frontend type, which is able to access the data store backend. 
* Frontend implementations need to define this identifier so that multiple clients existing on a member node can be * discerned. - * - * @author Robert Varga */ -@Beta public final class FrontendType implements Comparable, WritableIdentifier { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private byte[] serialized; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final byte[] serialized) { - this.serialized = requireNonNull(serialized); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeInt(serialized.length); - out.write(serialized); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - serialized = new byte[in.readInt()]; - in.readFully(serialized); - } - - private Object readResolve() { - // TODO: consider caching instances here - return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized); - } - } - + @java.io.Serial + private static final long serialVersionUID = 1L; private static final String SIMPLE_STRING_REGEX = "^[a-zA-Z0-9-_.*+:=,!~';]+$"; private static final Pattern SIMPLE_STRING_PATTERN = Pattern.compile(SIMPLE_STRING_REGEX); - private static final long serialVersionUID = 1L; private final @NonNull String name; @@ -157,7 +118,8 @@ public final class FrontendType implements Comparable, WritableIde return local; } - Object writeReplace() { - return new Proxy(getSerialized()); + @java.io.Serial + private Object writeReplace() { + return new FT(getSerialized()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java new file mode 100644 index 0000000000..ab4d884eee --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link LocalHistoryIdentifier}. + * + * @implNote + * cookie is currently required only for module-based sharding, which is implemented as part of normal + * DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence we may end up not needing + * cookie at all. + * We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we end up not needing + * the cookie at all, we can switch to writeLong() and use zero flags for compatibility. 
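A minimal sketch of the writeLongs()/readLongHeader()/readFirstLong()/readSecondLong() pairing that HI relies on for historyId and cookie. The values are arbitrary and the stream is a plain DataOutputStream, not the actual Externalizable stream:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.yangtools.concepts.WritableObjects;

// Illustrative only: the same header-prefixed two-long encoding used by
// HI.writeExternal() and decoded by HI.readExternal().
final class PairedLongsExample {
    public static void main(final String[] args) throws IOException {
        final long historyId = 42;
        final long cookie = 0;

        final var bos = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bos)) {
            WritableObjects.writeLongs(out, historyId, cookie);
        }

        try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            final byte header = WritableObjects.readLongHeader(in);
            System.out.println(WritableObjects.readFirstLong(in, header));   // 42
            System.out.println(WritableObjects.readSecondLong(in, header));  // 0
        }
    }
}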
+ */ +final class HI implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private LocalHistoryIdentifier identifier; + + @SuppressWarnings("checkstyle:RedundantModifier") + public HI() { + // for Externalizable + } + + HI(final LocalHistoryIdentifier identifier) { + this.identifier = requireNonNull(identifier); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + identifier.getClientId().writeTo(out); + WritableObjects.writeLongs(out, identifier.getHistoryId(), identifier.getCookie()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + final var clientId = ClientIdentifier.readFrom(in); + final byte header = WritableObjects.readLongHeader(in); + final var historyId = WritableObjects.readFirstLong(in, header); + final var cookie = WritableObjects.readSecondLong(in, header); + identifier = new LocalHistoryIdentifier(clientId, historyId, cookie); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(identifier); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java index 137bf5907f..ddeb293615 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java @@ -12,10 +12,7 @@ import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.concepts.WritableIdentifier; import org.opendaylight.yangtools.concepts.WritableObjects; @@ -25,59 +22,11 @@ import org.opendaylight.yangtools.concepts.WritableObjects; * - a {@link ClientIdentifier}, which uniquely identifies a single instantiation of a particular frontend * - an unsigned long, which uniquely identifies the history on the backend * - an unsigned long cookie, assigned by the client and meaningless on the backend, which just reflects it back - * - * @author Robert Varga */ public final class LocalHistoryIdentifier implements WritableIdentifier { - /* - * Implementation note: cookie is currently required only for module-based sharding, which is implemented as part - * of normal DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence - * we may end up not needing cookie at all. - * - * We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we - * end up not needing the cookie at all, we can switch to writeLong() and use zero flags for - * compatibility. - */ - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private ClientIdentifier clientId; - private long historyId; - private long cookie; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final ClientIdentifier frontendId, final long historyId, final long cookie) { - clientId = requireNonNull(frontendId); - this.historyId = historyId; - this.cookie = cookie; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - clientId.writeTo(out); - WritableObjects.writeLongs(out, historyId, cookie); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - clientId = ClientIdentifier.readFrom(in); - - final byte header = WritableObjects.readLongHeader(in); - historyId = WritableObjects.readFirstLong(in, header); - cookie = WritableObjects.readSecondLong(in, header); - } - - private Object readResolve() { - return new LocalHistoryIdentifier(clientId, historyId, cookie); - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; + private final @NonNull ClientIdentifier clientId; private final long historyId; private final long cookie; @@ -131,11 +80,10 @@ public final class LocalHistoryIdentifier implements WritableIdentifier { if (this == obj) { return true; } - if (!(obj instanceof LocalHistoryIdentifier)) { + if (!(obj instanceof LocalHistoryIdentifier other)) { return false; } - final LocalHistoryIdentifier other = (LocalHistoryIdentifier) obj; return historyId == other.historyId && cookie == other.cookie && clientId.equals(other.clientId); } @@ -146,7 +94,8 @@ public final class LocalHistoryIdentifier implements WritableIdentifier { .add("cookie", Long.toUnsignedString(cookie, 16)).toString(); } + @java.io.Serial private Object writeReplace() { - return new Proxy(clientId, historyId, cookie); + return new HI(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java new file mode 100644 index 0000000000..37b9fb837f --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.charset.StandardCharsets; + +/** + * Serialization proxy for {@link MemberName}. 
+ */ +final class MN implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private byte[] serialized; + + @SuppressWarnings("checkstyle:RedundantModifier") + public MN() { + // for Externalizable + } + + MN(final byte[] serialized) { + this.serialized = requireNonNull(serialized); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeInt(serialized.length); + out.write(serialized); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + serialized = new byte[in.readInt()]; + in.readFully(serialized); + } + + @java.io.Serial + private Object readResolve() { + // TODO: consider caching instances here + return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java index 3cc2b0c639..daab643f8a 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java @@ -11,60 +11,21 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects; import com.google.common.base.Strings; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.nio.charset.StandardCharsets; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.concepts.WritableIdentifier; /** * Type-safe encapsulation of a cluster member name. - * - * @author Robert Varga */ -@Beta public final class MemberName implements Comparable, WritableIdentifier { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private byte[] serialized; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final byte[] serialized) { - this.serialized = requireNonNull(serialized); - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeInt(serialized.length); - out.write(serialized); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - serialized = new byte[in.readInt()]; - in.readFully(serialized); - } - - private Object readResolve() { - // TODO: consider caching instances here - return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized); - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull String name; @@ -140,7 +101,8 @@ public final class MemberName implements Comparable, WritableIdentif return local; } + @java.io.Serial Object writeReplace() { - return new Proxy(getSerialized()); + return new MN(getSerialized()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java index 5456fbb19e..9748264e7f 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java @@ -10,15 +10,24 @@ package org.opendaylight.controller.cluster.access.concepts; import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.DataInput; +import java.io.Externalizable; +import java.io.IOException; +import java.io.NotSerializableException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; import java.io.Serializable; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.concepts.Immutable; import org.opendaylight.yangtools.concepts.WritableIdentifier; +import org.opendaylight.yangtools.concepts.WritableObjects; /** * An abstract concept of a Message. This class cannot be instantiated directly, use its specializations {@link Request} @@ -47,14 +56,49 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; * Note that this class specifies the {@link Immutable} contract, which means that all subclasses must follow this API * contract. * - * @author Robert Varga - * * @param Target identifier type * @param Message type */ -@Beta -public abstract class Message> implements Immutable, - Serializable { +public abstract class Message> + implements Immutable, Serializable { + /** + * Externalizable proxy for use with {@link Message} subclasses. 
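All of the new single-letter proxies in this package follow the same shape: the identifier's writeReplace() emits a small Externalizable proxy, and the proxy's readResolve() hands back the rebuilt object, so the proxy itself never escapes deserialization. A minimal, self-contained sketch of that pattern on a made-up Label class (Label and LabelProxy are not part of cds-access-api):

import static java.util.Objects.requireNonNull;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;

// Illustrative only: the same writeReplace()/readResolve() arrangement used by
// ClientIdentifier/CI, FrontendIdentifier/FI, TransactionIdentifier/TI, etc.
final class Label implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String name;

    Label(final String name) {
        this.name = requireNonNull(name);
    }

    String name() {
        return name;
    }

    private Object writeReplace() {
        // the proxy, not this object, is what ends up in the stream
        return new LabelProxy(this);
    }
}

final class LabelProxy implements Externalizable {
    private static final long serialVersionUID = 1L;

    private Label label;

    public LabelProxy() {
        // public no-arg constructor required for Externalizable
    }

    LabelProxy(final Label label) {
        this.label = requireNonNull(label);
    }

    @Override
    public void writeExternal(final ObjectOutput out) throws IOException {
        out.writeUTF(label.name());
    }

    @Override
    public void readExternal(final ObjectInput in) throws IOException {
        label = new Label(in.readUTF());
    }

    private Object readResolve() {
        // deserialization yields the rebuilt Label, never the proxy
        return label;
    }
}

Serializing a Label through an ObjectOutputStream therefore stores only the proxy's form; reading it back runs LabelProxy.readExternal() followed by readResolve(), so callers only ever observe Label instances.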
+ * + * @param Target identifier type + * @param Message class + */ + protected interface SerialForm> extends Externalizable { + + @NonNull C message(); + + void setMessage(@NonNull C message); + + @Override + default void writeExternal(final ObjectOutput out) throws IOException { + final var message = message(); + message.getTarget().writeTo(out); + WritableObjects.writeLong(out, message.getSequence()); + writeExternal(out, message); + } + + void writeExternal(@NonNull ObjectOutput out, @NonNull C msg) throws IOException; + + @Override + default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + final var target = verifyNotNull(readTarget(in)); + final var sequence = WritableObjects.readLong(in); + setMessage(verifyNotNull(readExternal(in, target, sequence))); + } + + @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence) + throws IOException, ClassNotFoundException; + + Object readResolve(); + + @NonNull T readTarget(@NonNull DataInput in) throws IOException; + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull ABIVersion version; @@ -106,21 +150,14 @@ public abstract class Message verifyNotNull(cloneAsVersion(toVersion)); + default -> throw new IllegalArgumentException("Unhandled ABI version " + toVersion); + }; } /** @@ -159,9 +196,29 @@ public abstract class Message externalizableProxy(@NonNull ABIVersion reqVersion); + protected abstract @NonNull SerialForm externalizableProxy(@NonNull ABIVersion reqVersion); + @java.io.Serial protected final Object writeReplace() { return externalizableProxy(version); } + + protected final void throwNSE() throws NotSerializableException { + throw new NotSerializableException(getClass().getName()); + } + + @java.io.Serial + private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException { + throwNSE(); + } + + @java.io.Serial + private void readObjectNoData() throws ObjectStreamException { + throwNSE(); + } + + @java.io.Serial + private void writeObject(final ObjectOutputStream stream) throws IOException { + throwNSE(); + } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java new file mode 100644 index 0000000000..27bf825526 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; + +/** + * Serialization proxy for {@link RequestEnvelope}. 
+ */ +final class RE implements Envelope.SerialForm, RequestEnvelope> { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private RequestEnvelope envelope; + + @SuppressWarnings("checkstyle:RedundantModifier") + public RE() { + // for Externalizable + } + + RE(final RequestEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public RequestEnvelope envelope() { + return verifyNotNull(envelope); + } + + @Override + public void setEnvelope(final RequestEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public RequestEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence, + final Request message) { + return new RequestEnvelope(message, sessionId, txSequence); + } + + @Override + public Object readResolve() { + return envelope(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java index 093a3f1eb0..97ce498bda 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java @@ -10,8 +10,12 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; +import akka.serialization.JavaSerializer; +import akka.serialization.Serialization; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.concepts.WritableIdentifier; @@ -20,14 +24,31 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; * A request message concept. Upon receipt of this message, the recipient will respond with either * a {@link RequestSuccess} or a {@link RequestFailure} message. 
* - * @author Robert Varga - * * @param Target identifier type * @param Message type */ -@Beta public abstract class Request> extends Message { + protected interface SerialForm> + extends Message.SerialForm { + @Override + default C readExternal(final ObjectInput in, final T target, final long sequence) + throws ClassNotFoundException, IOException { + return readExternal(in, target, sequence, + JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject())); + } + + @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence, @NonNull ActorRef replyTo) + throws IOException; + + @Override + default void writeExternal(final ObjectOutput out, final C msg) throws IOException { + out.writeObject(Serialization.serializedActorPath(msg.getReplyTo())); + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; + private final @NonNull ActorRef replyTo; protected Request(final @NonNull T target, final long sequence, final @NonNull ActorRef replyTo) { @@ -63,5 +84,5 @@ public abstract class Request externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java index 46d5d1f996..e8983697cf 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java @@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.access.concepts; import akka.actor.ActorRef; public final class RequestEnvelope extends Envelope> { + @java.io.Serial private static final long serialVersionUID = 1L; public RequestEnvelope(final Request message, final long sessionId, final long txSequence) { @@ -17,8 +18,8 @@ public final class RequestEnvelope extends Envelope> { } @Override - RequestEnvelopeProxy createProxy() { - return new RequestEnvelopeProxy(this); + RE createProxy() { + return new RE(this); } /** diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java deleted file mode 100644 index 66e7eaa0a2..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -final class RequestEnvelopeProxy extends AbstractEnvelopeProxy> { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public RequestEnvelopeProxy() { - // for Externalizable - } - - RequestEnvelopeProxy(final RequestEnvelope envelope) { - super(envelope); - } - - @Override - RequestEnvelope createEnvelope(final Request message, final long sessionId, final long txSequence) { - return new RequestEnvelope(message, sessionId, txSequence); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java index 7267edea4f..d1120c61ef 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java @@ -9,16 +9,13 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import org.eclipse.jdt.annotation.NonNull; /** * A failure cause behind a {@link RequestFailure} to process a {@link Request}. - * - * @author Robert Varga */ -@Beta public abstract class RequestException extends Exception { + @java.io.Serial private static final long serialVersionUID = 1L; protected RequestException(final @NonNull String message) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java index db5a15b0bc..7204912669 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java @@ -9,8 +9,10 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.concepts.WritableIdentifier; @@ -18,14 +20,33 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; /** * A failure response to a {@link Request}. Contains a {@link RequestException} detailing the cause for this failure. * - * @author Robert Varga - * * @param Target identifier type * @param Message class */ -@Beta public abstract class RequestFailure> extends Response { + /** + * Externalizable proxy for use with {@link RequestFailure} subclasses. 
+ * + * @param Target identifier type + */ + protected interface SerialForm> + extends Message.SerialForm { + @Override + default C readExternal(final ObjectInput in, final T target, final long sequence) + throws IOException, ClassNotFoundException { + return createFailure(target, sequence, (RequestException) in.readObject()); + } + + @Override + default void writeExternal(final ObjectOutput out, final C msg) throws IOException { + out.writeObject(msg.getCause()); + } + + @NonNull C createFailure(@NonNull T target, long sequence, @NonNull RequestException failureCause); + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final @NonNull RequestException cause; @@ -65,5 +86,5 @@ public abstract class RequestFailure externalizableProxy(ABIVersion version); + protected abstract SerialForm externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java index 9b60d21b0d..f7e59ed1e6 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.annotations.Beta; +import java.io.IOException; +import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.concepts.WritableIdentifier; @@ -15,23 +16,26 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; /** * A successful reply to a {@link Request}. 
* - * @author Robert Varga - * * @param Target identifier type */ -@Beta -public abstract class RequestSuccess> extends - Response { +public abstract class RequestSuccess> + extends Response { + protected interface SerialForm> + extends Response.SerialForm { + @Override + default void writeExternal(final ObjectOutput out, final C msg) throws IOException { + // Defaults to no-op + } + } + + @java.io.Serial private static final long serialVersionUID = 1L; - protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) { + protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) { super(success, version); } protected RequestSuccess(final @NonNull T target, final long sequence) { super(target, sequence); } - - @Override - protected abstract AbstractSuccessProxy externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java index f733a9e919..a41fa01db9 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.annotations.Beta; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.concepts.WritableIdentifier; @@ -17,13 +16,16 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier; * {@link RequestFailure} and {@link RequestSuccess}, which provide appropriate specialization. It is visible purely for * the purpose of allowing to check if an object is either of those specializations with a single instanceof check. 
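A minimal sketch of the single-instanceof dispatch this enables. ResponseDispatchExample and its printouts are hypothetical; only Response, RequestSuccess, RequestFailure and RequestException come from this package:

import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.Response;

// Illustrative only: how a caller might branch on the two Response specializations.
final class ResponseDispatchExample {
    private ResponseDispatchExample() {
        // utility class
    }

    static void onResponse(final Response<?, ?> response) {
        if (response instanceof RequestFailure<?, ?> failure) {
            final RequestException cause = failure.getCause();
            System.err.println("request failed: " + cause.getMessage());
        } else if (response instanceof RequestSuccess<?, ?> success) {
            System.out.println("request succeeded for " + success.getTarget());
        } else {
            throw new IllegalStateException("Unhandled response " + response);
        }
    }
}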
* - * @author Robert Varga - * * @param Target identifier type * @param Message type */ -@Beta public abstract class Response> extends Message { + protected interface SerialForm> + extends Message.SerialForm { + + } + + @java.io.Serial private static final long serialVersionUID = 1L; Response(final @NonNull T target, final long sequence) { @@ -33,7 +35,4 @@ public abstract class Response externalizableProxy(ABIVersion version); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java index 7936baa169..50d1e7434c 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java @@ -7,16 +7,39 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.base.Preconditions; +import static com.google.common.base.Preconditions.checkArgument; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.eclipse.jdt.annotation.NonNull; +import org.opendaylight.yangtools.concepts.WritableObjects; public abstract class ResponseEnvelope> extends Envelope { + interface SerialForm, E extends ResponseEnvelope> extends Envelope.SerialForm { + @Override + default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException { + Envelope.SerialForm.super.writeExternal(out, envelope); + WritableObjects.writeLong(out, envelope.getExecutionTimeNanos()); + } + + @Override + default E readExternal(final ObjectInput in, final long sessionId, final long txSequence, final T message) + throws IOException { + return readExternal(in, sessionId, txSequence, message, WritableObjects.readLong(in)); + } + + E readExternal(ObjectInput in, long sessionId, long txSequence, T message, long executionTimeNanos); + } + + @java.io.Serial private static final long serialVersionUID = 1L; private final long executionTimeNanos; ResponseEnvelope(final T message, final long sessionId, final long txSequence, final long executionTimeNanos) { super(message, sessionId, txSequence); - Preconditions.checkArgument(executionTimeNanos >= 0); + checkArgument(executionTimeNanos >= 0, "Negative executionTime"); this.executionTimeNanos = executionTimeNanos; } @@ -29,7 +52,4 @@ public abstract class ResponseEnvelope> extends Envelop public final long getExecutionTimeNanos() { return executionTimeNanos; } - - @Override - abstract AbstractResponseEnvelopeProxy createProxy(); } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java index 7730318f57..3f1f71d17e 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java @@ -7,16 +7,12 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.annotations.Beta; - /** * General error raised when the recipient of a {@link Request} 
determines that the request contains * a {@link ClientIdentifier} which corresponds to an outdated generation. - * - * @author Robert Varga */ -@Beta public final class RetiredGenerationException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public RetiredGenerationException(final long originatingGeneration, final long newGeneration) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java index 7fb0ef0701..3f886a8510 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java @@ -10,16 +10,13 @@ package org.opendaylight.controller.cluster.access.concepts; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.Strings; /** * General error raised when the recipient of a {@link Request} fails to process a request. - * - * @author Robert Varga */ -@Beta public final class RuntimeRequestException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public RuntimeRequestException(final String message, final Throwable cause) { diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java new file mode 100644 index 0000000000..3e8ce6f256 --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.ObjectInput; + +/** + * Serialization proxy for {@link SuccessEnvelope}. 
+ */ +final class SE implements ResponseEnvelope.SerialForm, SuccessEnvelope> { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private SuccessEnvelope envelope; + + @SuppressWarnings("checkstyle:RedundantModifier") + public SE() { + // for Externalizable + } + + SE(final SuccessEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public SuccessEnvelope envelope() { + return verifyNotNull(envelope); + } + + @Override + public void setEnvelope(final SuccessEnvelope envelope) { + this.envelope = requireNonNull(envelope); + } + + @Override + public SuccessEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence, + final RequestSuccess message, final long executionTimeNanos) { + return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos); + } + + @Override + public Object readResolve() { + return envelope(); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java index cd3e2608d6..118e9262a7 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java @@ -7,14 +7,12 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.annotations.Beta; - /** * A tagging interface that specifies a message whose serialized size can be large and thus should be sliced into * smaller chunks when transporting over the wire. * * @author Thomas Pantelis */ -@Beta public interface SliceableMessage { + // Marker interface } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java index 3c23a23763..2644c6ff0f 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java @@ -8,6 +8,7 @@ package org.opendaylight.controller.cluster.access.concepts; public final class SuccessEnvelope extends ResponseEnvelope> { + @java.io.Serial private static final long serialVersionUID = 1L; public SuccessEnvelope(final RequestSuccess message, final long sessionId, final long txSequence, @@ -16,7 +17,7 @@ public final class SuccessEnvelope extends ResponseEnvelope } @Override - SuccessEnvelopeProxy createProxy() { - return new SuccessEnvelopeProxy(this); + SE createProxy() { + return new SE(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java deleted file mode 100644 index 3ac388b9db..0000000000 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.access.concepts; - -final class SuccessEnvelopeProxy extends AbstractResponseEnvelopeProxy> { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public SuccessEnvelopeProxy() { - // for Externalizable - } - - SuccessEnvelopeProxy(final SuccessEnvelope envelope) { - super(envelope); - } - - @Override - ResponseEnvelope> createEnvelope(final RequestSuccess message, final long sessionId, - final long txSequence, final long executionTimeNanos) { - return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos); - } -} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java new file mode 100644 index 0000000000..8bc927fdbb --- /dev/null +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.access.concepts; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link TransactionIdentifier}. 
+ */ +final class TI implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionIdentifier identifier; + + @SuppressWarnings("checkstyle:RedundantModifier") + public TI() { + // for Externalizable + } + + TI(final TransactionIdentifier identifier) { + this.identifier = requireNonNull(identifier); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + identifier = new TransactionIdentifier(LocalHistoryIdentifier.readFrom(in), WritableObjects.readLong(in)); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + identifier.getHistoryId().writeTo(out); + WritableObjects.writeLong(out, identifier.getTransactionId()); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(identifier); + } +} diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java index d2a92ea191..ea72c84750 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java @@ -9,59 +9,20 @@ package org.opendaylight.controller.cluster.access.concepts; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import java.io.DataInput; import java.io.DataOutput; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.concepts.WritableIdentifier; import org.opendaylight.yangtools.concepts.WritableObjects; /** * Globally-unique identifier of a transaction. - * - * @author Robert Varga */ -@Beta public final class TransactionIdentifier implements WritableIdentifier { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - private LocalHistoryIdentifier historyId; - private long transactionId; - - // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to - // be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final LocalHistoryIdentifier historyId, final long transactionId) { - this.historyId = requireNonNull(historyId); - this.transactionId = transactionId; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - historyId.writeTo(out); - WritableObjects.writeLong(out, transactionId); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - historyId = LocalHistoryIdentifier.readFrom(in); - transactionId = WritableObjects.readLong(in); - } - - private Object readResolve() { - return new TransactionIdentifier(historyId, transactionId); - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; + private final @NonNull LocalHistoryIdentifier historyId; private final long transactionId; private String shortString; @@ -97,15 +58,8 @@ public final class TransactionIdentifier implements WritableIdentifier { @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof TransactionIdentifier)) { - return false; - } - - final TransactionIdentifier other = (TransactionIdentifier) obj; - return transactionId == other.transactionId && historyId.equals(other.historyId); + return this == obj || obj instanceof TransactionIdentifier other && transactionId == other.transactionId + && historyId.equals(other.historyId); } public String toShortString() { @@ -125,7 +79,8 @@ public final class TransactionIdentifier implements WritableIdentifier { return toShortString(); } + @java.io.Serial private Object writeReplace() { - return new Proxy(historyId, transactionId); + return new TI(this); } } diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java index 903ed59fbc..1de266d285 100644 --- a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java +++ b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java @@ -7,16 +7,12 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import com.google.common.annotations.Beta; - /** * General error raised when the recipient of a {@link Request} determines that it does not know how to handle * the request. 
- * - * @author Robert Varga */ -@Beta public final class UnsupportedRequestException extends RequestException { + @java.io.Serial private static final long serialVersionUID = 1L; public UnsupportedRequestException(final Request request) { diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java index f9e9c0c954..1513f36396 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java @@ -8,8 +8,9 @@ package org.opendaylight.controller.cluster.access; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.opendaylight.controller.cluster.access.ABIVersion.BORON; +import static org.opendaylight.controller.cluster.access.ABIVersion.POTASSIUM; import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_FUTURE_VERSION; import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_PAST_VERSION; @@ -22,25 +23,25 @@ public class ABIVersionTest { @Test public void testInvalidVersions() { assertTrue(TEST_PAST_VERSION.compareTo(TEST_FUTURE_VERSION) < 0); - assertTrue(TEST_PAST_VERSION.compareTo(BORON) < 0); - assertTrue(TEST_FUTURE_VERSION.compareTo(BORON) > 0); + assertTrue(TEST_PAST_VERSION.compareTo(POTASSIUM) < 0); + assertTrue(TEST_FUTURE_VERSION.compareTo(POTASSIUM) > 0); } @Test - public void testBoronVersion() throws Exception { - assertEquals((short)5, BORON.shortValue()); - assertEquals(BORON, ABIVersion.valueOf(BORON.shortValue())); - assertEquals(BORON, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(BORON)))); + public void testMagnesiumVersion() throws Exception { + assertEquals((short)10, POTASSIUM.shortValue()); + assertEquals(POTASSIUM, ABIVersion.valueOf(POTASSIUM.shortValue())); + assertEquals(POTASSIUM, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(POTASSIUM)))); } - @Test(expected = PastVersionException.class) - public void testInvalidPastVersion() throws Exception { - ABIVersion.valueOf(TEST_PAST_VERSION.shortValue()); + @Test + public void testInvalidPastVersion() { + assertThrows(PastVersionException.class, () -> ABIVersion.valueOf(TEST_PAST_VERSION.shortValue())); } - @Test(expected = FutureVersionException.class) - public void testInvalidFutureVersion() throws Exception { - ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue()); + @Test + public void testInvalidFutureVersion() { + assertThrows(FutureVersionException.class, () -> ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue())); } private static byte[] writeVersion(final ABIVersion version) { @@ -49,8 +50,9 @@ public class ABIVersionTest { return bado.toByteArray(); } - @Test(expected = IOException.class) - public void testBadRead() throws IOException { - ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION))); + @Test + public void testBadRead() { + final var in = ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION)); + assertThrows(IOException.class, () -> ABIVersion.readFrom(in)); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java index 60e7dc8388..48465208e2 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendType; @@ -25,14 +26,12 @@ public class AbortLocalTransactionRequestTest private static final AbortLocalTransactionRequest OBJECT = new AbortLocalTransactionRequest(TRANSACTION, ACTOR_REF); - @Override - protected AbortLocalTransactionRequest object() { - return OBJECT; + public AbortLocalTransactionRequestTest() { + super(OBJECT); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof AbortLocalTransactionRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((AbortLocalTransactionRequest) deserialize).getReplyTo()); + protected void doAdditionalAssertions(final AbortLocalTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java index e40a39450b..1cb9af38a0 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java @@ -7,24 +7,34 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.apache.commons.lang.SerializationUtils; -import org.junit.Assert; +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.endsWith; +import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; + +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public abstract class AbstractLocalTransactionRequestTest> extends AbstractTransactionRequestTest { - @Override - protected abstract T object(); + protected AbstractLocalTransactionRequestTest(final T object) { + super(object, -1); + } @Test public void cloneAsVersionTest() { - Assert.assertEquals(object(), object().cloneAsVersion(ABIVersion.BORON)); + assertSame(object(), object().cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION)); } @Override - @Test(expected = UnsupportedOperationException.class) + @Test public void serializationTest() { - SerializationUtils.clone(object()); + final var ex = assertThrows(UnsupportedOperationException.class, () -> SerializationUtils.clone(object())); + 
assertThat(ex.getMessage(), allOf( + startsWith("Local transaction request "), + endsWith(" should never be serialized"))); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java index f1fe2c08f9..5ae7219888 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java @@ -7,32 +7,36 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + import com.google.common.base.MoreObjects; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; public abstract class AbstractReadTransactionRequestTest> extends AbstractTransactionRequestTest { - protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty(); + protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of(); protected static final boolean SNAPSHOT_ONLY = true; - @Override - protected abstract T object(); + protected AbstractReadTransactionRequestTest(final T object, final int baseSize) { + super(object, baseSize); + } @Test public void getPathTest() { - Assert.assertEquals(PATH, object().getPath()); + assertEquals(PATH, object().getPath()); } @Test public void isSnapshotOnlyTest() { - Assert.assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly()); + assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly()); } @Test public void addToStringAttributesTest() { - final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object())); - Assert.assertTrue(result.toString().contains("path=" + PATH)); + final var result = object().addToStringAttributes(MoreObjects.toStringHelper(object())).toString(); + assertThat(result, containsString("path=" + PATH)); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java index ccdc6753ac..78456b246a 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java @@ -7,8 +7,11 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.apache.commons.lang.SerializationUtils; -import org.junit.Assert; +import static java.util.Objects.requireNonNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; @@ -29,26 +32,36 @@ public abstract class AbstractRequestFailureTest> 
protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier( HISTORY_IDENTIFIER, 0); protected static final RequestException CAUSE = new RuntimeRequestException("fail", new Throwable()); + private static final int CAUSE_SIZE = SerializationUtils.serialize(CAUSE).length; + + private final T object; + private final int expectedSize; - abstract T object(); + protected AbstractRequestFailureTest(final T object, final int baseSize) { + this.object = requireNonNull(object); + this.expectedSize = baseSize + CAUSE_SIZE; + } @Test public void getCauseTest() { - Assert.assertEquals(CAUSE, object().getCause()); + assertEquals(CAUSE, object.getCause()); } @Test public void isHardFailureTest() { - Assert.assertTrue(object().isHardFailure()); + assertTrue(object.isHardFailure()); } - @SuppressWarnings("unchecked") @Test public void serializationTest() { - final Object deserialize = SerializationUtils.clone(object()); + final var bytes = SerializationUtils.serialize(object); + assertEquals(expectedSize, bytes.length); + + @SuppressWarnings("unchecked") + final var deserialize = (T) SerializationUtils.deserialize(bytes); - Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget()); - Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion()); - Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence()); + assertEquals(object.getTarget(), deserialize.getTarget()); + assertEquals(object.getVersion(), deserialize.getVersion()); + assertEquals(object.getSequence(), deserialize.getSequence()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java index 8a81252233..b0038758c7 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java @@ -7,8 +7,11 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.apache.commons.lang.SerializationUtils; -import org.junit.Assert; +import static java.util.Objects.requireNonNull; +import static org.junit.Assert.assertEquals; + +import org.apache.commons.lang3.SerializationUtils; +import org.eclipse.jdt.annotation.NonNull; import org.junit.Test; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; @@ -18,25 +21,34 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName; import org.opendaylight.controller.cluster.access.concepts.RequestSuccess; public abstract class AbstractRequestSuccessTest> { - private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create( MemberName.forName("test"), FrontendType.forName("one")); protected static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0); - protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier( - CLIENT_IDENTIFIER, 0); + protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0); + + private final @NonNull T object; + private final int expectedSize; - protected abstract T object(); + protected 
AbstractRequestSuccessTest(final T object, final int expectedSize) { + this.object = requireNonNull(object); + this.expectedSize = expectedSize; + } - @SuppressWarnings("unchecked") @Test public void serializationTest() { - final Object deserialize = SerializationUtils.clone(object()); + final var bytes = SerializationUtils.serialize(object); + assertEquals(expectedSize, bytes.length); + + @SuppressWarnings("unchecked") + final var deserialize = (T) SerializationUtils.deserialize(bytes); - Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget()); - Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion()); - Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence()); + assertEquals(object.getTarget(), deserialize.getTarget()); + assertEquals(object.getVersion(), deserialize.getVersion()); + assertEquals(object.getSequence(), deserialize.getSequence()); doAdditionalAssertions(deserialize); } - protected abstract void doAdditionalAssertions(Object deserialize); + protected void doAdditionalAssertions(final T deserialize) { + // No-op by default + } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java index 58d24e4e53..f276ac3937 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java @@ -7,7 +7,8 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertNotNull; + import org.junit.Test; import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; @@ -15,7 +16,6 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendType; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.MemberName; -import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -29,14 +29,14 @@ public abstract class AbstractTransactionRequestTest> extends AbstractRequestSuccessTest { + protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_IDENTIFIER, + 0); - protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier( - HISTORY_IDENTIFIER, 0); - + protected AbstractTransactionSuccessTest(final T object, final int expectedSize) { + super(object, expectedSize); + } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java index 61b7dc2ad4..f1df2d882f 100644 --- 
a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java @@ -7,8 +7,11 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + import com.google.common.base.MoreObjects; -import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; @@ -17,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendType; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.MemberName; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; public class CommitLocalTransactionRequestTest extends AbstractLocalTransactionRequestTest { @@ -30,34 +33,32 @@ public class CommitLocalTransactionRequestTest private static final DataTreeModification MODIFICATION = Mockito.mock(DataTreeModification.class); private static final boolean COORDINATED = true; - private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest( - TRANSACTION, 0, ACTOR_REF, MODIFICATION, null, COORDINATED); + private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(TRANSACTION, 0, + ACTOR_REF, MODIFICATION, null, COORDINATED); - @Override - protected CommitLocalTransactionRequest object() { - return OBJECT; + public CommitLocalTransactionRequestTest() { + super(OBJECT); } @Test public void getModificationTest() { - Assert.assertEquals(MODIFICATION, OBJECT.getModification()); + assertEquals(MODIFICATION, OBJECT.getModification()); } @Test public void isCoordinatedTest() { - Assert.assertEquals(COORDINATED, OBJECT.isCoordinated()); + assertEquals(COORDINATED, OBJECT.isCoordinated()); } @Test public void addToStringAttributesTest() { - final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)); - Assert.assertTrue(result.toString().contains("coordinated=" + COORDINATED)); + final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString(); + assertThat(result, containsString("coordinated=" + COORDINATED)); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof CommitLocalTransactionRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((CommitLocalTransactionRequest) deserialize).getReplyTo()); - Assert.assertEquals(OBJECT.getModification(), ((CommitLocalTransactionRequest) deserialize).getModification()); + protected void doAdditionalAssertions(final CommitLocalTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); + assertEquals(OBJECT.getModification(), deserialize.getModification()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java index 6518102fa1..2278195d82 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java @@ -7,23 +7,23 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class ConnectClientFailureTest extends AbstractRequestFailureTest { private static final ConnectClientFailure OBJECT = new ConnectClientFailure(CLIENT_IDENTIFIER, 0, CAUSE); - @Override - ConnectClientFailure object() { - return OBJECT; + public ConnectClientFailureTest() { + super(OBJECT, 99); } @Test public void cloneAsVersionTest() { final ConnectClientFailure clone = OBJECT.cloneAsVersion(ABIVersion.current()); - Assert.assertEquals(OBJECT.getTarget(), clone.getTarget()); - Assert.assertEquals(OBJECT.getSequence(), clone.getSequence()); - Assert.assertEquals(OBJECT.getCause(), clone.getCause()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getCause(), clone.getCause()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java index 64e4717e85..3bf1951e50 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java @@ -7,9 +7,13 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import com.google.common.base.MoreObjects; import com.google.common.collect.ImmutableRangeSet; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest; @@ -17,7 +21,6 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendType; import org.opendaylight.controller.cluster.access.concepts.MemberName; -import org.opendaylight.controller.cluster.access.concepts.RequestException; public class ConnectClientRequestTest extends AbstractRequestTest { private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create( @@ -30,49 +33,45 @@ public class ConnectClientRequestTest extends AbstractRequestTest ALTERNATES = ImmutableList.of(ACTOR_SELECTION); private static final int MAX_MESSAGES = 10; - private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess( - CLIENT_IDENTIFIER, 0, ACTOR_REF, ALTERNATES, TREE, MAX_MESSAGES); + 
private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(CLIENT_IDENTIFIER, 0, ACTOR_REF, + ALTERNATES, TREE, MAX_MESSAGES); - @Override - protected ConnectClientSuccess object() { - return OBJECT; + public ConnectClientSuccessTest() { + super(OBJECT, 146 + ACTOR_REF.path().toSerializationFormat().length()); } @Before @@ -51,32 +50,36 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest alternates = OBJECT.getAlternates(); - Assert.assertArrayEquals(ALTERNATES.toArray(), alternates.toArray()); + final var alternates = OBJECT.getAlternates(); + assertArrayEquals(ALTERNATES.toArray(), alternates.toArray()); } @Test public void testGetBackend() { final ActorRef actorRef = OBJECT.getBackend(); - Assert.assertEquals(ACTOR_REF, actorRef); + assertEquals(ACTOR_REF, actorRef); } @Test public void testGetDataTree() { - final ReadOnlyDataTree tree = OBJECT.getDataTree().get(); - Assert.assertEquals(TREE, tree); + final ReadOnlyDataTree tree = OBJECT.getDataTree().orElseThrow(); + assertEquals(TREE, tree); } @Test public void testGetMaxMessages() { - final int maxMessages = OBJECT.getMaxMessages(); - Assert.assertEquals(MAX_MESSAGES, maxMessages); + assertEquals(MAX_MESSAGES, OBJECT.getMaxMessages()); } @Test public void cloneAsVersionTest() { - final ConnectClientSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getAlternates(), clone.getAlternates()); + assertEquals(OBJECT.getBackend(), clone.getBackend()); + assertEquals(OBJECT.getDataTree(), clone.getDataTree()); + assertEquals(OBJECT.getMaxMessages(), clone.getMaxMessages()); } @Test @@ -86,11 +89,10 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest { - private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY); + private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF, PATH, SNAPSHOT_ONLY); - @Override - protected ExistsTransactionRequest object() { - return OBJECT; + public ExistsTransactionRequestTest() { + super(OBJECT, 108); } @Test public void cloneAsVersionTest() { - final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION; - final ExistsTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion); - Assert.assertEquals(cloneVersion, clone.getVersion()); - Assert.assertEquals(OBJECT.getPath(), clone.getPath()); - Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly()); + final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION; + final var clone = OBJECT.cloneAsVersion(cloneVersion); + assertEquals(cloneVersion, clone.getVersion()); + assertEquals(OBJECT.getPath(), clone.getPath()); + assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof ExistsTransactionRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((ExistsTransactionRequest) deserialize).getReplyTo()); - Assert.assertEquals(OBJECT.getPath(), ((ExistsTransactionRequest) deserialize).getPath()); + protected void doAdditionalAssertions(final ExistsTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); + 
assertEquals(OBJECT.getPath(), deserialize.getPath()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java index e5a74a6385..e8ce28dedb 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java @@ -7,43 +7,45 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + import com.google.common.base.MoreObjects; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class ExistsTransactionSuccessTest extends AbstractTransactionSuccessTest { private static final boolean EXISTS = true; - private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess( - TRANSACTION_IDENTIFIER, 0, EXISTS); + private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(TRANSACTION_IDENTIFIER, 0, + EXISTS); - @Override - protected ExistsTransactionSuccess object() { - return OBJECT; + public ExistsTransactionSuccessTest() { + super(OBJECT, 99); } @Test public void getExistsTest() { - final boolean result = OBJECT.getExists(); - Assert.assertEquals(EXISTS, result); + assertEquals(EXISTS, OBJECT.getExists()); } @Test public void cloneAsVersionTest() { - final ExistsTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getExists(), clone.getExists()); } @Test public void addToStringAttributesTest() { - final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)); - Assert.assertTrue(result.toString().contains("exists=" + EXISTS)); + final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString(); + assertThat(result, containsString("exists=" + EXISTS)); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof ExistsTransactionSuccess); - Assert.assertEquals(OBJECT.getExists(), ((ExistsTransactionSuccess) deserialize).getExists()); + protected void doAdditionalAssertions(final ExistsTransactionSuccess deserialize) { + assertEquals(OBJECT.getExists(), deserialize.getExists()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java index c9d76f9b3d..13b9d6e8a1 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java +++ 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java @@ -7,21 +7,23 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class LocalHistoryFailureTest extends AbstractRequestFailureTest { private static final LocalHistoryFailure OBJECT = new LocalHistoryFailure(HISTORY_IDENTIFIER, 0, CAUSE); - @Override - LocalHistoryFailure object() { - return OBJECT; + public LocalHistoryFailureTest() { + super(OBJECT, 99); } @Test public void cloneAsVersionTest() { - final LocalHistoryFailure clone = OBJECT.cloneAsVersion(ABIVersion.current()); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getCause(), clone.getCause()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java index 875037bb00..8afca0072c 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java @@ -8,7 +8,6 @@ package org.opendaylight.controller.cluster.access.commands; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; @@ -16,21 +15,15 @@ import org.opendaylight.controller.cluster.access.ABIVersion; public class LocalHistorySuccessTest extends AbstractRequestSuccessTest { private static final LocalHistorySuccess OBJECT = new LocalHistorySuccess(HISTORY_IDENTIFIER, 0); - @Override - protected LocalHistorySuccess object() { - return OBJECT; + public LocalHistorySuccessTest() { + super(OBJECT, 96); } @Test public void cloneAsVersionTest() { - final LocalHistorySuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(ABIVersion.BORON, clone.getVersion()); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion()); assertEquals(OBJECT.getSequence(), clone.getSequence()); assertEquals(OBJECT.getTarget(), clone.getTarget()); } - - @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertTrue(deserialize instanceof LocalHistorySuccess); - } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java index 5f6f6454ce..e424f37cf6 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java @@ -24,11 +24,11 @@ 
import org.opendaylight.controller.cluster.access.concepts.MemberName; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; public class ModifyTransactionRequestBuilderTest { - private final MemberName memberName = MemberName.forName("member-1"); private final FrontendType frontendType = FrontendType.forName("test"); private final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, frontendType); @@ -36,10 +36,11 @@ public class ModifyTransactionRequestBuilderTest { private final TransactionIdentifier transactionIdentifier = new TransactionIdentifier(new LocalHistoryIdentifier(clientId, 0L), 0L); private final ActorRef actorRef = ActorSystem.create("test").actorOf(Props.create(TestActors.EchoActor.class)); - private final NormalizedNode node = Builders.containerBuilder().withNodeIdentifier( - YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build(); + private final NormalizedNode node = ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName"))) + .build(); private final TransactionModification transactionModification = - new TransactionWrite(YangInstanceIdentifier.empty(), node); + new TransactionWrite(YangInstanceIdentifier.of(), node); private final ModifyTransactionRequestBuilder modifyTransactionRequestBuilder = new ModifyTransactionRequestBuilder(transactionIdentifier, actorRef); @@ -52,38 +53,37 @@ public class ModifyTransactionRequestBuilderTest { @Test public void testGetIdentifier() { - final TransactionIdentifier identifier = modifyTransactionRequestBuilder.getIdentifier(); + final var identifier = modifyTransactionRequestBuilder.getIdentifier(); assertEquals(transactionIdentifier, identifier); } @Test public void testBuildReady() { modifyTransactionRequestBuilder.setReady(); - final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build(); - assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().get()); + final var modifyTransactionRequest = modifyTransactionRequestBuilder.build(); + assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().orElseThrow()); assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0)); } @Test public void testBuildAbort() { modifyTransactionRequestBuilder.setAbort(); - final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build(); - assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().get()); + final var modifyTransactionRequest = modifyTransactionRequestBuilder.build(); + assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().orElseThrow()); assertTrue(modifyTransactionRequest.getModifications().isEmpty()); } @Test public void testBuildCommitTrue() { modifyTransactionRequestBuilder.setCommit(true); - final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build(); - assertEquals(PersistenceProtocol.THREE_PHASE, 
modifyTransactionRequest.getPersistenceProtocol().get()); + final var modifyTransactionRequest = modifyTransactionRequestBuilder.build(); + assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow()); } @Test public void testBuildCommitFalse() { modifyTransactionRequestBuilder.setCommit(false); - final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build(); - assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().get()); + final var modifyTransactionRequest = modifyTransactionRequestBuilder.build(); + assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow()); } - } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java index 3f284e8fc4..45ee0bd258 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java @@ -7,59 +7,55 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + import com.google.common.base.MoreObjects; -import java.util.ArrayList; import java.util.List; import java.util.Optional; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class ModifyTransactionRequestEmptyTest extends AbstractTransactionRequestTest { private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT; + private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF, List.of(), PROTOCOL); - private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF, new ArrayList<>(), PROTOCOL); - - @Override - protected ModifyTransactionRequest object() { - return OBJECT; + public ModifyTransactionRequestEmptyTest() { + super(OBJECT, 108); } @Test public void getPersistenceProtocolTest() { - final Optional result = OBJECT.getPersistenceProtocol(); - Assert.assertTrue(result.isPresent()); - Assert.assertEquals(PROTOCOL, result.get()); + assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol()); } @Test public void getModificationsTest() { - final List result = OBJECT.getModifications(); - Assert.assertNotNull(result); - Assert.assertTrue(result.isEmpty()); + assertEquals(List.of(), OBJECT.getModifications()); } @Test public void addToStringAttributesTest() { - final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)); - Assert.assertTrue(result.toString().contains("modifications=0")); - Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL)); + final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString(); + assertThat(result, containsString("modifications=0")); + assertThat(result, containsString("protocol=" + PROTOCOL)); } @Test public void cloneAsVersionTest() { - final ModifyTransactionRequest clone = 
OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); + assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof ModifyTransactionRequest); - final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize; - - Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo()); - Assert.assertEquals(OBJECT.getModifications(), casted.getModifications()); - Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol()); + protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); + assertEquals(OBJECT.getModifications(), deserialize.getModifications()); + assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java index 7d0164f867..e3039d319e 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java @@ -7,80 +7,75 @@ */ package org.opendaylight.controller.cluster.access.commands; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; import static org.opendaylight.controller.cluster.access.commands.TransactionModification.TYPE_WRITE; import com.google.common.base.MoreObjects; -import com.google.common.collect.Lists; import java.util.List; import java.util.Optional; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; public class ModifyTransactionRequestTest extends AbstractTransactionRequestTest { - private static final ContainerNode NODE = Builders.containerBuilder().withNodeIdentifier( - NodeIdentifier.create(QName.create("namespace", "localName"))).build(); + private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName"))) + .build(); - private static final List MODIFICATIONS = Lists.newArrayList( - new TransactionWrite(YangInstanceIdentifier.empty(), NODE)); + private static final List MODIFICATIONS = List.of( + new 
TransactionWrite(YangInstanceIdentifier.of(), NODE)); private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT; - private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF, MODIFICATIONS, PROTOCOL); + private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF, MODIFICATIONS, PROTOCOL); - @Override - protected ModifyTransactionRequest object() { - return OBJECT; + public ModifyTransactionRequestTest() { + super(OBJECT, 140); } @Test public void getPersistenceProtocolTest() { - final Optional result = OBJECT.getPersistenceProtocol(); - assertTrue(result.isPresent()); - assertEquals(PROTOCOL, result.get()); + assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol()); } @Test public void getModificationsTest() { - final List result = OBJECT.getModifications(); - assertNotNull(result); - assertEquals(MODIFICATIONS, result); + assertEquals(MODIFICATIONS, OBJECT.getModifications()); } @Test public void addToStringAttributesTest() { - final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)); - assertTrue(result.toString().contains("modifications=1")); - assertTrue(result.toString().contains("protocol=" + PROTOCOL)); + final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString(); + assertThat(result, containsString("modifications=1")); + assertThat(result, containsString("protocol=" + PROTOCOL)); } @Test public void cloneAsVersionTest() { - final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); + assertEquals(OBJECT.getModifications(), clone.getModifications()); + assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertTrue(deserialize instanceof ModifyTransactionRequest); - final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize; - - assertEquals(OBJECT.getReplyTo(), casted.getReplyTo()); - assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol()); - - assertNotNull(casted.getModifications()); - assertEquals(1, casted.getModifications().size()); - final TransactionModification modification = casted.getModifications().get(0); - assertEquals(YangInstanceIdentifier.empty(), modification.getPath()); + protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); + assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol()); + assertNotNull(deserialize.getModifications()); + assertEquals(1, deserialize.getModifications().size()); + final var modification = deserialize.getModifications().get(0); + assertEquals(YangInstanceIdentifier.of(), modification.getPath()); assertEquals(TYPE_WRITE, modification.getType()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java 
index 3cd4626933..8fb470d765 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java @@ -8,7 +8,6 @@ package org.opendaylight.controller.cluster.access.commands; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; @@ -16,21 +15,15 @@ import org.opendaylight.controller.cluster.access.ABIVersion; public class ModifyTransactionSuccessTest extends AbstractTransactionSuccessTest { private static final ModifyTransactionSuccess OBJECT = new ModifyTransactionSuccess(TRANSACTION_IDENTIFIER, 0); - @Override - protected ModifyTransactionSuccess object() { - return OBJECT; + public ModifyTransactionSuccessTest() { + super(OBJECT, 98); } @Test public void cloneAsVersionTest() { - final ModifyTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(ABIVersion.BORON, clone.getVersion()); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion()); assertEquals(OBJECT.getSequence(), clone.getSequence()); assertEquals(OBJECT.getTarget(), clone.getTarget()); } - - @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertTrue(deserialize instanceof ModifyTransactionSuccess); - } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java index c4096833ca..10f4f0eb78 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java @@ -7,32 +7,31 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class ReadTransactionRequestTest extends AbstractReadTransactionRequestTest { - private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY); + private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF, PATH, SNAPSHOT_ONLY); - @Override - protected ReadTransactionRequest object() { - return OBJECT; + public ReadTransactionRequestTest() { + super(OBJECT, 108); } @Test public void cloneAsVersionTest() { - final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION; - final ReadTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion); - Assert.assertEquals(cloneVersion, clone.getVersion()); - Assert.assertEquals(OBJECT.getPath(), clone.getPath()); - Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly()); + final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION; + final var clone = OBJECT.cloneAsVersion(cloneVersion); + assertEquals(cloneVersion, clone.getVersion()); + assertEquals(OBJECT.getPath(), clone.getPath()); + 
assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof ReadTransactionRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((ReadTransactionRequest) deserialize).getReplyTo()); - Assert.assertEquals(OBJECT.getPath(), ((ReadTransactionRequest) deserialize).getPath()); + protected void doAdditionalAssertions(final ReadTransactionRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); + assertEquals(OBJECT.getPath(), deserialize.getPath()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java index 51257462b6..055b6f5c88 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java @@ -8,38 +8,34 @@ package org.opendaylight.controller.cluster.access.commands; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import java.util.Optional; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; public class ReadTransactionSuccessNoDataTest extends AbstractTransactionSuccessTest { - private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess( - TRANSACTION_IDENTIFIER, 0, Optional.empty()); + private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0, + Optional.empty()); - @Override - protected ReadTransactionSuccess object() { - return OBJECT; + public ReadTransactionSuccessNoDataTest() { + super(OBJECT, 99); } @Test public void getDataTest() { - final Optional result = OBJECT.getData(); - assertFalse(result.isPresent()); + assertEquals(Optional.empty(), OBJECT.getData()); } @Test public void cloneAsVersionTest() { - final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getData(), clone.getData()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertTrue(deserialize instanceof ReadTransactionSuccess); - assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData()); + protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) { + assertEquals(OBJECT.getData(), deserialize.getData()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java index 4bda8c8016..4557897ae5 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java +++ 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java @@ -8,7 +8,6 @@ package org.opendaylight.controller.cluster.access.commands; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import java.util.Optional; import org.junit.Test; @@ -16,37 +15,35 @@ import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; public class ReadTransactionSuccessTest extends AbstractTransactionSuccessTest { - private static final ContainerNode NODE = Builders.containerBuilder().withNodeIdentifier( - NodeIdentifier.create(QName.create("namespace", "localName"))).build(); + private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName"))) + .build(); - private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess( - TRANSACTION_IDENTIFIER, 0, Optional.of(NODE)); + private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0, + Optional.of(NODE)); - @Override - protected ReadTransactionSuccess object() { - return OBJECT; + public ReadTransactionSuccessTest() { + super(OBJECT, 129); } @Test public void getDataTest() { - final Optional result = OBJECT.getData(); - assertTrue(result.isPresent()); - assertEquals(NODE.body(), result.get().body()); + assertEquals(Optional.of(NODE), OBJECT.getData()); } @Test public void cloneAsVersionTest() { - final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getData(), clone.getData()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertTrue(deserialize instanceof ReadTransactionSuccess); - assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData()); + protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) { + assertEquals(OBJECT.getData(), deserialize.getData()); } } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java index 39076e3007..27b30d9e8b 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java @@ -7,8 +7,6 @@ */ package org.opendaylight.controller.cluster.access.commands; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import com.google.common.primitives.UnsignedLong; @@ -17,23 +15,23 
@@ import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class SkipTransactionsRequestTest extends AbstractTransactionRequestTest { - private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF, List.of(UnsignedLong.ONE)); + private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF, List.of(UnsignedLong.ONE)); - @Override - protected SkipTransactionsRequest object() { - return OBJECT; + public SkipTransactionsRequestTest() { + super(OBJECT, 109); } @Test public void cloneAsVersionTest() { - final SkipTransactionsRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertThat(deserialize, instanceOf(SkipTransactionsRequest.class)); - assertEquals(OBJECT.getReplyTo(), ((SkipTransactionsRequest) deserialize).getReplyTo()); + protected void doAdditionalAssertions(final SkipTransactionsRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java index be70ad96ea..3ff798d231 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java @@ -7,30 +7,22 @@ */ package org.opendaylight.controller.cluster.access.commands; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class SkipTransactionsResponseTest extends AbstractTransactionSuccessTest { - private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse( - TRANSACTION_IDENTIFIER, 0); + private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse(TRANSACTION_IDENTIFIER, 0); - @Override - protected SkipTransactionsResponse object() { - return OBJECT; + public SkipTransactionsResponseTest() { + super(OBJECT, 98); } @Test public void cloneAsVersionTest() { - final SkipTransactionsResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(final Object deserialize) { - assertThat(deserialize, instanceOf(SkipTransactionsResponse.class)); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java index 9c7dbf11d7..c0e1ae8e1f 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java @@ -7,28 +7,29 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionAbortRequestTest extends AbstractTransactionRequestTest { - private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF); + private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF); - @Override - protected TransactionAbortRequest object() { - return OBJECT; + public TransactionAbortRequestTest() { + super(OBJECT, 101); } @Test public void cloneAsVersionTest() { - final TransactionAbortRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionAbortRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionAbortRequest)deserialize).getReplyTo()); + protected void doAdditionalAssertions(final TransactionAbortRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java index 31959aaf3e..1ceab66a95 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java @@ -7,27 +7,22 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionAbortSuccessTest extends AbstractTransactionSuccessTest { - private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess( - TRANSACTION_IDENTIFIER, 0); + private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(TRANSACTION_IDENTIFIER, 0); - @Override - protected TransactionAbortSuccess object() { - return OBJECT; + public TransactionAbortSuccessTest() { + super(OBJECT, 98); } @Test public void cloneAsVersionTest() { - final TransactionAbortSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(Object deserialize) { - 
Assert.assertTrue(deserialize instanceof TransactionAbortSuccess); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java index e8995a9e24..ee84907d54 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java @@ -7,27 +7,23 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionCanCommitSuccessTest extends AbstractTransactionSuccessTest { - private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess( - TRANSACTION_IDENTIFIER, 0); + private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(TRANSACTION_IDENTIFIER, + 0); - @Override - protected TransactionCanCommitSuccess object() { - return OBJECT; + public TransactionCanCommitSuccessTest() { + super(OBJECT, 99); } @Test public void cloneAsVersionTest() { - final TransactionCanCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionCanCommitSuccess); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java index 9db578d2b6..ca1f8f8dd9 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java @@ -7,27 +7,22 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionCommitSuccessTest extends AbstractTransactionSuccessTest { - private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess( - TRANSACTION_IDENTIFIER, 0); + private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(TRANSACTION_IDENTIFIER, 0); - @Override - protected TransactionCommitSuccess object() { - return OBJECT; + public TransactionCommitSuccessTest() { + super(OBJECT, 98); } @Test public void cloneAsVersionTest() { - final TransactionCommitSuccess clone = 
OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionCommitSuccess); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } -} \ No newline at end of file +} diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java index 26f1a379bb..a5b3401a7f 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java @@ -7,28 +7,29 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionDoCommitRequestTest extends AbstractTransactionRequestTest { - private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF); + private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF); - @Override - protected TransactionDoCommitRequest object() { - return OBJECT; + public TransactionDoCommitRequestTest() { + super(OBJECT, 102); } @Test public void cloneAsVersionTest() { - final TransactionDoCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionDoCommitRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionDoCommitRequest) deserialize).getReplyTo()); + protected void doAdditionalAssertions(final TransactionDoCommitRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java index 9e8467e5ca..7e027ea2d3 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java @@ -7,21 +7,23 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionFailureTest extends AbstractRequestFailureTest { private static 
final TransactionFailure OBJECT = new TransactionFailure(TRANSACTION_IDENTIFIER, 0, CAUSE); - @Override - TransactionFailure object() { - return OBJECT; + public TransactionFailureTest() { + super(OBJECT, 100); } @Test public void cloneAsVersionTest() { - final TransactionFailure clone = OBJECT.cloneAsVersion(ABIVersion.current()); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getCause(), clone.getCause()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java index ceac8606b8..21605372c2 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java @@ -7,28 +7,29 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionPreCommitRequestTest extends AbstractTransactionRequestTest { - private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF); + private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF); - @Override - protected TransactionPreCommitRequest object() { - return OBJECT; + public TransactionPreCommitRequestTest() { + super(OBJECT, 102); } @Test public void cloneAsVersionTest() { - final TransactionPreCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionPreCommitRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPreCommitRequest) deserialize).getReplyTo()); + protected void doAdditionalAssertions(final TransactionPreCommitRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java index 0130ea06ab..5f8f29f450 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java @@ -7,27 +7,23 @@ */ package org.opendaylight.controller.cluster.access.commands; 
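Looking back at the ReadTransactionSuccessTest hunk earlier in this patch, node construction there moves from the old Builders.containerBuilder() to org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes. Pulled out of the diff context, the construction looks like the sketch below; it uses only calls visible in the patch, and the namespace/localName strings are the test's own placeholder values.

    // Build an empty container node addressed by QName(namespace, localName)
    static ContainerNode emptyContainer(final String namespace, final String localName) {
        return ImmutableNodes.newContainerBuilder()
                .withNodeIdentifier(new NodeIdentifier(QName.create(namespace, localName)))
                .build();
    }

The resulting node is wrapped as Optional.of(node) when the ReadTransactionSuccess fixture is built.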
-import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionPreCommitSuccessTest extends AbstractTransactionSuccessTest { - private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess( - TRANSACTION_IDENTIFIER, 0); + private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(TRANSACTION_IDENTIFIER, + 0); - @Override - protected TransactionPreCommitSuccess object() { - return OBJECT; + public TransactionPreCommitSuccessTest() { + super(OBJECT, 99); } @Test public void cloneAsVersionTest() { - final TransactionPreCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionPreCommitSuccess); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java index 5ae9f26dbf..7453f4461a 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java @@ -7,28 +7,29 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionPurgeRequestTest extends AbstractTransactionRequestTest { - private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest( - TRANSACTION_IDENTIFIER, 0, ACTOR_REF); + private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0, + ACTOR_REF); - @Override - protected TransactionPurgeRequest object() { - return OBJECT; + public TransactionPurgeRequestTest() { + super(OBJECT, 101); } @Test public void cloneAsVersionTest() { - final TransactionPurgeRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); + assertEquals(OBJECT.getReplyTo(), clone.getReplyTo()); } @Override - protected void doAdditionalAssertions(final Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionPurgeRequest); - Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPurgeRequest) deserialize).getReplyTo()); + protected void doAdditionalAssertions(final TransactionPurgeRequest deserialize) { + assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java 
b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java index bef9ae140b..e8b4294d5e 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java @@ -7,27 +7,22 @@ */ package org.opendaylight.controller.cluster.access.commands; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.junit.Test; import org.opendaylight.controller.cluster.access.ABIVersion; public class TransactionPurgeResponseTest extends AbstractTransactionSuccessTest { - private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse( - TRANSACTION_IDENTIFIER, 0); + private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, 0); - @Override - protected TransactionPurgeResponse object() { - return OBJECT; + public TransactionPurgeResponseTest() { + super(OBJECT, 98); } @Test public void cloneAsVersionTest() { - final TransactionPurgeResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON); - Assert.assertEquals(OBJECT, clone); - } - - @Override - protected void doAdditionalAssertions(Object deserialize) { - Assert.assertTrue(deserialize instanceof TransactionPurgeResponse); + final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION); + assertEquals(OBJECT.getSequence(), clone.getSequence()); + assertEquals(OBJECT.getTarget(), clone.getTarget()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java index f0f5d3d26c..fc34fc3978 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java @@ -7,12 +7,18 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import org.apache.commons.lang.SerializationUtils; -import org.junit.Assert; +import static java.util.Objects.requireNonNull; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.apache.commons.lang3.SerializationUtils; import org.junit.Before; import org.junit.Test; public abstract class AbstractEnvelopeTest> { + protected record EnvelopeDetails>(E envelope, int expectedSize) { + // Nothing else + } + private static final FrontendIdentifier FRONTEND = new FrontendIdentifier(MemberName.forName("test"), FrontendIdentifierTest.ONE_FRONTEND_TYPE); private static final ClientIdentifier CLIENT = new ClientIdentifier(FRONTEND, 0); @@ -20,33 +26,37 @@ public abstract class AbstractEnvelopeTest> { protected static final TransactionIdentifier OBJECT = new TransactionIdentifier(HISTORY, 0); private E envelope; + private int expectedSize; @Before public void setUp() throws Exception { - envelope = createEnvelope(); + final var details = createEnvelope(); + envelope = requireNonNull(details.envelope); + expectedSize = details.expectedSize; } @Test public void testProxySerializationDeserialization() { final byte[] serializedBytes = SerializationUtils.serialize(envelope); - final Object 
deserialize = SerializationUtils.deserialize(serializedBytes);
-        checkDeserialized((E) deserialize);
+        assertEquals(expectedSize, serializedBytes.length);
+        @SuppressWarnings("unchecked")
+        final E deserialize = (E) SerializationUtils.deserialize(serializedBytes);
+        checkDeserialized(deserialize);
     }
 
     private void checkDeserialized(final E deserializedEnvelope) {
-        Assert.assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
-        Assert.assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
-        final Message expectedMessage = envelope.getMessage();
-        final Message actualMessage = deserializedEnvelope.getMessage();
-        Assert.assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
-        Assert.assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
-        Assert.assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
-        Assert.assertEquals(expectedMessage.getClass(), actualMessage.getClass());
+        assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
+        assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
+        final var expectedMessage = envelope.getMessage();
+        final var actualMessage = deserializedEnvelope.getMessage();
+        assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
+        assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
+        assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
+        assertEquals(expectedMessage.getClass(), actualMessage.getClass());
         doAdditionalAssertions(envelope, deserializedEnvelope);
     }
 
-    protected abstract E createEnvelope();
+    protected abstract EnvelopeDetails createEnvelope();
 
-    @SuppressWarnings("checkstyle:hiddenField")
     protected abstract void doAdditionalAssertions(E envelope, E resolvedObject);
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java
index 8fcc9fa1f5..74cd4cf3ba 100644
--- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java
+++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java
@@ -42,7 +42,6 @@ public abstract class AbstractIdentifierTest {
         assertEquals(object().hashCode(), equalObject().hashCode());
     }
 
-    @Test
     public final void testSerialization() throws Exception {
         assertTrue(object().equals(copy(object())));
diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java
index c65c1956c2..48ceabef81 100644
--- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java
+++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java
@@ -7,22 +7,37 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import
akka.actor.ActorSystem; import akka.actor.ExtendedActorSystem; import akka.serialization.JavaSerializer; import akka.testkit.TestProbe; import com.google.common.base.MoreObjects; -import org.apache.commons.lang.SerializationUtils; -import org.junit.Assert; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Before; import org.junit.Test; public abstract class AbstractRequestTest> { private static final ActorSystem SYSTEM = ActorSystem.create("test"); protected static final ActorRef ACTOR_REF = TestProbe.apply(SYSTEM).ref(); + private static final int ACTOR_REF_SIZE = ACTOR_REF.path().toSerializationFormat().length(); + + private final T object; + private final int expectedSize; - protected abstract T object(); + protected AbstractRequestTest(final T object, final int baseSize) { + this.object = requireNonNull(object); + this.expectedSize = baseSize + ACTOR_REF_SIZE; + } + + protected final T object() { + return object; + } @Before public void setUp() { @@ -31,25 +46,27 @@ public abstract class AbstractRequestTest> { @Test public void getReplyToTest() { - Assert.assertEquals(ACTOR_REF, object().getReplyTo()); + assertEquals(ACTOR_REF, object.getReplyTo()); } @Test public void addToStringAttributesCommonTest() { - final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object())); - Assert.assertTrue(result.toString().contains("replyTo=" + ACTOR_REF)); + final var result = object.addToStringAttributes(MoreObjects.toStringHelper(object)); + assertThat(result.toString(), containsString("replyTo=" + ACTOR_REF)); } - @SuppressWarnings("unchecked") @Test public void serializationTest() { - final Object deserialize = SerializationUtils.clone(object()); + final byte[] bytes = SerializationUtils.serialize(object); + assertEquals(expectedSize, bytes.length); + @SuppressWarnings("unchecked") + final T deserialize = (T) SerializationUtils.deserialize(bytes); - Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget()); - Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion()); - Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence()); + assertEquals(object.getTarget(), deserialize.getTarget()); + assertEquals(object.getVersion(), deserialize.getVersion()); + assertEquals(object.getSequence(), deserialize.getSequence()); doAdditionalAssertions(deserialize); } - protected abstract void doAdditionalAssertions(Object deserialize); + protected abstract void doAdditionalAssertions(T deserialize); } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java index 0908659487..d9bd5c126b 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java @@ -32,6 +32,6 @@ public class ClientIdentifierTest extends AbstractIdentifierTest { - @Override - protected FailureEnvelope createEnvelope() { - final RequestFailure message = - new MockFailure(OBJECT, new RuntimeRequestException("msg", new RuntimeException()), 42); - return new FailureEnvelope(message, 1L, 2L, 11L); + protected EnvelopeDetails createEnvelope() { + final var cause = new RuntimeRequestException("msg", new 
RuntimeException()); + final int causeSize = SerializationUtils.serialize(cause).length; + return new EnvelopeDetails<>(new FailureEnvelope(new MockFailure(OBJECT, cause, 42), 1L, 2L, 11L), + causeSize + 216); } @Override protected void doAdditionalAssertions(final FailureEnvelope envelope, final FailureEnvelope resolvedObject) { assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos()); - final RequestException expectedCause = envelope.getMessage().getCause(); - final RequestException actualCause = resolvedObject.getMessage().getCause(); + final var expectedCause = envelope.getMessage().getCause(); + final var actualCause = resolvedObject.getMessage().getCause(); assertEquals(expectedCause.getMessage(), actualCause.getMessage()); assertEquals(expectedCause.isRetriable(), actualCause.isRetriable()); } - private static class MockRequestFailureProxy extends AbstractRequestFailureProxy { + private static class MockRequestFailureProxy implements RequestFailure.SerialForm { + @java.io.Serial + private static final long serialVersionUID = 5015515628523887221L; + + private MockFailure message; @SuppressWarnings("checkstyle:RedundantModifier") public MockRequestFailureProxy() { @@ -40,23 +47,38 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest { } private MockRequestFailureProxy(final MockFailure mockFailure) { - super(mockFailure); + message = requireNonNull(mockFailure); } @Override - protected MockFailure createFailure(final WritableIdentifier target, final long sequence, - final RequestException failureCause) { + public MockFailure createFailure(final WritableIdentifier target, final long sequence, + final RequestException failureCause) { return new MockFailure(target, failureCause, sequence); } @Override - protected WritableIdentifier readTarget(final DataInput in) throws IOException { + public WritableIdentifier readTarget(final DataInput in) throws IOException { return TransactionIdentifier.readFrom(in); } + @Override + public MockFailure message() { + return verifyNotNull(message); + } + + @Override + public void setMessage(final MockFailure message) { + this.message = requireNonNull(message); + } + + @Override + public Object readResolve() { + return message(); + } } private static class MockFailure extends RequestFailure { + @java.io.Serial private static final long serialVersionUID = 1L; MockFailure(final WritableIdentifier target, final RequestException cause, final long sequence) { @@ -64,7 +86,7 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest { } @Override - protected AbstractRequestFailureProxy externalizableProxy( + protected RequestFailure.SerialForm externalizableProxy( final ABIVersion version) { return new MockRequestFailureProxy(this); } diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java index cc7124483b..203ffd5ab9 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java @@ -33,6 +33,6 @@ public class FrontendIdentifierTest extends AbstractIdentifierTest { @Override int expectedSize() { - return 104; + return 88; } @Test diff --git 
a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java index 161370deff..dc53bb3a76 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java @@ -33,6 +33,6 @@ public class LocalHistoryIdentifierTest extends AbstractIdentifierTest { @Override int expectedSize() { - return 101; + return 87; } @Test diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java index b63dc4c78c..30366c99f1 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java @@ -7,19 +7,21 @@ */ package org.opendaylight.controller.cluster.access.concepts; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.actor.ExtendedActorSystem; import akka.serialization.JavaSerializer; import akka.testkit.TestProbe; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest; import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse; public class RequestEnvelopeTest extends AbstractEnvelopeTest { - private ActorSystem system; private ActorRef replyTo; private TestProbe replyToProbe; @@ -33,27 +35,29 @@ public class RequestEnvelopeTest extends AbstractEnvelopeTest { } @Override - protected RequestEnvelope createEnvelope() { + protected EnvelopeDetails createEnvelope() { replyToProbe = new TestProbe(system); replyTo = replyToProbe.ref(); - final TransactionPurgeRequest message = new TransactionPurgeRequest(OBJECT, 2L, replyTo); - return new RequestEnvelope(message, 1L, 2L); + final int refSize = replyTo.path().toSerializationFormat().length(); + + return new EnvelopeDetails<>(new RequestEnvelope(new TransactionPurgeRequest(OBJECT, 2L, replyTo), 1L, 2L), + refSize + 179); } @Override protected void doAdditionalAssertions(final RequestEnvelope envelope, final RequestEnvelope resolvedObject) { final Request actual = resolvedObject.getMessage(); - Assert.assertTrue(actual instanceof TransactionPurgeRequest); - final TransactionPurgeRequest purgeRequest = (TransactionPurgeRequest) actual; - Assert.assertEquals(replyTo, purgeRequest.getReplyTo()); - final TransactionPurgeResponse response = new TransactionPurgeResponse(OBJECT, 2L); + assertThat(actual, instanceOf(TransactionPurgeRequest.class)); + final var purgeRequest = (TransactionPurgeRequest) actual; + assertEquals(replyTo, purgeRequest.getReplyTo()); + final var response = new TransactionPurgeResponse(OBJECT, 2L); resolvedObject.sendSuccess(response, 11L); - final SuccessEnvelope successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class); 
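The envelope tests above now return an EnvelopeDetails pair of (envelope, expected serialized size), and AbstractEnvelopeTest asserts the exact byte count. Because the actor path embedded in a RequestEnvelope varies between test runs, its length is added separately, which is where the refSize + 179 above comes from. The sketch below shows how such a base constant could be re-derived if the wire format changes; the sizeOf helper and the surrounding wiring are illustrative, not part of the patch.

    // Same serialization path the tests use (org.apache.commons.lang3.SerializationUtils)
    static int sizeOf(final Serializable object) {
        return SerializationUtils.serialize(object).length;
    }

    // Inside createEnvelope(): the fixed portion is the full size minus the variable-length actor path
    final var request = new TransactionPurgeRequest(OBJECT, 2L, replyTo);
    final var envelope = new RequestEnvelope(request, 1L, 2L);
    final int refSize = replyTo.path().toSerializationFormat().length();
    final int baseSize = sizeOf(envelope) - refSize;    // should equal the hard-coded 179 while the format is stable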
- Assert.assertEquals(response, successEnvelope.getMessage()); - final RuntimeRequestException failResponse = new RuntimeRequestException("fail", new RuntimeException()); + final var successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class); + assertEquals(response, successEnvelope.getMessage()); + final var failResponse = new RuntimeRequestException("fail", new RuntimeException()); resolvedObject.sendFailure(failResponse, 11L); - final FailureEnvelope failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class); - Assert.assertEquals(failResponse, failureEnvelope.getMessage().getCause()); + final var failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class); + assertEquals(failResponse, failureEnvelope.getMessage().getCause()); } @After diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java index 30d9e98636..9d1aa40b61 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java @@ -7,20 +7,18 @@ */ package org.opendaylight.controller.cluster.access.concepts; -import org.junit.Assert; +import static org.junit.Assert.assertEquals; + import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess; public class SuccessEnvelopeTest extends AbstractEnvelopeTest { - @Override - protected SuccessEnvelope createEnvelope() { - final RequestSuccess message = new TransactionAbortSuccess(OBJECT, 2L); - return new SuccessEnvelope(message, 1L, 2L, 11L); + protected EnvelopeDetails createEnvelope() { + return new EnvelopeDetails<>(new SuccessEnvelope(new TransactionAbortSuccess(OBJECT, 2L), 1L, 2L, 11L), 180); } @Override - protected void doAdditionalAssertions(final SuccessEnvelope envelope, - final SuccessEnvelope resolvedObject) { - Assert.assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos()); + protected void doAdditionalAssertions(final SuccessEnvelope envelope, final SuccessEnvelope resolvedObject) { + assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos()); } } \ No newline at end of file diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java index b33b61c49b..4433c49a6e 100644 --- a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java +++ b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java @@ -34,6 +34,6 @@ public class TransactionIdentifierTest extends AbstractIdentifierTest org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -12,44 +12,90 @@ bundle + + com.github.spotbugs + spotbugs-annotations + true + + + com.google.guava + guava + + + org.eclipse.jdt + org.eclipse.jdt.annotation + org.opendaylight.controller cds-access-api - + + org.opendaylight.controller + repackaged-akka + + + org.opendaylight.controller + sal-clustering-commons + 
org.opendaylight.yangtools concepts - org.opendaylight.yangtools - yang-data-api + org.scala-lang + scala-library - org.opendaylight.controller - sal-clustering-commons + org.checkerframework + checker-qual + true - org.mockito - mockito-core + com.typesafe + config + test com.typesafe.akka akka-testkit_2.13 - com.google.guava - guava-testlib + com.google.guava + guava-testlib + + + org.opendaylight.yangtools + yang-common + test + + + org.opendaylight.yangtools + yang-data-api + test - org.opendaylight.controller - sal-clustering-commons - test-jar + org.opendaylight.yangtools + yang-data-impl + test + + + org.opendaylight.controller + sal-clustering-commons + test-jar + + + + maven-javadoc-plugin + 3.1.1 + + + + org.apache.felix diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java index a1c84c0e83..98edb1d341 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java @@ -10,17 +10,13 @@ package org.opendaylight.controller.cluster.access.client; import akka.actor.ActorRef; import akka.actor.PoisonPill; import akka.persistence.AbstractPersistentActor; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Frontend actor which takes care of persisting generations and creates an appropriate ClientIdentifier. - * - * @author Robert Varga */ -@Beta public abstract class AbstractClientActor extends AbstractPersistentActor { private static final Logger LOG = LoggerFactory.getLogger(AbstractClientActor.class); private AbstractClientActorBehavior currentBehavior; diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java index 4188a41fd5..39ae396cff 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java @@ -10,7 +10,6 @@ package org.opendaylight.controller.cluster.access.client; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; -import com.google.common.annotations.Beta; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; @@ -18,10 +17,7 @@ import org.eclipse.jdt.annotation.Nullable; * Base behavior attached to {@link AbstractClientActor}. 
* * @param Type of associated context - * - * @author Robert Varga */ -@Beta public abstract class AbstractClientActorBehavior implements AutoCloseable { private final @NonNull C context; @@ -60,6 +56,7 @@ public abstract class AbstractClientActorBehavior { private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(MAX_DELAY_SECONDS); private final Lock lock = new ReentrantLock(); - private final ClientActorContext context; - @GuardedBy("lock") - private final TransmitQueue queue; + private final @NonNull ClientActorContext context; private final @NonNull Long cookie; private final String backendName; + @GuardedBy("lock") + private final TransmitQueue queue; @GuardedBy("lock") private boolean haveTimer; @@ -95,12 +95,12 @@ public abstract class AbstractClientConnection { // Private constructor to avoid code duplication. private AbstractClientConnection(final AbstractClientConnection oldConn, final TransmitQueue newQueue, final String backendName) { - this.context = requireNonNull(oldConn.context); - this.cookie = requireNonNull(oldConn.cookie); + context = oldConn.context; + cookie = oldConn.cookie; this.backendName = requireNonNull(backendName); - this.queue = requireNonNull(newQueue); + queue = requireNonNull(newQueue); // Will be updated in finishReplay if needed. - this.lastReceivedTicks = oldConn.lastReceivedTicks; + lastReceivedTicks = oldConn.lastReceivedTicks; } // This constructor is only to be called by ConnectingClientConnection constructor. @@ -110,8 +110,8 @@ public abstract class AbstractClientConnection { this.context = requireNonNull(context); this.cookie = requireNonNull(cookie); this.backendName = requireNonNull(backendName); - this.queue = new TransmitQueue.Halted(queueDepth); - this.lastReceivedTicks = currentTime(); + queue = new TransmitQueue.Halted(queueDepth); + lastReceivedTicks = currentTime(); } // This constructor is only to be called (indirectly) by ReconnectingClientConnection constructor. 
@@ -128,7 +128,7 @@ public abstract class AbstractClientConnection { requireNonNull(oldConn.context).messageSlicer()), newBackend.getName()); } - public final ClientActorContext context() { + public final @NonNull ClientActorContext context() { return context; } @@ -136,7 +136,7 @@ public abstract class AbstractClientConnection { return cookie; } - public final ActorRef localActor() { + public final @NonNull ActorRef localActor() { return context.self(); } @@ -345,7 +345,7 @@ public abstract class AbstractClientConnection { if (delay.isPresent()) { // If there is new delay, schedule a timer - scheduleTimer(delay.getAsLong()); + scheduleTimer(delay.orElseThrow()); } else { LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this); } @@ -489,7 +489,7 @@ public abstract class AbstractClientConnection { } if (maybeEntry.isPresent()) { - final TransmittedConnectionEntry entry = maybeEntry.get(); + final TransmittedConnectionEntry entry = maybeEntry.orElseThrow(); LOG.debug("Completing {} with {}", entry, envelope); entry.complete(envelope.getMessage()); } diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java index 14ca1ef38c..3f8c11a913 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.client; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.Stopwatch; import com.google.common.base.Verify; import java.util.Collection; @@ -45,10 +44,7 @@ import scala.concurrent.duration.FiniteDuration; /** * A behavior, which handles messages sent to a {@link AbstractClientActor}. 
- * - * @author Robert Varga */ -@Beta public abstract class ClientActorBehavior extends RecoveredClientActorBehavior implements Identifiable { /** @@ -152,12 +148,11 @@ public abstract class ClientActorBehavior extends return ((InternalCommand) command).execute(this); } - if (command instanceof SuccessEnvelope) { - return onRequestSuccess((SuccessEnvelope) command); + if (command instanceof SuccessEnvelope successEnvelope) { + return onRequestSuccess(successEnvelope); } - - if (command instanceof FailureEnvelope) { - return internalOnRequestFailure((FailureEnvelope) command); + if (command instanceof FailureEnvelope failureEnvelope) { + return internalOnRequestFailure(failureEnvelope); } if (MessageAssembler.isHandledMessage(command)) { @@ -174,10 +169,10 @@ public abstract class ClientActorBehavior extends } private static long extractCookie(final Identifier id) { - if (id instanceof TransactionIdentifier) { - return ((TransactionIdentifier) id).getHistoryId().getCookie(); - } else if (id instanceof LocalHistoryIdentifier) { - return ((LocalHistoryIdentifier) id).getCookie(); + if (id instanceof TransactionIdentifier transactionId) { + return transactionId.getHistoryId().getCookie(); + } else if (id instanceof LocalHistoryIdentifier historyId) { + return historyId.getCookie(); } else { throw new IllegalArgumentException("Unhandled identifier " + id); } @@ -215,7 +210,7 @@ public abstract class ClientActorBehavior extends * sessionId and if it does not match our current connection just ignore it. */ final Optional optBackend = conn.getBackendInfo(); - if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) { + if (optBackend.isPresent() && optBackend.orElseThrow().getSessionId() != command.getSessionId()) { LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(), conn, command); return this; @@ -327,8 +322,8 @@ public abstract class ClientActorBehavior extends LOG.error("{}: failed to resolve shard {}", persistenceId(), shard, failure); final RequestException cause; - if (failure instanceof RequestException) { - cause = (RequestException) failure; + if (failure instanceof RequestException requestException) { + cause = requestException; } else { cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure); } @@ -420,7 +415,7 @@ public abstract class ClientActorBehavior extends final Long shard = oldConn.cookie(); LOG.info("{}: refreshing backend for shard {}", persistenceId(), shard); - resolver().refreshBackendInfo(shard, conn.getBackendInfo().get()).whenComplete( + resolver().refreshBackendInfo(shard, conn.getBackendInfo().orElseThrow()).whenComplete( (backend, failure) -> context().executeInActor(behavior -> { backendConnectFinished(shard, conn, backend, failure); return behavior; diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java index 0864da10b0..abebf02197 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java @@ -13,7 +13,6 @@ import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.actor.Cancellable; import akka.actor.Scheduler; -import 
com.google.common.annotations.Beta; import com.google.common.base.Ticker; import java.util.concurrent.TimeUnit; import org.eclipse.jdt.annotation.NonNull; @@ -32,10 +31,7 @@ import scala.concurrent.duration.FiniteDuration; * Time-keeping in a client actor is based on monotonic time. The precision of this time can be expected to be the * same as {@link System#nanoTime()}, but it is not tied to that particular clock. Actor clock is exposed as * a {@link Ticker}, which can be obtained via {@link #ticker()}. This class is thread-safe. - * - * @author Robert Varga */ -@Beta public class ClientActorContext extends AbstractClientActorContext implements Identifiable { private final ExecutionContext executionContext; private final ClientIdentifier identifier; @@ -49,9 +45,9 @@ public class ClientActorContext extends AbstractClientActorContext implements Id final ClientIdentifier identifier, final ClientActorConfig config) { super(self, persistenceId); this.identifier = requireNonNull(identifier); - this.scheduler = requireNonNull(system).scheduler(); - this.executionContext = system.dispatcher(); - this.dispatchers = new Dispatchers(system.dispatchers()); + scheduler = requireNonNull(system).scheduler(); + executionContext = system.dispatcher(); + dispatchers = new Dispatchers(system.dispatchers()); this.config = requireNonNull(config); messageSlicer = MessageSlicer.builder().messageSliceSize(config.getMaximumMessageSliceSize()) diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java index 8a5af45d15..8bcce85dd3 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java @@ -7,12 +7,14 @@ */ package org.opendaylight.controller.cluster.access.client; -import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.access.concepts.RequestException; -@Beta +/** + * A connected connection. + * + * @param Backend info type + */ public final class ConnectedClientConnection extends AbstractReceivingClientConnection { - ConnectedClientConnection(final AbstractClientConnection oldConnection, final T newBackend) { super(oldConnection, newBackend); } diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java index 1015990106..445321b474 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java @@ -7,11 +7,9 @@ */ package org.opendaylight.controller.cluster.access.client; -import com.google.common.annotations.Beta; import java.util.Optional; import org.opendaylight.controller.cluster.access.concepts.RequestException; -@Beta public final class ConnectingClientConnection extends AbstractClientConnection { /** * A wild estimate on how deep a queue should be. 
Without having knowledge of the remote actor we can only diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java index b47ddee2a3..c5e47e76dd 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.client; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; import com.google.common.base.MoreObjects; import com.google.common.base.MoreObjects.ToStringHelper; import java.util.function.Consumer; @@ -20,10 +19,7 @@ import org.opendaylight.yangtools.concepts.Immutable; /** * Single entry in a {@link AbstractClientConnection}. Tracks the request, the associated callback and time when * the request was first enqueued. - * - * @author Robert Varga */ -@Beta public class ConnectionEntry implements Immutable { private final Consumer> callback; private final Request request; @@ -32,7 +28,7 @@ public class ConnectionEntry implements Immutable { ConnectionEntry(final Request request, final Consumer> callback, final long now) { this.request = requireNonNull(request); this.callback = requireNonNull(callback); - this.enqueuedTicks = now; + enqueuedTicks = now; } ConnectionEntry(final ConnectionEntry entry) { diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java index 08bc05346b..0917174b65 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java @@ -7,25 +7,35 @@ */ package org.opendaylight.controller.cluster.access.client; -import com.google.common.annotations.Beta; -import com.google.common.base.Verify; +import static com.google.common.base.Verify.verify; +import static com.google.common.base.Verify.verifyNotNull; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import java.util.concurrent.locks.StampedLock; /** * A lock implementation which allows users to perform optimistic reads and validate them in a fashion similar * to {@link StampedLock}. In case a read is contented with a write, the read side will throw * an {@link InversibleLockException}, which the caller can catch and use to wait for the write to resolve. 
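The import changes above and the hunk that follows swap an AtomicReferenceFieldUpdater for a VarHandle. The recipe is the standard one; a self-contained sketch with an illustrative class and field (not the patch's actual code):

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

final class LatchHolder {
    private static final VarHandle LATCH;

    static {
        try {
            // Resolve the handle for the 'latch' field once, at class initialization.
            LATCH = MethodHandles.lookup().findVarHandle(LatchHolder.class, "latch", Object.class);
        } catch (NoSuchFieldException | IllegalAccessException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    private volatile Object latch;

    boolean acquire(final Object newLatch) {
        // Install newLatch only if nothing is set, the VarHandle equivalent of
        // AtomicReferenceFieldUpdater.compareAndSet(this, null, newLatch).
        return LATCH.compareAndSet(this, null, newLatch);
    }

    Object release() {
        // Clear the field and return the previous value, the equivalent of
        // AtomicReferenceFieldUpdater.getAndSet(this, null).
        return LATCH.getAndSet(this, null);
    }
}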
- * - * @author Robert Varga */ -@Beta public final class InversibleLock { - private static final AtomicReferenceFieldUpdater LATCH_UPDATER = - AtomicReferenceFieldUpdater.newUpdater(InversibleLock.class, CountDownLatch.class, "latch"); + private static final VarHandle LATCH; + + static { + try { + LATCH = MethodHandles.lookup().findVarHandle(InversibleLock.class, "latch", CountDownLatch.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new ExceptionInInitializerError(e); + } + } private final StampedLock lock = new StampedLock(); + + @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", + justification = "https://github.com/spotbugs/spotbugs/issues/2749") private volatile CountDownLatch latch; /** @@ -43,7 +53,7 @@ public final class InversibleLock { // Write-locked. Read the corresponding latch and if present report an exception, which will propagate // and force release of locks. - final CountDownLatch local = latch; + final var local = latch; if (local != null) { throw new InversibleLockException(local); } @@ -57,18 +67,13 @@ public final class InversibleLock { } public long writeLock() { - final CountDownLatch local = new CountDownLatch(1); - final boolean taken = LATCH_UPDATER.compareAndSet(this, null, local); - Verify.verify(taken); - + verify(LATCH.compareAndSet(this, null, new CountDownLatch(1))); return lock.writeLock(); } public void unlockWrite(final long stamp) { - final CountDownLatch local = LATCH_UPDATER.getAndSet(this, null); - Verify.verifyNotNull(local); + final var local = verifyNotNull((CountDownLatch) LATCH.getAndSet(this, null)); lock.unlockWrite(stamp); local.countDown(); } - } diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java index a2f2ffd7e8..82b6568459 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java @@ -9,17 +9,15 @@ package org.opendaylight.controller.cluster.access.client; import static java.util.Objects.requireNonNull; -import com.google.common.annotations.Beta; +import java.io.Serial; import java.util.concurrent.CountDownLatch; /** * Exception thrown from {@link InversibleLock#optimisticRead()} and can be used to wait for the racing write * to complete using {@link #awaitResolution()}. 
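The javadoc above gives the read-side protocol: optimisticRead() either hands back a stamp or throws InversibleLockException, whose awaitResolution() blocks until the racing write completes. A rough caller-side sketch; the validate(stamp) step is an assumption modelled on StampedLock, since only optimisticRead(), writeLock(), unlockWrite() and awaitResolution() are visible in this patch:

// Illustrative reader, assumed to live in the same package as InversibleLock;
// 'state' stands in for whatever shared data the lock protects.
final class OptimisticReader {
    private final InversibleLock lock = new InversibleLock();
    private volatile String state = "initial";

    String read() {
        while (true) {
            try {
                final long stamp = lock.optimisticRead();
                final String snapshot = state;   // read the shared state
                if (lock.validate(stamp)) {      // assumed StampedLock-style validation
                    return snapshot;
                }
                // validation failed: a writer raced us, retry the read
            } catch (InversibleLockException e) {
                e.awaitResolution();             // wait out the in-flight write, then retry
            }
        }
    }
}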
- * - * @author Robert Varga */ -@Beta public final class InversibleLockException extends RuntimeException { + @Serial private static final long serialVersionUID = 1L; private final transient CountDownLatch latch; diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java index fed9d4c5d3..677a57e770 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java @@ -118,12 +118,13 @@ abstract class ProgressTracker { * @param now tick number corresponding to caller's present */ ProgressTracker(final ProgressTracker oldTracker, final long now) { - this.defaultTicksPerTask = oldTracker.defaultTicksPerTask; - this.tasksEncountered = this.tasksClosed = oldTracker.tasksClosed; - this.lastClosed = oldTracker.lastClosed; - this.nearestAllowed = oldTracker.nearestAllowed; // Call cancelDebt explicitly if needed. - this.lastIdle = oldTracker.lastIdle; - this.elapsedBeforeIdle = oldTracker.elapsedBeforeIdle; + defaultTicksPerTask = oldTracker.defaultTicksPerTask; + tasksEncountered = tasksClosed = oldTracker.tasksClosed; + lastClosed = oldTracker.lastClosed; + // Call cancelDebt explicitly if needed. + nearestAllowed = oldTracker.nearestAllowed; + lastIdle = oldTracker.lastIdle; + elapsedBeforeIdle = oldTracker.elapsedBeforeIdle; if (!oldTracker.isIdle()) { transitToIdle(now); } @@ -154,7 +155,8 @@ abstract class ProgressTracker { * * @return number of tasks started but not finished yet */ - final long tasksOpen() { // TODO: Should we return int? + // TODO: Should we return int? + final long tasksOpen() { // TODO: Should we check the return value is non-negative? 
return tasksEncountered - tasksClosed; } diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java index f40deab30d..b44d54921d 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java @@ -63,8 +63,8 @@ final class RecoveringClientActorBehavior extends AbstractClientActorBehavior onReceiveCommand(final Object command) { - if (command instanceof SaveSnapshotFailure) { - LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) command).cause()); + if (command instanceof SaveSnapshotFailure saveFailure) { + LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause()); return null; - } else if (command instanceof SaveSnapshotSuccess) { - LOG.debug("{}: got command: {}", persistenceId(), command); - SaveSnapshotSuccess saved = (SaveSnapshotSuccess)command; + } else if (command instanceof SaveSnapshotSuccess saved) { + LOG.debug("{}: got command: {}", persistenceId(), saved); context().deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), saved.metadata().timestamp() - 1, 0L, 0L)); return this; - } else if (command instanceof DeleteSnapshotsSuccess) { - LOG.debug("{}: got command: {}", persistenceId(), command); - } else if (command instanceof DeleteSnapshotsFailure) { + } else if (command instanceof DeleteSnapshotsSuccess deleteSuccess) { + LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess); + } else if (command instanceof DeleteSnapshotsFailure deleteFailure) { // Not treating this as a fatal error. - LOG.warn("{}: failed to delete prior snapshots", persistenceId(), - ((DeleteSnapshotsFailure) command).cause()); + LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause()); } else { LOG.debug("{}: stashing command {}", persistenceId(), command); context().stash(); diff --git a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java index 71de580bd3..cc3da1e450 100644 --- a/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java +++ b/opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java @@ -53,10 +53,8 @@ import org.slf4j.LoggerFactory; * *

    * This class is not thread-safe, as it is expected to be guarded by {@link AbstractClientConnection}. - * - * @author Robert Varga */ -abstract class TransmitQueue { +abstract sealed class TransmitQueue { static final class Halted extends TransmitQueue { // For ConnectingClientConnection. Halted(final int targetDepth) { @@ -148,7 +146,8 @@ abstract class TransmitQueue { private final Deque inflight = new ArrayDeque<>(); private final Deque pending = new ArrayDeque<>(); - private final AveragingProgressTracker tracker; // Cannot be just ProgressTracker as we are inheriting limits. + // Cannot be just ProgressTracker as we are inheriting limits. + private final AveragingProgressTracker tracker; private ReconnectForwarder successor; /** @@ -218,7 +217,7 @@ abstract class TransmitQueue { return Optional.empty(); } - final TransmittedConnectionEntry entry = maybeEntry.get(); + final TransmittedConnectionEntry entry = maybeEntry.orElseThrow(); tracker.closeTask(now, entry.getEnqueuedTicks(), entry.getTxTicks(), envelope.getExecutionTimeNanos()); // We have freed up a slot, try to transmit something @@ -256,7 +255,7 @@ abstract class TransmitQueue { return false; } - inflight.addLast(maybeTransmitted.get()); + inflight.addLast(maybeTransmitted.orElseThrow()); return true; } @@ -425,12 +424,10 @@ abstract class TransmitQueue { } // Check if the entry has (ever) been transmitted - if (!(e instanceof TransmittedConnectionEntry)) { + if (!(e instanceof TransmittedConnectionEntry te)) { return Optional.empty(); } - final TransmittedConnectionEntry te = (TransmittedConnectionEntry) e; - // Now check session match if (envelope.getSessionId() != te.getSessionId()) { LOG.debug("Expecting session {}, ignoring response {}", te.getSessionId(), envelope); diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java index 0b630e2da8..90ffd77a34 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java @@ -7,11 +7,11 @@ */ package org.opendaylight.controller.cluster.access.client; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -22,34 +22,50 @@ import akka.persistence.SnapshotMetadata; import akka.testkit.TestProbe; import akka.testkit.javadsl.TestKit; import com.typesafe.config.ConfigFactory; -import java.lang.reflect.Field; import java.util.Optional; import java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import 
org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendType; import org.opendaylight.controller.cluster.access.concepts.MemberName; import scala.concurrent.duration.FiniteDuration; -public class ActorBehaviorTest { - +@ExtendWith(MockitoExtension.class) +class ActorBehaviorTest { private static final String MEMBER_1_FRONTEND_TYPE_1 = "member-1-frontend-type-1"; private static final FiniteDuration TIMEOUT = FiniteDuration.create(5, TimeUnit.SECONDS); + @Mock + private InternalCommand cmd; + @Mock(answer = Answers.CALLS_REAL_METHODS) + private ClientActorBehavior initialBehavior; + @Mock + private AbstractClientActorContext ctx; + private ActorSystem system; private TestProbe probe; - private ClientActorBehavior initialBehavior; private MockedSnapshotStore.SaveRequest saveRequest; private FrontendIdentifier id; private ActorRef mockedActor; - @Before - public void setUp() throws Exception { - initialBehavior = createInitialBehaviorMock(); + @BeforeEach + void beforeEach() throws Exception { + //persistenceId() in AbstractClientActorBehavior is final and can't be mocked + //use reflection to work around this + final var context = AbstractClientActorBehavior.class.getDeclaredField("context"); + context.setAccessible(true); + context.set(initialBehavior, ctx); + final var persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId"); + persistenceId.setAccessible(true); + persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1); + system = ActorSystem.apply("system1"); final ActorRef storeRef = system.registerExtension(Persistence.lookup()).snapshotStoreFor(null, ConfigFactory.empty()); @@ -62,25 +78,23 @@ public class ActorBehaviorTest { saveRequest = handleRecovery(null); } - @After - public void tearDown() { + @AfterEach + void afterEach() { TestKit.shutdownActorSystem(system); } @Test - public void testInitialBehavior() { - final InternalCommand cmd = mock(InternalCommand.class); - when(cmd.execute(any())).thenReturn(initialBehavior); + void testInitialBehavior() { + doReturn(initialBehavior).when(cmd).execute(any()); mockedActor.tell(cmd, ActorRef.noSender()); verify(cmd, timeout(1000)).execute(initialBehavior); } @Test - public void testCommandStashing() { + void testCommandStashing() { system.stop(mockedActor); mockedActor = system.actorOf(MockedActor.props(id, initialBehavior)); - final InternalCommand cmd = mock(InternalCommand.class); - when(cmd.execute(any())).thenReturn(initialBehavior); + doReturn(initialBehavior).when(cmd).execute(any()); //send messages before recovery is completed mockedActor.tell(cmd, ActorRef.noSender()); mockedActor.tell(cmd, ActorRef.noSender()); @@ -91,16 +105,16 @@ public class ActorBehaviorTest { } @Test - public void testRecoveryAfterRestart() { + void testRecoveryAfterRestart() { system.stop(mockedActor); mockedActor = system.actorOf(MockedActor.props(id, initialBehavior)); final MockedSnapshotStore.SaveRequest newSaveRequest = handleRecovery(new SelectedSnapshot(saveRequest.getMetadata(), saveRequest.getSnapshot())); - Assert.assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId()); + assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId()); } @Test - public void testRecoveryAfterRestartFrontendIdMismatch() { + void testRecoveryAfterRestartFrontendIdMismatch() { system.stop(mockedActor); //start actor again mockedActor = system.actorOf(MockedActor.props(id, initialBehavior)); @@ -117,7 +131,7 @@ public 
class ActorBehaviorTest { } @Test - public void testRecoveryAfterRestartSaveSnapshotFail() { + void testRecoveryAfterRestartSaveSnapshotFail() { system.stop(mockedActor); mockedActor = system.actorOf(MockedActor.props(id, initialBehavior)); probe.watch(mockedActor); @@ -130,7 +144,7 @@ public class ActorBehaviorTest { } @Test - public void testRecoveryAfterRestartDeleteSnapshotsFail() { + void testRecoveryAfterRestartDeleteSnapshotsFail() { system.stop(mockedActor); mockedActor = system.actorOf(MockedActor.props(id, initialBehavior)); probe.watch(mockedActor); @@ -144,21 +158,6 @@ public class ActorBehaviorTest { probe.expectNoMessage(); } - @SuppressWarnings("unchecked") - private static ClientActorBehavior createInitialBehaviorMock() throws Exception { - final ClientActorBehavior initialBehavior = mock(ClientActorBehavior.class); - //persistenceId() in AbstractClientActorBehavior is final and can't be mocked - //use reflection to work around this - final Field context = AbstractClientActorBehavior.class.getDeclaredField("context"); - context.setAccessible(true); - final AbstractClientActorContext ctx = mock(AbstractClientActorContext.class); - context.set(initialBehavior, ctx); - final Field persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId"); - persistenceId.setAccessible(true); - persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1); - return initialBehavior; - } - private MockedSnapshotStore.SaveRequest handleRecovery(final SelectedSnapshot savedState) { probe.expectMsgClass(MockedSnapshotStore.LoadRequest.class); //offer snapshot @@ -173,7 +172,6 @@ public class ActorBehaviorTest { } private static class MockedActor extends AbstractClientActor { - private final ClientActorBehavior initialBehavior; private final ClientActorConfig mockConfig = AccessClientUtil.newMockClientActorConfig(); @@ -196,5 +194,4 @@ public class ActorBehaviorTest { return mockConfig; } } - } diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java index 1af656b341..819de8b8b4 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java @@ -27,7 +27,8 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier import org.opendaylight.controller.cluster.messaging.MessageSlice; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; public class ConnectedClientConnectionTest extends AbstractClientConnectionTest, BackendInfo> { @@ -42,7 +43,7 @@ public class ConnectedClientConnectionTest @Override protected ConnectedClientConnection createConnection() { - final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10); + final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10); final ConnectingClientConnection connectingConn = new 
ConnectingClientConnection<>(context, 0L, backend.getName()); return new ConnectedClientConnection<>(connectingConn, backend); @@ -70,9 +71,10 @@ public class ConnectedClientConnectionTest new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_ID, 0L), 0L); ModifyTransactionRequestBuilder reqBuilder = new ModifyTransactionRequestBuilder(identifier, replyToProbe.ref()); - reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.empty(), Builders.containerBuilder() - .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create( - QName.create("namespace", "localName"))).build())); + reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.of(), + ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName"))) + .build())); reqBuilder.setSequence(0L); final Request request = reqBuilder.build(); connection.sendRequest(request, callback); diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java index d566e0ec3b..f77500328d 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java @@ -38,8 +38,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope; import org.opendaylight.controller.cluster.access.concepts.Request; import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope; @@ -64,8 +62,7 @@ public class ConnectingClientConnectionTest { } @Override - protected AbstractRequestFailureProxy externalizableProxy( - final ABIVersion version) { + protected SerialForm externalizableProxy(final ABIVersion version) { return null; } @@ -88,7 +85,7 @@ public class ConnectingClientConnectionTest { } @Override - protected AbstractRequestProxy externalizableProxy(final ABIVersion version) { + protected Request.SerialForm externalizableProxy(final ABIVersion version) { return null; } diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java index 2d1afb81ad..b3bfdec66e 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java @@ -26,8 +26,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import org.opendaylight.controller.cluster.access.ABIVersion; -import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy; -import 
org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy; import org.opendaylight.controller.cluster.access.concepts.Request; import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RequestFailure; @@ -47,8 +45,7 @@ public class ConnectionEntryTest { } @Override - protected AbstractRequestFailureProxy externalizableProxy( - final ABIVersion version) { + protected SerialForm externalizableProxy(final ABIVersion version) { return null; } @@ -71,7 +68,7 @@ public class ConnectionEntryTest { } @Override - protected AbstractRequestProxy externalizableProxy(final ABIVersion version) { + protected Request.SerialForm externalizableProxy(final ABIVersion version) { return null; } diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java index 84cfea481b..367acb3b6d 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java @@ -44,7 +44,7 @@ public class ReconnectingClientConnectionTest @Override protected ReconnectingClientConnection createConnection() { - final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10); + final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10); final ConnectingClientConnection connectingConn = new ConnectingClientConnection<>(context, 0L, backend.getName()); final ConnectedClientConnection connectedConn = diff --git a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java index b40aefb663..9974e1b1cd 100644 --- a/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java +++ b/opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java @@ -64,7 +64,7 @@ public class TransmittingTransmitQueueTest extends AbstractTransmitQueueTest transmitted = queue.transmit(entry, now); assertTrue(transmitted.isPresent()); - assertEquals(request, transmitted.get().getRequest()); - assertEquals(callback, transmitted.get().getCallback()); + assertEquals(request, transmitted.orElseThrow().getRequest()); + assertEquals(callback, transmitted.orElseThrow().getCallback()); final RequestEnvelope requestEnvelope = probe.expectMsgClass(RequestEnvelope.class); assertEquals(request, requestEnvelope.getMessage()); diff --git a/opendaylight/md-sal/cds-dom-api/pom.xml b/opendaylight/md-sal/cds-dom-api/pom.xml index 28ae088e5a..a28781c07d 100644 --- a/opendaylight/md-sal/cds-dom-api/pom.xml +++ b/opendaylight/md-sal/cds-dom-api/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -13,18 +13,13 @@ - org.opendaylight.controller - repackaged-akka + org.eclipse.jdt + org.eclipse.jdt.annotation - org.opendaylight.yangtools concepts - - 
org.opendaylight.mdsal - mdsal-dom-api - diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java index 696fec2753..cd20186349 100644 --- a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java +++ b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java @@ -7,14 +7,9 @@ */ package org.opendaylight.controller.cluster.dom.api; -import com.google.common.annotations.Beta; - /** * Enumeration of possible shard leader locations relative to the local node. - * - * @author Robert Varga */ -@Beta public enum LeaderLocation { /** * The leader is co-located on this node. diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java index 4f5fd4e83e..69e34ca44c 100644 --- a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java +++ b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java @@ -7,17 +7,12 @@ */ package org.opendaylight.controller.cluster.dom.api; -import com.google.common.annotations.Beta; -import java.util.EventListener; import org.eclipse.jdt.annotation.NonNull; /** * Listener for shard leader location changes. - * - * @author Robert Varga */ -@Beta -public interface LeaderLocationListener extends EventListener { +public interface LeaderLocationListener { /** * Invoked when shard leader location changes. * diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java deleted file mode 100644 index 61f6426af5..0000000000 --- a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.dom.api; - -import com.google.common.annotations.Beta; -import org.opendaylight.yangtools.concepts.ListenerRegistration; - -/** - * Registration of a {@link LeaderLocationListener}. 
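This deletion, like the later switch of registerCandidate() and registerListener() to return plain Registration, standardizes on org.opendaylight.yangtools.concepts.Registration. A hedged caller-side sketch, assuming Registration declares an exception-free close(); everything except Registration itself is illustrative:

import java.util.function.Supplier;
import org.opendaylight.yangtools.concepts.Registration;

final class RegistrationLifecycle {
    private RegistrationLifecycle() {
        // utility class
    }

    // 'registerAction' stands in for any register*() call that now returns a
    // plain Registration instead of a dedicated registration subtype.
    static void registerBriefly(final Supplier<Registration> registerAction) {
        final Registration reg = registerAction.get();
        try {
            // ... use the registered listener/candidate ...
        } finally {
            reg.close();    // no checked exception declared, so a plain finally suffices
        }
    }
}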
- * - * @author Robert Varga - * - * @param Listener type - */ -@Beta -public interface LeaderLocationListenerRegistration extends ListenerRegistration { - -} diff --git a/opendaylight/md-sal/cds-mgmt-api/pom.xml b/opendaylight/md-sal/cds-mgmt-api/pom.xml index c60b2e2013..2ea3c28f04 100644 --- a/opendaylight/md-sal/cds-mgmt-api/pom.xml +++ b/opendaylight/md-sal/cds-mgmt-api/pom.xml @@ -4,13 +4,20 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent cds-mgmt-api bundle + + + org.eclipse.jdt + org.eclipse.jdt.annotation + + + diff --git a/opendaylight/md-sal/eos-dom-akka/pom.xml b/opendaylight/md-sal/eos-dom-akka/pom.xml index 0eeed2cee9..dce797ca4f 100644 --- a/opendaylight/md-sal/eos-dom-akka/pom.xml +++ b/opendaylight/md-sal/eos-dom-akka/pom.xml @@ -14,7 +14,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -23,12 +23,13 @@ - com.google.guava - guava + com.github.spotbugs + spotbugs-annotations + true - com.typesafe - config + com.google.guava + guava org.opendaylight.controller @@ -80,8 +81,8 @@ true - javax.annotation - javax.annotation-api + jakarta.annotation + jakarta.annotation-api provided true @@ -90,6 +91,10 @@ scala-library + + com.typesafe.akka + akka-testkit_2.13 + com.typesafe.akka akka-actor-testkit-typed_2.13 @@ -99,6 +104,11 @@ awaitility + + com.typesafe + config + test + org.opendaylight.mdsal mdsal-binding-dom-codec @@ -109,6 +119,11 @@ mdsal-binding-generator test + + org.opendaylight.mdsal + mdsal-binding-runtime-api + test + org.opendaylight.mdsal mdsal-binding-runtime-spi @@ -116,12 +131,12 @@ org.opendaylight.mdsal - mdsal-singleton-common-api + mdsal-singleton-api test org.opendaylight.mdsal - mdsal-singleton-dom-impl + mdsal-singleton-impl test diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java index 9520b58d59..332fb44af7 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java @@ -17,6 +17,7 @@ import akka.cluster.typed.Cluster; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.time.Duration; import java.util.Optional; import java.util.Set; @@ -57,17 +58,17 @@ import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCod import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException; import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration; import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration; import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntities; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInput; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntity; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwner; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.OdlEntityOwnersService; import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.binding.RpcOutput; import org.opendaylight.yangtools.yang.common.Empty; @@ -87,8 +88,7 @@ import org.slf4j.LoggerFactory; */ @Singleton @Component(immediate = true, service = { DOMEntityOwnershipService.class, DataCenterControl.class }) -public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable, - OdlEntityOwnersService { +public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(AkkaEntityOwnershipService.class); private static final String DATACENTER_PREFIX = "dc"; private static final Duration DATACENTER_OP_TIMEOUT = Duration.ofSeconds(20); @@ -140,12 +140,17 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da @Inject @Activate + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", + justification = "Non-final for testing 'this' reference is expected to be stable at registration time") public AkkaEntityOwnershipService(@Reference final ActorSystemProvider actorProvider, @Reference final RpcProviderService rpcProvider, @Reference final BindingCodecTree codecTree) throws ExecutionException, InterruptedException { this(actorProvider.getActorSystem(), codecTree); - reg = rpcProvider.registerRpcImplementation(OdlEntityOwnersService.class, this); + reg = rpcProvider.registerRpcImplementations( + (GetEntity) this::getEntity, + (GetEntities) this::getEntities, + (GetEntityOwner) this::getEntityOwner); } @PreDestroy @@ -160,7 +165,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da } @Override - public DOMEntityOwnershipCandidateRegistration registerCandidate(final DOMEntity entity) + public Registration registerCandidate(final DOMEntity entity) throws CandidateAlreadyRegisteredException { if (!registeredEntities.add(entity)) { throw new CandidateAlreadyRegisteredException(entity); @@ -174,8 +179,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da } @Override - public DOMEntityOwnershipListenerRegistration registerListener(final String entityType, - final DOMEntityOwnershipListener listener) { + public Registration registerListener(final String entityType, final DOMEntityOwnershipListener listener) { LOG.debug("Registering listener {} for type {}", listener, entityType); listenerRegistry.tell(new RegisterListener(entityType, listener)); @@ -221,21 +225,21 @@ public class 
AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da AskPattern.ask(ownerSupervisor, DeactivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler)); } - @Override - public ListenableFuture> getEntities(final GetEntitiesInput input) { + @VisibleForTesting + final ListenableFuture> getEntities(final GetEntitiesInput input) { return toRpcFuture(AskPattern.ask(ownerStateChecker, GetEntitiesRequest::new, QUERY_TIMEOUT, scheduler), reply -> reply.toOutput(iidCodec)); } - @Override - public ListenableFuture> getEntity(final GetEntityInput input) { + @VisibleForTesting + final ListenableFuture> getEntity(final GetEntityInput input) { return toRpcFuture(AskPattern.ask(ownerStateChecker, (final ActorRef replyTo) -> new GetEntityRequest(replyTo, input), QUERY_TIMEOUT, scheduler), GetEntityReply::toOutput); } - @Override - public ListenableFuture> getEntityOwner(final GetEntityOwnerInput input) { + @VisibleForTesting + final ListenableFuture> getEntityOwner(final GetEntityOwnerInput input) { return toRpcFuture(AskPattern.ask(ownerStateChecker, (final ActorRef replyTo) -> new GetEntityOwnerRequest(replyTo, input), QUERY_TIMEOUT, scheduler), GetEntityOwnerReply::toOutput); @@ -282,7 +286,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da future.setException(failure); } else { LOG.debug("{} DataCenter successful", op); - future.set(Empty.getInstance()); + future.set(Empty.value()); } }); return future; diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java index 56a2f099f6..fd80ee1c56 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java @@ -10,11 +10,9 @@ package org.opendaylight.controller.eos.akka; import static java.util.Objects.requireNonNull; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration; import org.opendaylight.yangtools.concepts.AbstractObjectRegistration; -final class CandidateRegistration extends AbstractObjectRegistration - implements DOMEntityOwnershipCandidateRegistration { +final class CandidateRegistration extends AbstractObjectRegistration { private final AkkaEntityOwnershipService service; CandidateRegistration(final DOMEntity instance, final AkkaEntityOwnershipService service) { diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java index 568fbd7b68..f70abc9a86 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.eos.akka; -import com.google.common.annotations.Beta; import com.google.common.util.concurrent.ListenableFuture; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.yangtools.yang.common.Empty; @@ -23,7 +22,6 @@ import org.opendaylight.yangtools.yang.common.Empty; * on any node from the datacenter to be activated. 
Datacenters only need to brought up when using non-default * datacenter or multiple datacenters. */ -@Beta public interface DataCenterControl { /** * Activates the Entity Ownership Service in the datacenter that this method is called. diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java index 435babe8ec..8d101c24c6 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java @@ -12,11 +12,9 @@ import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration; import org.opendaylight.yangtools.concepts.AbstractObjectRegistration; -final class ListenerRegistration extends AbstractObjectRegistration - implements DOMEntityOwnershipListenerRegistration { +final class ListenerRegistration extends AbstractObjectRegistration { private final AkkaEntityOwnershipService service; private final @NonNull String entityType; @@ -27,8 +25,7 @@ final class ListenerRegistration extends AbstractObjectRegistration { final String role = Cluster.get(context.getSystem()).selfMember().getRoles().iterator().next(); listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry"); - candidateRegistry = context.spawn(CandidateRegistryInit.create(), "CandidateRegistry"); final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem()); // start the initial sync behavior that switches to the regular one after syncing ownerSupervisor = clusterSingleton.init( - SingletonActor.of(IdleSupervisor.create(iidCodec), "OwnerSupervisor")); + SingletonActor.of(Behaviors.supervise(IdleSupervisor.create(iidCodec)) + .onFailure(SupervisorStrategy.restart()), "OwnerSupervisor")); + candidateRegistry = context.spawn(CandidateRegistryInit.create(ownerSupervisor), "CandidateRegistry"); ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, iidCodec), "OwnerStateChecker"); @@ -73,7 +75,7 @@ public final class EOSMain extends AbstractBehavior { } private Behavior onTerminate(final Terminate request) { - request.getReplyTo().tell(Empty.getInstance()); + request.getReplyTo().tell(Empty.value()); return Behaviors.stopped(); } } \ No newline at end of file diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java new file mode 100644 index 0000000000..a4366b5736 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.owner.supervisor; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.Behavior; +import akka.actor.typed.javadsl.AbstractBehavior; +import akka.actor.typed.javadsl.ActorContext; +import akka.cluster.ddata.ORMap; +import akka.cluster.ddata.ORSet; +import akka.cluster.ddata.typed.javadsl.DistributedData; +import akka.cluster.ddata.typed.javadsl.Replicator; +import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.time.Duration; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand; +import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry; +import org.opendaylight.mdsal.eos.dom.api.DOMEntity; +import org.slf4j.Logger; + +abstract class AbstractSupervisor extends AbstractBehavior { + + final ReplicatorMessageAdapter>> candidateReplicator; + + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", + justification = "getContext() is non-final") + AbstractSupervisor(final ActorContext context) { + super(context); + + final ActorRef replicator = DistributedData.get(getContext().getSystem()).replicator(); + candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5)); + } + + Behavior onClearCandidatesForMember(final ClearCandidatesForMember command) { + getLogger().debug("Clearing candidates for member: {}", command.getCandidate()); + + candidateReplicator.askGet( + askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, + new Replicator.ReadMajority(Duration.ofSeconds(15)), askReplyTo), + response -> new ClearCandidates(response, command)); + + return this; + } + + Behavior finishClearCandidates(final ClearCandidates command) { + if (command.getResponse() instanceof Replicator.GetSuccess) { + getLogger().debug("Retrieved candidate data, clearing candidates for {}", + command.getOriginalMessage().getCandidate()); + + getContext().spawnAnonymous(CandidateCleaner.create()).tell(command); + } else { + getLogger().debug("Unable to retrieve candidate data for {}, no candidates present sending empty reply", + command.getOriginalMessage().getCandidate()); + command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse()); + } + + return this; + } + + abstract Logger getLogger(); +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java new file mode 100644 index 0000000000..8ce9adb1f9 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.owner.supervisor; + +import akka.actor.typed.ActorRef; +import akka.actor.typed.Behavior; +import akka.actor.typed.javadsl.AbstractBehavior; +import akka.actor.typed.javadsl.ActorContext; +import akka.actor.typed.javadsl.Behaviors; +import akka.actor.typed.javadsl.Receive; +import akka.cluster.ddata.ORMap; +import akka.cluster.ddata.ORSet; +import akka.cluster.ddata.SelfUniqueAddress; +import akka.cluster.ddata.typed.javadsl.DistributedData; +import akka.cluster.ddata.typed.javadsl.Replicator; +import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter; +import java.time.Duration; +import java.util.Map; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesUpdateResponse; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand; +import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry; +import org.opendaylight.mdsal.eos.dom.api.DOMEntity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Actor that can be spawned by all the supervisor implementations that executes clearing of candidates once + * candidate retrieval succeeds. Once candidates for the member are cleared(or immediately if none need to be cleared), + * the actor stops itself. + */ +public final class CandidateCleaner extends AbstractBehavior { + private static final Logger LOG = LoggerFactory.getLogger(CandidateCleaner.class); + + private final ReplicatorMessageAdapter>> candidateReplicator; + private final SelfUniqueAddress node; + + private int remaining = 0; + + private CandidateCleaner(final ActorContext context) { + super(context); + + final ActorRef replicator = DistributedData.get(getContext().getSystem()).replicator(); + candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5)); + node = DistributedData.get(context.getSystem()).selfUniqueAddress(); + + } + + public static Behavior create() { + return Behaviors.setup(CandidateCleaner::new); + } + + @Override + public Receive createReceive() { + return newReceiveBuilder() + .onMessage(ClearCandidates.class, this::onClearCandidates) + .onMessage(ClearCandidatesUpdateResponse.class, this::onClearCandidatesUpdateResponse) + .build(); + } + + private Behavior onClearCandidates(final ClearCandidates command) { + LOG.debug("Clearing candidates for member: {}", command.getOriginalMessage().getCandidate()); + + final ORMap> candidates = + ((Replicator.GetSuccess>>) command.getResponse()) + .get(CandidateRegistry.KEY); + + for (final Map.Entry> entry : candidates.getEntries().entrySet()) { + if (entry.getValue().contains(command.getOriginalMessage().getCandidate())) { + LOG.debug("Removing {} from {}", command.getOriginalMessage().getCandidate(), entry.getKey()); + + remaining++; + candidateReplicator.askUpdate( + askReplyTo -> new Replicator.Update<>( + CandidateRegistry.KEY, + ORMap.empty(), + new Replicator.WriteMajority(Duration.ofSeconds(10)), + askReplyTo, + map -> map.update(node, entry.getKey(), ORSet.empty(), + value -> value.remove(node, 
command.getOriginalMessage().getCandidate()))), + updateResponse -> new ClearCandidatesUpdateResponse(updateResponse, + command.getOriginalMessage().getReplyTo())); + } + } + + if (remaining == 0) { + LOG.debug("Did not clear any candidates for {}", command.getOriginalMessage().getCandidate()); + command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse()); + return Behaviors.stopped(); + } + return this; + } + + private Behavior onClearCandidatesUpdateResponse( + final ClearCandidatesUpdateResponse command) { + remaining--; + if (remaining == 0) { + LOG.debug("Last update response for candidate removal received, replying to: {}", command.getReplyTo()); + command.getReplyTo().tell(new ClearCandidatesResponse()); + return Behaviors.stopped(); + } else { + LOG.debug("Have still {} outstanding requests after {}", remaining, command.getResponse()); + } + return this; + } +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java index 2baeb62fc3..3028552a10 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java @@ -10,7 +10,6 @@ package org.opendaylight.controller.eos.akka.owner.supervisor; import static java.util.Objects.requireNonNull; import akka.actor.typed.Behavior; -import akka.actor.typed.javadsl.AbstractBehavior; import akka.actor.typed.javadsl.ActorContext; import akka.actor.typed.javadsl.Behaviors; import akka.actor.typed.javadsl.Receive; @@ -18,6 +17,8 @@ import akka.cluster.Member; import akka.cluster.typed.Cluster; import akka.pattern.StatusReply; import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest; @@ -32,7 +33,7 @@ import org.slf4j.LoggerFactory; * in the primary datacenter, or is activated on demand. Once the supervisor instance is no longer needed in the * secondary datacenter it needs to be deactivated manually. 
*/ -public final class IdleSupervisor extends AbstractBehavior { +public final class IdleSupervisor extends AbstractSupervisor { private static final Logger LOG = LoggerFactory.getLogger(IdleSupervisor.class); private static final String DATACENTER_PREFIX = "dc-"; @@ -56,7 +57,6 @@ public final class IdleSupervisor extends AbstractBehavior create(final BindingInstanceIdentifierCodec iidCodec) { - return Behaviors.setup(context -> new IdleSupervisor(context, iidCodec)); } @@ -67,6 +67,8 @@ public final class IdleSupervisor extends AbstractBehavior role.startsWith(DATACENTER_PREFIX)) .findFirst() .orElseThrow(() -> new IllegalArgumentException(selfMember + " does not have a valid role")); } + + @Override + Logger getLogger() { + return LOG; + } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java index 9841b65b7b..1e2a41beca 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java @@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull; import akka.actor.typed.ActorRef; import akka.actor.typed.Behavior; -import akka.actor.typed.javadsl.AbstractBehavior; import akka.actor.typed.javadsl.ActorContext; import akka.actor.typed.javadsl.Behaviors; import akka.actor.typed.javadsl.Receive; @@ -47,6 +46,8 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; import org.opendaylight.controller.eos.akka.owner.supervisor.command.AbstractEntityRequest; import org.opendaylight.controller.eos.akka.owner.supervisor.command.CandidatesChanged; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember; import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterDeactivated; import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply; @@ -73,7 +74,7 @@ import scala.collection.JavaConverters; * registry in distributed-data and picks entity owners based on the current cluster state and registered candidates. * On cluster up/down etc. events the owners are reassigned if possible. 
*/ -public final class OwnerSupervisor extends AbstractBehavior { +public final class OwnerSupervisor extends AbstractSupervisor { private static final Logger LOG = LoggerFactory.getLogger(OwnerSupervisor.class); private static final String DATACENTER_PREFIX = "dc-"; @@ -83,7 +84,7 @@ public final class OwnerSupervisor extends AbstractBehavior clock = (currentTimestamp, value) -> currentTimestamp + 1; + private static final LWWRegister.Clock CLOCK = (currentTimestamp, value) -> currentTimestamp + 1; private final Cluster cluster; private final SelfUniqueAddress node; @@ -152,8 +153,7 @@ public final class OwnerSupervisor extends AbstractBehavior>>(context, replicator, - Duration.ofSeconds(5)).subscribe(CandidateRegistry.KEY, CandidatesChanged::new); + candidateReplicator.subscribe(CandidateRegistry.KEY, CandidatesChanged::new); LOG.debug("Owner Supervisor started"); } @@ -176,6 +176,8 @@ public final class OwnerSupervisor extends AbstractBehavior ownersToReassign = new HashSet<>(); for (final String owner : ownerToEntity.keys()) { - if (!activeMembers.contains(owner)) { + if (!isActiveCandidate(owner)) { ownersToReassign.add(owner); } } @@ -259,8 +261,10 @@ public final class OwnerSupervisor extends AbstractBehavior candidatesForEntity = currentCandidates.get(entity); if (candidatesForEntity.isEmpty()) { @@ -356,7 +371,7 @@ public final class OwnerSupervisor extends AbstractBehavior(node.uniqueAddress(), candidate, 0), Replicator.writeLocal(), askReplyTo, - register -> register.withValue(node, candidate, clock)), + register -> register.withValue(node, candidate, CLOCK)), OwnerChanged::new); } @@ -465,4 +480,9 @@ public final class OwnerSupervisor extends AbstractBehavior role.startsWith(DATACENTER_PREFIX)) .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found.")); } + + @Override + Logger getLogger() { + return LOG; + } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java index 092f532dfb..32a0a64369 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java @@ -11,7 +11,6 @@ import static java.util.Objects.requireNonNull; import akka.actor.typed.ActorRef; import akka.actor.typed.Behavior; -import akka.actor.typed.javadsl.AbstractBehavior; import akka.actor.typed.javadsl.ActorContext; import akka.actor.typed.javadsl.Behaviors; import akka.actor.typed.javadsl.Receive; @@ -29,6 +28,8 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; import org.eclipse.jdt.annotation.Nullable; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember; import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterActivated; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest; import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest; @@ -48,7 +49,7 @@ import org.slf4j.LoggerFactory; * Behavior that retrieves current candidates/owners from distributed-data and switches to OwnerSupervisor when the * sync has finished. 
*/ -public final class OwnerSyncer extends AbstractBehavior { +public final class OwnerSyncer extends AbstractSupervisor { private static final Logger LOG = LoggerFactory.getLogger(OwnerSyncer.class); private final ReplicatorMessageAdapter> ownerReplicator; @@ -72,8 +73,7 @@ public final class OwnerSyncer extends AbstractBehavior ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5)); - new ReplicatorMessageAdapter>>(context, replicator, - Duration.ofSeconds(5)).askGet( + candidateReplicator.askGet( askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo), InitialCandidateSync::new); @@ -95,6 +95,8 @@ public final class OwnerSyncer extends AbstractBehavior .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc) .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc) .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc) + .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember) + .onMessage(ClearCandidates.class, this::finishClearCandidates) .build(); } @@ -176,4 +178,9 @@ public final class OwnerSyncer extends AbstractBehavior private static void handleNotFoundOwnerRsp(final Replicator.NotFound> rsp) { LOG.debug("Owner not found. {}", rsp); } + + @Override + Logger getLogger() { + return LOG; + } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InitialCandidateSync.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java similarity index 58% rename from opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InitialCandidateSync.java rename to opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java index 2760d2e7e0..64971c80bc 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InitialCandidateSync.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java @@ -5,22 +5,29 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ -package org.opendaylight.controller.eos.akka.registry.candidate.command; +package org.opendaylight.controller.eos.akka.owner.supervisor.command; import akka.cluster.ddata.ORMap; import akka.cluster.ddata.ORSet; import akka.cluster.ddata.typed.javadsl.Replicator; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -public class InitialCandidateSync extends CandidateRegistryCommand { +public class ClearCandidates extends OwnerSupervisorCommand { private final Replicator.GetResponse>> response; + private final ClearCandidatesForMember originalMessage; - public InitialCandidateSync(final Replicator.GetResponse>> response) { + public ClearCandidates(final Replicator.GetResponse>> response, + final ClearCandidatesForMember originalMessage) { this.response = response; + this.originalMessage = originalMessage; } public Replicator.GetResponse>> getResponse() { return response; } + + public ClearCandidatesForMember getOriginalMessage() { + return originalMessage; + } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java 
b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java new file mode 100644 index 0000000000..1e27cb5161 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.owner.supervisor.command; + +import akka.actor.typed.ActorRef; +import java.io.Serializable; + +/** + * Request sent from Candidate registration actors to clear the candidate from all entities. Issued at start to clear + * candidates from previous iteration of a node. Owner supervisor responds to this request to notify the registration + * actor it can start up and process candidate requests. + */ +public class ClearCandidatesForMember extends OwnerSupervisorCommand implements Serializable { + private static final long serialVersionUID = 1L; + + private final ActorRef replyTo; + private final String candidate; + + public ClearCandidatesForMember(final ActorRef replyTo, final String candidate) { + this.replyTo = replyTo; + this.candidate = candidate; + } + + public ActorRef getReplyTo() { + return replyTo; + } + + public String getCandidate() { + return candidate; + } +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java new file mode 100644 index 0000000000..7399bd859f --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.owner.supervisor.command; + +import java.io.Serializable; + +/** + * Response sent from OwnerSupervisor to the ClearCandidatesForMember request, notifying the caller that removal has + * finished. + */ +public class ClearCandidatesResponse implements Serializable { + + private static final long serialVersionUID = 1L; +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java new file mode 100644 index 0000000000..9f48323028 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.owner.supervisor.command; + +import akka.actor.typed.ActorRef; +import akka.cluster.ddata.ORMap; +import akka.cluster.ddata.ORSet; +import akka.cluster.ddata.typed.javadsl.Replicator; +import org.opendaylight.mdsal.eos.dom.api.DOMEntity; + +public class ClearCandidatesUpdateResponse extends OwnerSupervisorCommand { + private final Replicator.UpdateResponse>> response; + private final ActorRef replyTo; + + public ClearCandidatesUpdateResponse(final Replicator.UpdateResponse>> response, + final ActorRef replyTo) { + this.response = response; + this.replyTo = replyTo; + } + + public Replicator.UpdateResponse>> getResponse() { + return response; + } + + + public ActorRef getReplyTo() { + return replyTo; + } +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java index 16c2ab6258..03ecbae10a 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java @@ -12,6 +12,7 @@ import akka.actor.typed.javadsl.AbstractBehavior; import akka.actor.typed.javadsl.ActorContext; import akka.actor.typed.javadsl.Behaviors; import akka.actor.typed.javadsl.Receive; +import akka.cluster.Cluster; import akka.cluster.ddata.Key; import akka.cluster.ddata.ORMap; import akka.cluster.ddata.ORMapKey; @@ -20,6 +21,7 @@ import akka.cluster.ddata.SelfUniqueAddress; import akka.cluster.ddata.typed.javadsl.DistributedData; import akka.cluster.ddata.typed.javadsl.Replicator; import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter; +import java.util.Set; import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand; import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse; import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate; @@ -35,10 +37,13 @@ public final class CandidateRegistry extends AbstractBehavior>> KEY = new ORMapKey<>("candidateRegistry"); private final ReplicatorMessageAdapter>> replicatorAdapter; private final SelfUniqueAddress node; + private final String selfRole; private CandidateRegistry(final ActorContext context, final ReplicatorMessageAdapter create() { @@ -69,7 +75,7 @@ public final class CandidateRegistry extends AbstractBehavior onRegisterCandidate(final RegisterCandidate registerCandidate) { - LOG.debug("Registering candidate({}) for entity: {}", + LOG.debug("{} - Registering candidate({}) for entity: {}", selfRole, registerCandidate.getCandidate(), registerCandidate.getEntity()); replicatorAdapter.askUpdate( askReplyTo -> new Replicator.Update<>( @@ -84,7 +90,7 @@ public final class CandidateRegistry extends AbstractBehavior onUnregisterCandidate(final UnregisterCandidate unregisterCandidate) { - LOG.debug("Removing candidate({}) from entity: {}", + LOG.debug("{} - Removing candidate({}) from entity: {}", selfRole, unregisterCandidate.getCandidate(), unregisterCandidate.getEntity()); 
replicatorAdapter.askUpdate( askReplyTo -> new Replicator.Update<>( @@ -99,7 +105,12 @@ public final class CandidateRegistry extends AbstractBehavior onInternalUpdateResponse(final InternalUpdateResponse updateResponse) { - LOG.debug("Received update response: {}", updateResponse.getRsp()); + LOG.debug("{} : Received update response: {}", selfRole, updateResponse.getRsp()); return this; } + + private static String extractRole(final Set roles) { + return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX)) + .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found.")); + } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java index 34cfe78bad..f9ca06896e 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java @@ -7,28 +7,25 @@ */ package org.opendaylight.controller.eos.akka.registry.candidate; +import akka.actor.typed.ActorRef; import akka.actor.typed.Behavior; import akka.actor.typed.javadsl.AbstractBehavior; import akka.actor.typed.javadsl.ActorContext; import akka.actor.typed.javadsl.Behaviors; import akka.actor.typed.javadsl.Receive; import akka.actor.typed.javadsl.StashBuffer; -import akka.cluster.ddata.ORMap; -import akka.cluster.ddata.ORSet; -import akka.cluster.ddata.SelfUniqueAddress; -import akka.cluster.ddata.typed.javadsl.DistributedData; -import akka.cluster.ddata.typed.javadsl.Replicator; -import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter; -import akka.cluster.typed.Cluster; +import akka.cluster.Cluster; import java.time.Duration; -import java.util.Map; import java.util.Set; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse; +import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand; import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand; -import org.opendaylight.controller.eos.akka.registry.candidate.command.InitialCandidateSync; -import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse; +import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFailed; +import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFinished; import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate; +import org.opendaylight.controller.eos.akka.registry.candidate.command.RemovePreviousCandidates; import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate; -import org.opendaylight.mdsal.eos.dom.api.DOMEntity; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,91 +36,70 @@ public class CandidateRegistryInit extends AbstractBehavior stash; - private final ReplicatorMessageAdapter>> candidateReplicator; + private final ActorRef ownerSupervisor; private final String selfRole; - private final SelfUniqueAddress node; public CandidateRegistryInit(final ActorContext ctx, final StashBuffer stash, - final ReplicatorMessageAdapter>> 
candidateReplicator) { + final ActorRef ownerSupervisor) { super(ctx); this.stash = stash; - this.candidateReplicator = candidateReplicator; - selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles()); + this.ownerSupervisor = ownerSupervisor; + this.selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles()); - this.node = DistributedData.get(ctx.getSystem()).selfUniqueAddress(); + ctx.getSelf().tell(new RemovePreviousCandidates()); - - this.candidateReplicator.askGet( - askReplyTo -> new Replicator.Get<>( - CandidateRegistry.KEY, - new Replicator.ReadAll(Duration.ofSeconds(15)), askReplyTo), - InitialCandidateSync::new); - - LOG.debug("CandidateRegistry syncing behavior started."); + LOG.debug("{} : CandidateRegistry syncing behavior started.", selfRole); } - public static Behavior create() { + public static Behavior create(final ActorRef ownerSupervisor) { return Behaviors.withStash(100, stash -> - Behaviors.setup(ctx -> DistributedData.withReplicatorMessageAdapter( - (ReplicatorMessageAdapter>> replicatorAdapter) -> - new CandidateRegistryInit(ctx, stash, replicatorAdapter)))); + Behaviors.setup(ctx -> new CandidateRegistryInit(ctx, stash, ownerSupervisor))); } @Override public Receive createReceive() { return newReceiveBuilder() - .onMessage(InitialCandidateSync.class, this::handleCandidateSync) + .onMessage(RemovePreviousCandidates.class, this::onRemoveCandidates) + .onMessage(CandidateRemovalFinished.class, command -> switchToCandidateRegistry()) + .onMessage(CandidateRemovalFailed.class, this::candidateRemovalFailed) .onMessage(RegisterCandidate.class, this::stashCommand) .onMessage(UnregisterCandidate.class, this::stashCommand) .build(); } - private Behavior stashCommand(final CandidateRegistryCommand command) { - stash.stash(command); + private Behavior candidateRemovalFailed(final CandidateRemovalFailed command) { + LOG.warn("{} : Initial removal of candidates from previous iteration failed. 
Rescheduling.", selfRole, + command.getThrowable()); + getContext().getSelf().tell(new RemovePreviousCandidates()); return this; } - private Behavior handleCandidateSync(final InitialCandidateSync command) { - final Replicator.GetResponse>> response = command.getResponse(); - if (response instanceof Replicator.GetSuccess) { - clearExistingCandidates((Replicator.GetSuccess>>) response); - } - // TODO implement other cases if needed, seems like only a retry would be needed here when we get a failure - // from distributed data - return switchToCandidateRegistry(); - } - - private void clearExistingCandidates(final Replicator.GetSuccess>> response) { - final Map> entitiesToCandidates = response.get(response.key()).getEntries(); + private Behavior onRemoveCandidates(final RemovePreviousCandidates command) { + LOG.debug("Sending RemovePreviousCandidates."); + getContext().ask(ClearCandidatesResponse.class, + ownerSupervisor, Duration.ofSeconds(5), + ref -> new ClearCandidatesForMember(ref, selfRole), + (response, throwable) -> { + if (response != null) { + return new CandidateRemovalFinished(); + } else { + return new CandidateRemovalFailed(throwable); + } + }); - for (Map.Entry> entry : entitiesToCandidates.entrySet()) { - if (entry.getValue().getElements().contains(selfRole)) { - LOG.debug("Clearing candidate: {} from entity: {}, current state of entity candidates: {}", - selfRole, entry.getKey(), entry.getValue().getElements()); - clearRegistration(entry.getKey()); - } - } + return this; } - private void clearRegistration(final DOMEntity entity) { - candidateReplicator.askUpdate( - askReplyTo -> new Replicator.Update<>( - CandidateRegistry.KEY, - ORMap.empty(), - Replicator.writeLocal(), - askReplyTo, - map -> map.update(node, entity, ORSet.empty(), - value -> value.remove(node, selfRole))), - InternalUpdateResponse::new); + private Behavior stashCommand(final CandidateRegistryCommand command) { + LOG.debug("Stashing {}", command); + stash.stash(command); + return this; } private Behavior switchToCandidateRegistry() { - LOG.debug("Clearing of candidates from previous instance done, switching to CandidateRegistry."); + LOG.debug("{} : Clearing of candidates from previous instance done, switching to CandidateRegistry.", selfRole); return stash.unstashAll(CandidateRegistry.create()); } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java new file mode 100644 index 0000000000..04109429d8 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.registry.candidate.command; + +public class CandidateRemovalFailed extends CandidateRegistryCommand { + + private final Throwable throwable; + + public CandidateRemovalFailed(final Throwable throwable) { + this.throwable = throwable; + } + + public Throwable getThrowable() { + return throwable; + } +} diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/package-info.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFinished.java similarity index 67% rename from opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/package-info.java rename to opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFinished.java index f5b65ee83a..3c42b10ae2 100644 --- a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/package-info.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFinished.java @@ -5,5 +5,7 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ -@org.osgi.service.component.annotations.RequireServiceComponentRuntime -package org.opendaylight.controller.config.yang.netty.threadgroup; \ No newline at end of file +package org.opendaylight.controller.eos.akka.registry.candidate.command; + +public class CandidateRemovalFinished extends CandidateRegistryCommand { +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java new file mode 100644 index 0000000000..9e1da1e970 --- /dev/null +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.eos.akka.registry.candidate.command; + +/** + * Message sent to candidate registry initial behavior by self to trigger and retrigger(in case of failures) removal + * of candidates registered by the previous iteration of this node. 
+ */ +public class RemovePreviousCandidates extends CandidateRegistryCommand { +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java index 279ee8fa8e..4419fdf4db 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java @@ -24,9 +24,8 @@ import org.opendaylight.controller.eos.akka.registry.listener.owner.command.List import org.opendaylight.controller.eos.akka.registry.listener.owner.command.OwnerChanged; import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged; import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand; -import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState; +import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -99,9 +98,7 @@ public class SingleEntityListenerActor extends AbstractBehavior private void triggerNoOwnerNotification() { LOG.debug("Triggering initial notification without an owner for: {}", entity); - - toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange( - entity, EntityOwnershipChangeState.REMOTE_OWNERSHIP_LOST_NO_OWNER))); + toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.REMOTE_OWNERSHIP_LOST_NO_OWNER, false)); } private Behavior onOwnerChanged(final OwnerChanged ownerChanged) { @@ -133,8 +130,8 @@ public class SingleEntityListenerActor extends AbstractBehavior currentOwner = newOwner; - toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange( - entity, EntityOwnershipChangeState.from(wasOwner, isOwner, hasOwner)))); + toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, isOwner, hasOwner), + false)); } private void handleOwnerLost(final Replicator.Deleted> changed) { @@ -143,7 +140,6 @@ public class SingleEntityListenerActor extends AbstractBehavior LOG.debug("Owner lost for entity:{}, currentOwner: {}, wasOwner: {}", entity, currentOwner, wasOwner); currentOwner = ""; - toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange( - entity, EntityOwnershipChangeState.from(wasOwner, false, false)))); + toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, false, false), false)); } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java index e97fe77a2f..7e445c581c 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java @@ -110,8 +110,7 @@ public class EntityTypeListenerActor extends AbstractBehavior onOwnerChanged(final 
EntityOwnerChanged rsp) { LOG.debug("{} : Entity-type: {} listener, owner change: {}", localMember, entityType, rsp); - - listener.ownershipChanged(rsp.getOwnershipChange()); + listener.ownershipChanged(rsp.entity(), rsp.change(), false); return this; } diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java index 02d0e2fe50..ee0f54f431 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java +++ b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java @@ -10,25 +10,45 @@ package org.opendaylight.controller.eos.akka.registry.listener.type.command; import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerActor; +import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange; +import org.opendaylight.mdsal.eos.dom.api.DOMEntity; /** - * Notification sent to EntityTypeListenerActor when there is an owner change for an Entity of a given type. + * Notification sent to {@link EntityTypeListenerActor} when there is an owner change for an Entity of a given type. */ +@NonNullByDefault public final class EntityOwnerChanged extends TypeListenerCommand { - private final @NonNull DOMEntityOwnershipChange ownershipChange; + private final DOMEntity entity; + private final EntityOwnershipStateChange change; + private final boolean inJeopardy; - public EntityOwnerChanged(final DOMEntityOwnershipChange ownershipChange) { - this.ownershipChange = requireNonNull(ownershipChange); + public EntityOwnerChanged(final DOMEntity entity, final EntityOwnershipStateChange change, + final boolean inJeopardy) { + this.entity = requireNonNull(entity); + this.change = requireNonNull(change); + this.inJeopardy = requireNonNull(inJeopardy); } - public @NonNull DOMEntityOwnershipChange getOwnershipChange() { - return ownershipChange; + public DOMEntity entity() { + return entity; + } + + public EntityOwnershipStateChange change() { + return change; + } + + public boolean inJeopardy() { + return inJeopardy; } @Override public String toString() { - return MoreObjects.toStringHelper(this).add("ownershipChange", ownershipChange).toString(); + return MoreObjects.toStringHelper(this) + .add("entity", entity) + .add("change", change) + .add("inJeopardy", inJeopardy) + .toString(); } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java index 27b4bcba84..6adba42c09 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java @@ -19,6 +19,8 @@ import akka.actor.typed.javadsl.AskPattern; import akka.actor.typed.javadsl.Behaviors; import akka.cluster.ddata.LWWRegister; import 
akka.cluster.ddata.LWWRegisterKey; +import akka.cluster.ddata.ORMap; +import akka.cluster.ddata.ORSet; import akka.cluster.ddata.typed.javadsl.DistributedData; import akka.cluster.ddata.typed.javadsl.Replicator; import com.typesafe.config.Config; @@ -44,17 +46,19 @@ import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReach import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent; import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand; import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply; +import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry; import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand; import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate; import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate; +import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged; import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener; import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand; import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext; import org.opendaylight.mdsal.binding.generator.impl.DefaultBindingRuntimeGenerator; import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeGenerator; import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers; +import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange; import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -263,7 +267,7 @@ public abstract class AbstractNativeEosTest { } protected static void waitUntillOwnerPresent(final ClusterNode clusterNode, final DOMEntity entity) { - await().until(() -> { + await().atMost(Duration.ofSeconds(15)).until(() -> { final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem()); final CompletionStage>> ask = AskPattern.ask(distributedData.replicator(), @@ -284,6 +288,32 @@ public abstract class AbstractNativeEosTest { }); } + protected static void waitUntillCandidatePresent(final ClusterNode clusterNode, final DOMEntity entity, + final String candidate) { + await().atMost(Duration.ofSeconds(15)).until(() -> { + final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem()); + + final CompletionStage>>> ask = + AskPattern.ask(distributedData.replicator(), + replyTo -> new Replicator.Get<>( + CandidateRegistry.KEY, Replicator.readLocal(), replyTo), + Duration.ofSeconds(5), + clusterNode.getActorSystem().scheduler()); + + final Replicator.GetResponse>> response = + ask.toCompletableFuture().get(5, TimeUnit.SECONDS); + + if (response instanceof Replicator.GetSuccess) { + final Map> entries = + ((Replicator.GetSuccess>>) response).dataValue().getEntries(); + + return entries.get(entity).contains(candidate); + + } + return false; + }); + } + protected static CompletableFuture activateDatacenter(final ClusterNode clusterNode) { final CompletionStage ask = AskPattern.ask(clusterNode.getOwnerSupervisor(), @@ -306,14 +336,14 @@ public abstract class AbstractNativeEosTest { final boolean hasOwner, final boolean isOwner, final boolean 
wasOwner) { await().until(() -> !listener.getChanges().isEmpty()); - await().untilAsserted(() -> { - final List changes = listener.getChanges(); - final DOMEntityOwnershipChange domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1); - assertEquals(entity, domEntityOwnershipChange.getEntity()); + await().atMost(Duration.ofSeconds(10)).untilAsserted(() -> { + final var changes = listener.getChanges(); + final var domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1); + assertEquals(entity, domEntityOwnershipChange.entity()); - assertEquals(hasOwner, domEntityOwnershipChange.getState().hasOwner()); - assertEquals(isOwner, domEntityOwnershipChange.getState().isOwner()); - assertEquals(wasOwner, domEntityOwnershipChange.getState().wasOwner()); + assertEquals(hasOwner, domEntityOwnershipChange.change().hasOwner()); + assertEquals(isOwner, domEntityOwnershipChange.change().isOwner()); + assertEquals(wasOwner, domEntityOwnershipChange.change().wasOwner()); }); } @@ -386,11 +416,9 @@ public abstract class AbstractNativeEosTest { } protected static final class MockEntityOwnershipListener implements DOMEntityOwnershipListener { - - private final Logger log; - - private final List changes = new ArrayList<>(); + private final List changes = new ArrayList<>(); private final String member; + private final Logger log; public MockEntityOwnershipListener(final String member) { log = LoggerFactory.getLogger("EOS-listener-" + member); @@ -398,13 +426,15 @@ public abstract class AbstractNativeEosTest { } @Override - public void ownershipChanged(final DOMEntityOwnershipChange ownershipChange) { - log.info("{} Received ownershipCHanged: {}", member, ownershipChange); + public void ownershipChanged(final DOMEntity entity, final EntityOwnershipStateChange change, + final boolean inJeopardy) { + final var changed = new EntityOwnerChanged(entity, change, inJeopardy); + log.info("{} Received ownershipCHanged: {}", member, changed); log.info("{} changes: {}", member, changes.size()); - changes.add(ownershipChange); + changes.add(changed); } - public List getChanges() { + public List getChanges() { return changes; } diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java index 199e7931bb..652102f903 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java @@ -26,6 +26,7 @@ import akka.cluster.ddata.typed.javadsl.DistributedData; import akka.cluster.ddata.typed.javadsl.Replicator; import com.typesafe.config.ConfigFactory; import java.time.Duration; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletionStage; @@ -40,24 +41,19 @@ import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException; import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration; import 
org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.common.QName; -import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; @@ -75,7 +71,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { @Before public void setUp() throws Exception { system = ActorSystem.create("ClusterSystem", ConfigFactory.load()); - typedSystem = Adapter.toTyped(this.system); + typedSystem = Adapter.toTyped(system); replicator = DistributedData.get(typedSystem).replicator(); service = new AkkaEntityOwnershipService(system, CODEC_CONTEXT); @@ -92,9 +88,9 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME); final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId); - final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity); + final Registration reg = service.registerCandidate(entity); + assertNotNull(reg); - verifyEntityOwnershipCandidateRegistration(entity, reg); verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1"); try { @@ -106,9 +102,9 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { } final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId); - final DOMEntityOwnershipCandidateRegistration reg2 = service.registerCandidate(entity2); + final Registration reg2 = service.registerCandidate(entity2); - verifyEntityOwnershipCandidateRegistration(entity2, reg2); + assertNotNull(reg2); verifyEntityCandidateRegistered(ENTITY_TYPE2, entityId, "member-1"); } @@ -117,9 +113,9 @@ public class 
AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME); final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId); - final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity); + final Registration reg = service.registerCandidate(entity); + assertNotNull(reg); - verifyEntityOwnershipCandidateRegistration(entity, reg); verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1"); reg.close(); @@ -136,13 +132,11 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId); final MockEntityOwnershipListener listener = new MockEntityOwnershipListener("member-1"); - final DOMEntityOwnershipListenerRegistration reg = service.registerListener(entity.getType(), listener); + final Registration reg = service.registerListener(entity.getType(), listener); assertNotNull("EntityOwnershipListenerRegistration null", reg); - assertEquals("getEntityType", entity.getType(), reg.getEntityType()); - assertEquals("getInstance", listener, reg.getInstance()); - final DOMEntityOwnershipCandidateRegistration candidate = service.registerCandidate(entity); + final Registration candidate = service.registerCandidate(entity); verifyListenerState(listener, entity, true, true, false); final int changes = listener.getChanges().size(); @@ -161,7 +155,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { public void testGetOwnershipState() throws Exception { final DOMEntity entity = new DOMEntity(ENTITY_TYPE, "one"); - final DOMEntityOwnershipCandidateRegistration registration = service.registerCandidate(entity); + final Registration registration = service.registerCandidate(entity); verifyGetOwnershipState(service, entity, EntityOwnershipState.IS_OWNER); final RunningContext runningContext = service.getRunningContext(); @@ -193,7 +187,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { @Test public void testEntityRetrievalWithYiid() throws Exception { - final YangInstanceIdentifier entityId = YangInstanceIdentifier.create(new NodeIdentifier(NetworkTopology.QNAME), + final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME), new NodeIdentifier(Topology.QNAME), NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"), new NodeIdentifier(Node.QNAME), @@ -201,50 +195,49 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId); - final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity); + final Registration reg = service.registerCandidate(entity); - verifyEntityOwnershipCandidateRegistration(entity, reg); + assertNotNull(reg); verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1"); - RpcResult getEntityResult = service.getEntity(new GetEntityInputBuilder() - .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) - .setType(new EntityType(ENTITY_TYPE)) - .build()) - .get(); + var result = service.getEntity(new GetEntityInputBuilder() + .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) + .setType(new EntityType(ENTITY_TYPE)) + .build()) + .get() + .getResult(); - assertEquals(getEntityResult.getResult().getOwnerNode().getValue(), "member-1"); - 
assertEquals(getEntityResult.getResult().getCandidateNodes().get(0).getValue(), "member-1"); + assertEquals(result.getOwnerNode().getValue(), "member-1"); + assertEquals(result.getCandidateNodes().get(0).getValue(), "member-1"); // we should not be able to retrieve the entity when using string final String entityPathEncoded = "/network-topology:network-topology/topology[topology-id='test']/node[node-id='test://test-node']"; - getEntityResult = service.getEntity(new GetEntityInputBuilder() - .setName(new EntityName(entityPathEncoded)) - .setType(new EntityType(ENTITY_TYPE)) - .build()) - .get(); - - assertNull(getEntityResult.getResult().getOwnerNode()); - assertTrue(getEntityResult.getResult().getCandidateNodes().isEmpty()); - - final GetEntitiesOutput getEntitiesResult = - service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult(); - - assertEquals(getEntitiesResult.getEntities().size(), 1); - assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey( - new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), new EntityType(ENTITY_TYPE))) - .getCandidateNodes().contains(new NodeName("member-1"))); - assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey( + result = service.getEntity(new GetEntityInputBuilder() + .setName(new EntityName(entityPathEncoded)) + .setType(new EntityType(ENTITY_TYPE)) + .build()) + .get() + .getResult(); + + assertNull(result.getOwnerNode()); + assertEquals(List.of(), result.getCandidateNodes()); + + final var getEntitiesResult = service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult(); + final var entities = getEntitiesResult.nonnullEntities(); + assertEquals(1, entities.size()); + assertTrue(entities.get(new EntitiesKey(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), + new EntityType(ENTITY_TYPE))).getCandidateNodes().contains(new NodeName("member-1"))); + assertTrue(entities.get(new EntitiesKey( new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), new EntityType(ENTITY_TYPE))) .getOwnerNode().getValue().equals("member-1")); - final GetEntityOwnerOutput getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder() - .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) - .setType(new EntityType(ENTITY_TYPE)) - .build()) - .get().getResult(); + final var getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder() + .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) + .setType(new EntityType(ENTITY_TYPE)) + .build()).get().getResult(); assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1"); } @@ -252,9 +245,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity, final EntityOwnershipState expState) { await().atMost(Duration.ofSeconds(5)).untilAsserted(() -> { - final Optional state = service.getOwnershipState(entity); - assertTrue("getOwnershipState present", state.isPresent()); - assertEquals("EntityOwnershipState", expState, state.get()); + assertEquals(Optional.of(expState), service.getOwnershipState(entity)); }); } @@ -310,10 +301,4 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest { return success.get(CandidateRegistry.KEY).getEntries(); } - - private static void verifyEntityOwnershipCandidateRegistration(final DOMEntity entity, - final DOMEntityOwnershipCandidateRegistration reg) { - 
assertNotNull("EntityOwnershipCandidateRegistration null", reg); - assertEquals("getInstance", entity, reg.getInstance()); - } -} \ No newline at end of file +} diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java index 28566c7f15..e4927ca67b 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java @@ -60,7 +60,6 @@ public class DataCentersTest extends AbstractNativeEosTest { public void testDatacenterActivation() throws Exception { registerCandidates(node1, ENTITY_1, "member-1"); registerCandidates(node3, ENTITY_1, "member-3"); - registerCandidates(node4, ENTITY_1, "member-4"); activateDatacenter(node1).get(); @@ -82,18 +81,15 @@ public class DataCentersTest extends AbstractNativeEosTest { verifyListenerState(listener1, ENTITY_1, true, false, false); verifyListenerState(listener2, ENTITY_1, true, true, false); + registerCandidates(node4, ENTITY_1, "member-4"); unregisterCandidates(node3, ENTITY_1, "member-3"); // checking index after notif so current + 1 verifyListenerState(listener1, ENTITY_1, true, false, false); - verifyListenerState(listener2, ENTITY_1, true, false, true); + verifyListenerState(listener2, ENTITY_1, true, false, false); deactivateDatacenter(node3).get(); activateDatacenter(node2).get(); - - // no candidate in dc-primary so no owners after datacenter activation - verifyListenerState(listener1, ENTITY_1, false, false, false); - verifyListenerState(listener2, ENTITY_1, false, false, false); } @Test @@ -102,9 +98,13 @@ public class DataCentersTest extends AbstractNativeEosTest { registerCandidates(node3, ENTITY_1, "member-3"); registerCandidates(node4, ENTITY_1, "member-4"); + waitUntillCandidatePresent(node1, ENTITY_1, "member-1"); + waitUntillCandidatePresent(node1, ENTITY_1, "member-3"); + waitUntillCandidatePresent(node1, ENTITY_1, "member-4"); + activateDatacenter(node1).get(); - waitUntillOwnerPresent(node1, ENTITY_1); + waitUntillOwnerPresent(node4, ENTITY_1); final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1); verifyListenerState(listener1, ENTITY_1, true, true, false); @@ -122,6 +122,7 @@ public class DataCentersTest extends AbstractNativeEosTest { activateDatacenter(node3).get(); verifyListenerState(listener2, ENTITY_1, true, true, false); + waitUntillOwnerPresent(node3, ENTITY_1); unregisterCandidates(node3, ENTITY_1, "member-3"); verifyListenerState(listener2, ENTITY_1, true, false, true); } diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java index 3e43be1b49..28991c1e51 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java @@ -26,22 +26,18 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology; import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.common.QName; -import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; @@ -64,7 +60,7 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest { service2 = new AkkaEntityOwnershipService(system2, CODEC_CONTEXT); // need to wait until all nodes are ready - final Cluster cluster = Cluster.get(Adapter.toTyped(system2)); + final var cluster = Cluster.get(Adapter.toTyped(system2)); Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> { final List members = new ArrayList<>(); cluster.state().getMembers().forEach(members::add); @@ -97,7 +93,7 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest { */ @Test public void testEntityRetrievalWithUnavailableSupervisor() throws Exception { - final YangInstanceIdentifier entityId = YangInstanceIdentifier.create(new NodeIdentifier(NetworkTopology.QNAME), + final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME), new NodeIdentifier(Topology.QNAME), NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"), new NodeIdentifier(Node.QNAME), @@ -105,14 +101,13 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest { final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId); - final DOMEntityOwnershipCandidateRegistration reg = service1.registerCandidate(entity); + final Registration reg = service1.registerCandidate(entity); await().untilAsserted(() -> { - final RpcResult getEntityResult = service1.getEntity(new GetEntityInputBuilder() - .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) - .setType(new 
EntityType(ENTITY_TYPE)) - .build()) - .get(); + final var getEntityResult = service1.getEntity(new GetEntityInputBuilder() + .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) + .setType(new EntityType(ENTITY_TYPE)) + .build()).get(); assertEquals(getEntityResult.getResult().getOwnerNode().getValue(), "member-1"); assertEquals(getEntityResult.getResult().getCandidateNodes().get(0).getValue(), "member-1"); @@ -121,37 +116,34 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest { // keep this under ask timeout to make sure the singleton actor in the inactive datacenter responds with failure // immediately, so that the rpc actor retries with distributed-data asap await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> { - final GetEntitiesOutput getEntitiesResult = - service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult(); - - assertEquals(getEntitiesResult.getEntities().size(), 1); - assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey( - new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), - new EntityType(ENTITY_TYPE))) - .getCandidateNodes().contains(new NodeName("member-1"))); - assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey( - new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), - new EntityType(ENTITY_TYPE))) - .getOwnerNode().getValue().equals("member-1")); + final var getEntitiesResult = service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult(); + final var entities = getEntitiesResult.nonnullEntities(); + assertEquals(1, entities.size()); + assertTrue(entities.get(new EntitiesKey( + new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), + new EntityType(ENTITY_TYPE))) + .getCandidateNodes().contains(new NodeName("member-1"))); + assertTrue(entities.get(new EntitiesKey( + new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), + new EntityType(ENTITY_TYPE))) + .getOwnerNode().getValue().equals("member-1")); }); await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> { - final GetEntityOutput getEntityResult = service2.getEntity(new GetEntityInputBuilder() - .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) - .setType(new EntityType(ENTITY_TYPE)) - .build()) - .get().getResult(); + final var getEntityResult = service2.getEntity(new GetEntityInputBuilder() + .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) + .setType(new EntityType(ENTITY_TYPE)) + .build()).get().getResult(); assertEquals(getEntityResult.getOwnerNode().getValue(), "member-1"); assertEquals(getEntityResult.getCandidateNodes().get(0).getValue(), "member-1"); }); await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> { - final GetEntityOwnerOutput getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder() - .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) - .setType(new EntityType(ENTITY_TYPE)) - .build()) - .get().getResult(); + final var getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder() + .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId))) + .setType(new EntityType(ENTITY_TYPE)) + .build()).get().getResult(); assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1"); }); diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java 
b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java index 71fffe12ba..7699799ba2 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java @@ -223,6 +223,39 @@ public class ThreeNodeReachabilityTest extends AbstractNativeEosTest { verifyListenerState(node1Listener, ENTITY_1, true, false, false); } + @Test + public void testOwnerNotReassignedWhenOnlyCandidate() throws Exception { + startNode3(); + final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1); + final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1); + verifyNoNotifications(listener1); + verifyNoNotifications(listener2); + + registerCandidates(node3, ENTITY_1, "member-3"); + waitUntillOwnerPresent(node1, ENTITY_1); + + MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1); + verifyListenerState(listener1, ENTITY_1, true, false, false); + verifyListenerState(listener3, ENTITY_1, true, true, false); + + ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20)); + + verifyListenerState(listener1, ENTITY_1, true, false, false); + verifyListenerState(listener2, ENTITY_1, true, false, false); + + startNode3(); + verifyListenerState(listener1, ENTITY_1, false, false, false); + + listener3 = registerListener(node3, ENTITY_1); + verifyListenerState(listener3, ENTITY_1, false, false, false); + + registerCandidates(node1, ENTITY_1, "member-1"); + + verifyListenerState(listener1, ENTITY_1, true, true, false); + verifyListenerState(listener3, ENTITY_1, true, false, false); + + } + private void startNode3() throws Exception { startNode3(3); } @@ -232,7 +265,7 @@ public class ThreeNodeReachabilityTest extends AbstractNativeEosTest { // need to wait until all nodes are ready final Cluster cluster = Cluster.get(node2.getActorSystem()); - await().atMost(Duration.ofSeconds(20)).until(() -> { + await().atMost(Duration.ofSeconds(30)).until(() -> { final List members = ImmutableList.copyOf(cluster.state().getMembers()); if (members.size() != membersPresent) { return false; diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java index 90bfb88b20..f544ed1a4a 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java +++ b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java @@ -28,11 +28,10 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.opendaylight.controller.eos.akka.AbstractNativeEosTest; -import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService; -import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService; -import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration; -import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier; -import org.opendaylight.mdsal.singleton.dom.impl.DOMClusterSingletonServiceProviderImpl; +import org.opendaylight.mdsal.singleton.api.ClusterSingletonService; +import 
org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier; +import org.opendaylight.mdsal.singleton.impl.EOSClusterSingletonServiceProvider; +import org.opendaylight.yangtools.concepts.Registration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,9 +43,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { private MockNativeEntityOwnershipService node2; private MockNativeEntityOwnershipService node3; - private MockSingletonService singletonNode1; - private MockSingletonService singletonNode2; - private MockSingletonService singletonNode3; + private EOSClusterSingletonServiceProvider singletonNode1; + private EOSClusterSingletonServiceProvider singletonNode2; + private EOSClusterSingletonServiceProvider singletonNode3; @Before @@ -55,14 +54,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { node2 = startupNativeService(2551, List.of("member-2"), THREE_NODE_SEED_NODES); node3 = startupNativeService(2552, List.of("member-3"), THREE_NODE_SEED_NODES); - singletonNode1 = new MockSingletonService(node1); - singletonNode1.initializeProvider(); - - singletonNode2 = new MockSingletonService(node2); - singletonNode2.initializeProvider(); - - singletonNode3 = new MockSingletonService(node3); - singletonNode3.initializeProvider(); + singletonNode1 = new EOSClusterSingletonServiceProvider(node1); + singletonNode2 = new EOSClusterSingletonServiceProvider(node2); + singletonNode3 = new EOSClusterSingletonServiceProvider(node3); waitUntillNodeReady(node3); } @@ -90,8 +84,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { @Test public void testSingletonOwnershipHandoff() { final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1"); - final ClusterSingletonServiceRegistration registration = - singletonNode1.registerClusterSingletonService(service); + final Registration registration = singletonNode1.registerClusterSingletonService(service); verifyServiceActive(service); @@ -108,14 +101,12 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { @Test public void testSingletonOwnershipHandoffOnNodeShutdown() throws Exception { MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1"); - ClusterSingletonServiceRegistration registration2 = - singletonNode2.registerClusterSingletonService(service2); + Registration registration2 = singletonNode2.registerClusterSingletonService(service2); verifyServiceActive(service2); final MockClusterSingletonService service3 = new MockClusterSingletonService("member-3", "service-1"); - final ClusterSingletonServiceRegistration registration3 = - singletonNode3.registerClusterSingletonService(service3); + final Registration registration3 = singletonNode3.registerClusterSingletonService(service3); verifyServiceInactive(service3, 2); @@ -124,8 +115,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { verifyServiceActive(service3); node2 = startupNativeService(2551, List.of("member-1"), THREE_NODE_SEED_NODES); - singletonNode2 = new MockSingletonService(node2); - singletonNode2.initializeProvider(); + singletonNode2 = new EOSClusterSingletonServiceProvider(node2); waitUntillNodeReady(node2); service2 = new MockClusterSingletonService("member-2", "service-1"); @@ -135,7 +125,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { verifyServiceInactive(service2, 5); } - private void 
waitUntillNodeReady(MockNativeEntityOwnershipService node) { + private static void waitUntillNodeReady(final MockNativeEntityOwnershipService node) { // need to wait until all nodes are ready final Cluster cluster = Cluster.get(Adapter.toTyped(node.getActorSystem())); Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> { @@ -155,19 +145,19 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { }); } - private static void verifyServiceActive(MockClusterSingletonService service) { + private static void verifyServiceActive(final MockClusterSingletonService service) { await().untilAsserted(() -> assertTrue(service.isActivated())); } - private static void verifyServiceActive(MockClusterSingletonService service, long delay) { + private static void verifyServiceActive(final MockClusterSingletonService service, final long delay) { await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(service.isActivated())); } - private static void verifyServiceInactive(MockClusterSingletonService service) { + private static void verifyServiceInactive(final MockClusterSingletonService service) { await().untilAsserted(() -> assertFalse(service.isActivated())); } - private static void verifyServiceInactive(MockClusterSingletonService service, long delay) { + private static void verifyServiceInactive(final MockClusterSingletonService service, final long delay) { await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertFalse(service.isActivated())); } @@ -177,9 +167,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { private final ServiceGroupIdentifier identifier; private boolean activated = false; - MockClusterSingletonService(String member, String identifier) { + MockClusterSingletonService(final String member, final String identifier) { this.member = member; - this.identifier = ServiceGroupIdentifier.create(identifier); + this.identifier = new ServiceGroupIdentifier(identifier); } @Override @@ -204,10 +194,4 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest { return activated; } } - - private static class MockSingletonService extends DOMClusterSingletonServiceProviderImpl { - MockSingletonService(DOMEntityOwnershipService entityOwnershipService) { - super(entityOwnershipService); - } - } } diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf b/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf index ff23633e5e..08c2a36fb0 100644 --- a/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf +++ b/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf @@ -31,7 +31,11 @@ akka { # This value controls how quickly Entity Ownership Service decisions are # propagated within a node. 
notify-subscribers-interval = 20 ms - } + } + split-brain-resolver { + active-strategy = keep-majority + stable-after = 7s + } } } diff --git a/opendaylight/md-sal/mdsal-it-base/pom.xml b/opendaylight/md-sal/mdsal-it-base/pom.xml index 159c507c27..77e751dd0e 100644 --- a/opendaylight/md-sal/mdsal-it-base/pom.xml +++ b/opendaylight/md-sal/mdsal-it-base/pom.xml @@ -12,13 +12,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.odlparent bundle-parent - 9.0.12 + 13.0.11 org.opendaylight.controller mdsal-it-base - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT bundle @@ -26,7 +26,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.mdsal mdsal-artifacts - 8.0.10 + 13.0.1 pom import @@ -104,7 +104,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.osgi - osgi.core + org.osgi.framework compile diff --git a/opendaylight/md-sal/mdsal-it-parent/pom.xml b/opendaylight/md-sal/mdsal-it-parent/pom.xml index 3cce222809..3ea3d8e4a0 100644 --- a/opendaylight/md-sal/mdsal-it-parent/pom.xml +++ b/opendaylight/md-sal/mdsal-it-parent/pom.xml @@ -13,13 +13,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.odlparent bundle-parent - 9.0.12 + 13.0.11 org.opendaylight.controller mdsal-it-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -37,7 +37,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.controller controller-artifacts - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom import @@ -91,26 +91,25 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.osgi - osgi.core - - - junit - junit + org.osgi.framework - + org.mockito mockito-core + 4.11.0 test + maven-surefire-plugin - - 3.0.0-M5 @@ -177,20 +176,20 @@ and is available at http://www.eclipse.org/legal/epl-v10.html maven-dependency-plugin - unpack-karaf-resources - - unpack-dependencies - - process-test-resources - - ${project.build.directory}/test-classes - org.opendaylight.controller - mockito-core,objenesis,mdsal-it-base - META-INF\/** - false - + unpack-karaf-resources + + unpack-dependencies + + process-test-resources + + ${project.build.directory}/test-classes + org.opendaylight.controller + mockito-core,byte-buddy,objenesis,mdsal-it-base + META-INF\/** + false + - + diff --git a/opendaylight/md-sal/parent/pom.xml b/opendaylight/md-sal/parent/pom.xml index 32703e58ec..2cd8d0ac1b 100644 --- a/opendaylight/md-sal/parent/pom.xml +++ b/opendaylight/md-sal/parent/pom.xml @@ -11,13 +11,13 @@ org.opendaylight.mdsal binding-parent - 8.0.10 + 13.0.1 org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom @@ -25,7 +25,7 @@ org.opendaylight.controller bundle-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom import diff --git a/opendaylight/md-sal/pom.xml b/opendaylight/md-sal/pom.xml index bc453ae147..86f5e203f2 100644 --- a/opendaylight/md-sal/pom.xml +++ b/opendaylight/md-sal/pom.xml @@ -5,13 +5,13 @@ org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller mdsal-aggregator - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT pom diff --git a/opendaylight/md-sal/sal-akka-raft-example/pom.xml b/opendaylight/md-sal/sal-akka-raft-example/pom.xml index e9dd05a177..295d0d0552 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/pom.xml +++ b/opendaylight/md-sal/sal-akka-raft-example/pom.xml @@ -11,7 +11,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -19,6 +19,11 @@ bundle + + com.github.spotbugs + spotbugs-annotations + true + org.opendaylight.controller 
sal-akka-raft diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java index 17d6f980cd..052a48940c 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java @@ -29,8 +29,8 @@ import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort; import org.opendaylight.controller.cluster.raft.RaftState; import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply; import org.opendaylight.controller.cluster.raft.behaviors.Leader; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.concepts.Identifier; import org.opendaylight.yangtools.util.AbstractStringIdentifier; @@ -68,10 +68,8 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, if (message instanceof KeyValue) { if (isLeader()) { persistData(getSender(), new PayloadIdentifier(persistIdentifier++), (Payload) message, false); - } else { - if (getLeader() != null) { - getLeader().forward(message, getContext()); - } + } else if (getLeader() != null) { + getLeader().forward(message, getContext()); } } else if (message instanceof PrintState) { @@ -83,7 +81,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, } else if (message instanceof PrintRole) { if (LOG.isDebugEnabled()) { if (getRaftState() == RaftState.Leader || getRaftState() == RaftState.IsolatedLeader) { - final String followers = ((Leader)this.getCurrentBehavior()).printFollowerStates(); + final String followers = ((Leader)getCurrentBehavior()).printFollowerStates(); LOG.debug("{} = {}, Peers={}, followers={}", getId(), getRaftState(), getRaftActorContext().getPeerIds(), followers); } else { @@ -106,7 +104,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, } public Optional createRoleChangeNotifier(final String actorId) { - ActorRef exampleRoleChangeNotifier = this.getContext().actorOf( + ActorRef exampleRoleChangeNotifier = getContext().actorOf( RoleChangeNotifier.getProps(actorId), actorId + "-notifier"); return Optional.of(exampleRoleChangeNotifier); } @@ -118,8 +116,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, @Override protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) { - if (data instanceof KeyValue) { - KeyValue kv = (KeyValue) data; + if (data instanceof KeyValue kv) { state.put(kv.getKey(), kv.getValue()); if (clientActor != null) { clientActor.tell(new KeyValueSaved(), getSelf()); @@ -132,7 +129,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, public void createSnapshot(final ActorRef actorRef, final Optional installSnapshotStream) { try { if (installSnapshotStream.isPresent()) { - SerializationUtils.serialize((Serializable) state, installSnapshotStream.get()); + SerializationUtils.serialize((Serializable) state, installSnapshotStream.orElseThrow()); } } catch (RuntimeException e) { LOG.error("Exception in creating snapshot", e); @@ -202,7 +199,7 
@@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, try { return new MapState((Map) SerializationUtils.deserialize(snapshotBytes.read())); } catch (IOException e) { - throw new RuntimeException(e); + throw new IllegalStateException(e); } } diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java index 65d2109b30..3126156713 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java @@ -19,7 +19,7 @@ public class ExampleConfigParamsImpl extends DefaultConfigParamsImpl { } @Override - public int getSnapshotChunkSize() { + public int getMaximumMessageSliceSize() { return 50; } } diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java index 6ef8a07d9d..9559f1cff4 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java @@ -35,8 +35,9 @@ public class LogGenerator { } public static class LoggingThread implements Runnable { - + private final Random random = new Random(); private final ActorRef clientActor; + private volatile boolean stopLogging = false; public LoggingThread(final ActorRef clientActor) { @@ -45,7 +46,6 @@ public class LogGenerator { @Override public void run() { - Random random = new Random(); while (true) { if (stopLogging) { LOG.info("Logging stopped for client: {}", clientActor.path()); diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/Main.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/Main.java index ae8ea82295..871d3dfc2c 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/Main.java +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/Main.java @@ -15,7 +15,6 @@ import java.io.InputStreamReader; import java.nio.charset.Charset; import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; import org.opendaylight.controller.cluster.example.messages.KeyValue; @@ -49,11 +48,10 @@ public final class Main { withoutPeer("example-3"), Optional.empty()), "example-3"); - List examples = Arrays.asList(example1Actor, example2Actor, example3Actor); + final var examples = Arrays.asList(example1Actor, example2Actor, example3Actor); - ActorRef clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor)); - BufferedReader br = - new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset())); + final var clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor)); + final var br = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset())); System.out.println("Usage :"); System.out.println("s <1-3> to 
start a peer"); diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java new file mode 100644 index 0000000000..7721a8bbf6 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.example.messages; + +import java.io.Serializable; + +final class KVv1 implements Serializable { + private static final long serialVersionUID = 1L; + + private final String key; + private final String value; + + KVv1(String key, String value) { + this.key = key; + this.value = value; + } + + Object readResolve() { + return new KeyValue(key, value); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java index 520188b8cd..78eea5cd86 100644 --- a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java +++ b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java @@ -5,21 +5,20 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.example.messages; -import java.io.Serializable; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.Payload; -public class KeyValue extends Payload implements Serializable { +public final class KeyValue extends Payload { private static final long serialVersionUID = 1L; + private String key; private String value; public KeyValue() { } - public KeyValue(String key, String value) { + public KeyValue(final String key, final String value) { this.key = key; this.value = value; } @@ -32,12 +31,15 @@ public class KeyValue extends Payload implements Serializable { return value; } - public void setKey(String key) { - this.key = key; + @Override + public int size() { + return value.length() + key.length(); } - public void setValue(String value) { - this.value = value; + @Override + public int serializedSize() { + // Should be a better estimate + return size(); } @Override @@ -46,8 +48,7 @@ public class KeyValue extends Payload implements Serializable { } @Override - public int size() { - return this.value.length() + this.key.length(); + protected Object writeReplace() { + return new KVv1(value, key); } - } diff --git a/opendaylight/md-sal/sal-akka-raft/pom.xml b/opendaylight/md-sal/sal-akka-raft/pom.xml index 4a84542846..306e7561a5 100644 --- a/opendaylight/md-sal/sal-akka-raft/pom.xml +++ b/opendaylight/md-sal/sal-akka-raft/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -12,6 +12,27 @@ bundle + + com.github.spotbugs + 
spotbugs-annotations + true + + + com.google.guava + guava + + + org.eclipse.jdt + org.eclipse.jdt.annotation + + + org.opendaylight.yangtools + concepts + + + org.opendaylight.yangtools + util + org.opendaylight.controller sal-clustering-commons @@ -32,10 +53,6 @@ org.apache.commons commons-lang3 - - org.osgi - osgi.core - @@ -57,11 +74,6 @@ commons-io test - - commons-lang - commons-lang - test - diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java index 64506ee686..53d317fba1 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java @@ -43,7 +43,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog { this.snapshotTerm = snapshotTerm; this.logContext = logContext; - this.journal = new ArrayList<>(unAppliedEntries.size()); + journal = new ArrayList<>(unAppliedEntries.size()); for (ReplicatedLogEntry entry: unAppliedEntries) { append(entry); } @@ -168,7 +168,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog { long totalSize = 0; for (int i = fromIndex; i < toIndex; i++) { ReplicatedLogEntry entry = journal.get(i); - totalSize += entry.size(); + totalSize += entry.serializedSize(); if (totalSize <= maxDataSize) { retList.add(entry); } else { diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java index 0f14844d56..c69decdd14 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java @@ -5,33 +5,19 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.raft; import akka.actor.ActorRef; import org.opendaylight.yangtools.concepts.Identifier; -public interface ClientRequestTracker { - /** - * Returns the client actor that should be sent a response when consensus is achieved. - * - * @return the client actor - */ - ActorRef getClientActor(); - - /** - * Returns the identifier of the object that is to be replicated. For example a transaction identifier in the case - * of a transaction. - * - * @return the identifier - */ - Identifier getIdentifier(); - - /** - * Returns the index of the log entry that is to be replicated. - * - * @return the index - */ - long getIndex(); +/** + * Consensus forwarding tracker. + * + * @param clientActor the client actor that should be sent a response when consensus is achieved + * @param identifier the identifier of the object that is to be replicated. 
For example a transaction identifier in the + * case of a transaction + * @param logIndex the index of the log entry that is to be replicated + */ +public record ClientRequestTracker(long logIndex, ActorRef clientActor, Identifier identifier) { } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java deleted file mode 100644 index 6ffb9228dc..0000000000 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.raft; - -import akka.actor.ActorRef; -import org.opendaylight.yangtools.concepts.Identifier; - -public class ClientRequestTrackerImpl implements ClientRequestTracker { - - private final ActorRef clientActor; - private final Identifier identifier; - private final long logIndex; - - public ClientRequestTrackerImpl(ActorRef clientActor, Identifier identifier, long logIndex) { - - this.clientActor = clientActor; - - this.identifier = identifier; - - this.logIndex = logIndex; - } - - @Override - public ActorRef getClientActor() { - return clientActor; - } - - @Override - public long getIndex() { - return logIndex; - } - - @Override - public Identifier getIdentifier() { - return identifier; - } -} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java index 8351374d60..7f98295a30 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java @@ -87,7 +87,7 @@ public interface ConfigParams { * * @return the maximum size (in bytes). */ - int getSnapshotChunkSize(); + int getMaximumMessageSliceSize(); /** * Returns the maximum number of journal log entries to batch on recovery before applying. 
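Aside, not part of the patch: the change above collapses the ClientRequestTracker interface and its ClientRequestTrackerImpl into a single Java record. A minimal usage sketch follows, assuming it compiles alongside the new org.opendaylight.controller.cluster.raft.ClientRequestTracker record; the wrapper class and method are hypothetical and only illustrate how call sites move from the old getClientActor()/getIdentifier()/getIndex() getters to the generated record accessors.

    package org.opendaylight.controller.cluster.raft; // hypothetical placement, keeps the record visible

    import akka.actor.ActorRef;
    import org.opendaylight.yangtools.concepts.Identifier;

    // Hypothetical sketch, not part of this change.
    final class TrackerUsageSketch {
        // Old call sites: new ClientRequestTrackerImpl(clientActor, identifier, logIndex),
        // read back via getClientActor()/getIdentifier()/getIndex().
        static ActorRef clientFor(final long logIndex, final ActorRef clientActor, final Identifier identifier) {
            final var tracker = new ClientRequestTracker(logIndex, clientActor, identifier);
            // Record accessors replace the getters: logIndex(), clientActor(), identifier().
            return tracker.clientActor();
        }
    }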
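Aside, not part of the patch: the ConfigParams hunk above renames getSnapshotChunkSize() to getMaximumMessageSliceSize(), suggesting the limit now bounds any sliced message rather than only snapshot chunks; the concrete default moves to 480 KiB in DefaultConfigParamsImpl, shown next. A minimal configuration sketch under that assumption; the wrapper class is hypothetical.

    import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;

    // Hypothetical sketch, not part of this change: set and read the renamed limit.
    final class SliceSizeSketch {
        static int configuredSliceSize() {
            final var params = new DefaultConfigParamsImpl();
            params.setMaximumMessageSliceSize(480 * 1024); // mirrors the new 480 KiB default
            return params.getMaximumMessageSliceSize();    // formerly getSnapshotChunkSize()
        }
    }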
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java index 37ed729bed..c83f90ec43 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java @@ -41,7 +41,7 @@ public class DefaultConfigParamsImpl implements ConfigParams { */ private static final int ELECTION_TIME_MAX_VARIANCE = 100; - private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB + private static final int MAXIMUM_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB /** @@ -72,7 +72,7 @@ public class DefaultConfigParamsImpl implements ConfigParams { // 0 means direct threshold if disabled private int snapshotDataThreshold = 0; - private int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE; + private int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE; private long electionTimeoutFactor = 2; private long candidateElectionTimeoutDivisor = 1; @@ -95,9 +95,9 @@ public class DefaultConfigParamsImpl implements ConfigParams { this.snapshotBatchCount = snapshotBatchCount; } - public void setRecoverySnapshotIntervalSeconds(int recoverySnapshotInterval) { + public void setRecoverySnapshotIntervalSeconds(final int recoverySnapshotInterval) { checkArgument(recoverySnapshotInterval >= 0); - this.recoverySnapshotIntervalSeconds = recoverySnapshotInterval; + recoverySnapshotIntervalSeconds = recoverySnapshotInterval; } public void setSnapshotDataThresholdPercentage(final int snapshotDataThresholdPercentage) { @@ -108,8 +108,8 @@ public class DefaultConfigParamsImpl implements ConfigParams { this.snapshotDataThreshold = snapshotDataThreshold; } - public void setSnapshotChunkSize(final int snapshotChunkSize) { - this.snapshotChunkSize = snapshotChunkSize; + public void setMaximumMessageSliceSize(final int maximumMessageSliceSize) { + this.maximumMessageSliceSize = maximumMessageSliceSize; } public void setJournalRecoveryLogBatchSize(final int journalRecoveryLogBatchSize) { @@ -163,7 +163,7 @@ public class DefaultConfigParamsImpl implements ConfigParams { @Override public int getRecoverySnapshotIntervalSeconds() { - return this.recoverySnapshotIntervalSeconds; + return recoverySnapshotIntervalSeconds; } @Override @@ -191,8 +191,8 @@ public class DefaultConfigParamsImpl implements ConfigParams { } @Override - public int getSnapshotChunkSize() { - return snapshotChunkSize; + public int getMaximumMessageSliceSize() { + return maximumMessageSliceSize; } @Override diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java index a76d6a29c2..f5c94fbf4c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java @@ -7,6 +7,7 @@ */ package org.opendaylight.controller.cluster.raft; +import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; @@ -42,11 +43,8 @@ public final class 
FollowerLogInformation { private short payloadVersion = -1; - // Assume the HELIUM_VERSION version initially for backwards compatibility until we obtain the follower's - // actual version via AppendEntriesReply. Although we no longer support the Helium version, a pre-Boron - // follower will not have the version field in AppendEntriesReply so it will be set to 0 which is - // HELIUM_VERSION. - private short raftVersion = RaftVersions.HELIUM_VERSION; + // Assume the FLUORINE_VERSION version initially, as we no longer support pre-Fluorine versions. + private short raftVersion = RaftVersions.FLUORINE_VERSION; private final PeerInfo peerInfo; @@ -65,7 +63,7 @@ public final class FollowerLogInformation { */ @VisibleForTesting FollowerLogInformation(final PeerInfo peerInfo, final long matchIndex, final RaftActorContext context) { - this.nextIndex = context.getCommitIndex(); + nextIndex = context.getCommitIndex(); this.matchIndex = matchIndex; this.context = context; this.peerInfo = requireNonNull(peerInfo); @@ -299,6 +297,7 @@ public final class FollowerLogInformation { * @param raftVersion the raft version. */ public void setRaftVersion(final short raftVersion) { + checkArgument(raftVersion >= RaftVersions.FLUORINE_VERSION, "Unexpected version %s", raftVersion); this.raftVersion = raftVersion; } @@ -317,8 +316,8 @@ public final class FollowerLogInformation { * @param state the LeaderInstallSnapshotState */ public void setLeaderInstallSnapshotState(final @NonNull LeaderInstallSnapshotState state) { - if (this.installSnapshotState == null) { - this.installSnapshotState = requireNonNull(state); + if (installSnapshotState == null) { + installSnapshotState = requireNonNull(state); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java index d1e0b5c286..6febb90251 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java @@ -29,7 +29,7 @@ import scala.concurrent.duration.FiniteDuration; * * @author Thomas Pantelis */ -class GetSnapshotReplyActor extends UntypedAbstractActor { +final class GetSnapshotReplyActor extends UntypedAbstractActor { private static final Logger LOG = LoggerFactory.getLogger(GetSnapshotReplyActor.class); private final Params params; @@ -88,7 +88,7 @@ class GetSnapshotReplyActor extends UntypedAbstractActor { this.replyToActor = requireNonNull(replyToActor); this.receiveTimeout = requireNonNull(receiveTimeout); this.id = requireNonNull(id); - this.peerInformation = peerInfo; + peerInformation = peerInfo; } } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java index 00f6a04346..d71d879a5c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java @@ -15,11 +15,12 @@ import akka.actor.ActorRef; import akka.actor.ActorSelection; import akka.actor.PoisonPill; import akka.actor.Status; +import akka.persistence.JournalProtocol; +import 
akka.persistence.SnapshotProtocol; import com.google.common.annotations.VisibleForTesting; -import java.util.ArrayList; -import java.util.Collection; +import com.google.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -50,12 +51,12 @@ import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply; import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState; import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState; import org.opendaylight.controller.cluster.raft.client.messages.Shutdown; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.messages.RequestLeadership; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; import org.opendaylight.controller.cluster.raft.persisted.NoopPayload; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.concepts.Identifier; import org.opendaylight.yangtools.concepts.Immutable; @@ -100,8 +101,7 @@ import org.opendaylight.yangtools.concepts.Immutable; * */ public abstract class RaftActor extends AbstractUntypedPersistentActor { - - private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis + private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50); /** * This context should NOT be passed directly to any other actor it is @@ -123,16 +123,16 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { private boolean shuttingDown; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") protected RaftActor(final String id, final Map peerAddresses, final Optional configParams, final short payloadVersion) { persistentProvider = new PersistentDataProvider(this); delegatingPersistenceProvider = new RaftActorDelegatingPersistentDataProvider(null, persistentProvider); - context = new RaftActorContextImpl(this.getSelf(), - this.getContext(), id, new ElectionTermImpl(persistentProvider, id, LOG), - -1, -1, peerAddresses, - configParams.isPresent() ? configParams.get() : new DefaultConfigParamsImpl(), + context = new RaftActorContextImpl(getSelf(), getContext(), id, + new ElectionTermImpl(persistentProvider, id, LOG), -1, -1, peerAddresses, + configParams.isPresent() ? 
configParams.orElseThrow() : new DefaultConfigParamsImpl(), delegatingPersistenceProvider, this::handleApplyState, LOG, this::executeInSelf); context.setPayloadVersion(payloadVersion); @@ -225,9 +225,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { if (snapshotSupport.handleSnapshotMessage(message, getSender())) { return; } - if (message instanceof ApplyState) { - ApplyState applyState = (ApplyState) message; - + if (message instanceof ApplyState applyState) { if (!hasFollowers()) { // for single node, the capture should happen after the apply state // as we delete messages from the persistent journal which have made it to the snapshot @@ -239,35 +237,38 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { } possiblyHandleBehaviorMessage(message); - } else if (message instanceof ApplyJournalEntries) { - ApplyJournalEntries applyEntries = (ApplyJournalEntries) message; + } else if (message instanceof ApplyJournalEntries applyEntries) { LOG.debug("{}: Persisting ApplyJournalEntries with index={}", persistenceId(), applyEntries.getToIndex()); persistence().persistAsync(applyEntries, NoopProcedure.instance()); - } else if (message instanceof FindLeader) { - getSender().tell( - new FindLeaderReply(getLeaderAddress()), - getSelf() - ); + getSender().tell(new FindLeaderReply(getLeaderAddress()), getSelf()); } else if (message instanceof GetOnDemandRaftState) { onGetOnDemandRaftStats(); } else if (message instanceof InitiateCaptureSnapshot) { captureSnapshot(); - } else if (message instanceof SwitchBehavior) { - switchBehavior((SwitchBehavior) message); - } else if (message instanceof LeaderTransitioning) { - onLeaderTransitioning((LeaderTransitioning)message); + } else if (message instanceof SwitchBehavior switchBehavior) { + switchBehavior(switchBehavior); + } else if (message instanceof LeaderTransitioning leaderTransitioning) { + onLeaderTransitioning(leaderTransitioning); } else if (message instanceof Shutdown) { onShutDown(); - } else if (message instanceof Runnable) { - ((Runnable)message).run(); - } else if (message instanceof NoopPayload) { - persistData(null, null, (NoopPayload) message, false); - } else if (message instanceof RequestLeadership) { - onRequestLeadership((RequestLeadership) message); + } else if (message instanceof Runnable runnable) { + runnable.run(); + } else if (message instanceof NoopPayload noopPayload) { + persistData(null, null, noopPayload, false); + } else if (message instanceof RequestLeadership requestLeadership) { + onRequestLeadership(requestLeadership); } else if (!possiblyHandleBehaviorMessage(message)) { - handleNonRaftCommand(message); + if (message instanceof JournalProtocol.Response response + && delegatingPersistenceProvider.handleJournalResponse(response)) { + LOG.debug("{}: handled a journal response", persistenceId()); + } else if (message instanceof SnapshotProtocol.Response response + && delegatingPersistenceProvider.handleSnapshotResponse(response)) { + LOG.debug("{}: handled a snapshot response", persistenceId()); + } else { + handleNonRaftCommand(message); + } } } @@ -412,7 +413,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { Optional roleChangeNotifier = getRoleChangeNotifier(); if (getRaftState() == RaftState.Follower && roleChangeNotifier.isPresent() && leaderTransitioning.getLeaderId().equals(getCurrentBehavior().getLeaderId())) { - roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), null, + 
roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), null, getCurrentBehavior().getLeaderPayloadVersion()), getSelf()); } } @@ -451,7 +452,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { } final RaftActorBehavior currentBehavior = context.getCurrentBehavior(); - OnDemandRaftState.AbstractBuilder builder = newOnDemandRaftStateBuilder() + final var builder = newOnDemandRaftStateBuilder() .commitIndex(context.getCommitIndex()) .currentTerm(context.getTermInformation().getCurrentTerm()) .inMemoryJournalDataSize(replicatedLog().dataSize()) @@ -477,19 +478,14 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { builder.lastLogTerm(lastLogEntry.getTerm()); } - if (getCurrentBehavior() instanceof AbstractLeader) { - AbstractLeader leader = (AbstractLeader)getCurrentBehavior(); - Collection followerIds = leader.getFollowerIds(); - List followerInfoList = new ArrayList<>(followerIds.size()); - for (String id: followerIds) { - final FollowerLogInformation info = leader.getFollower(id); - followerInfoList.add(new FollowerInfo(id, info.getNextIndex(), info.getMatchIndex(), - info.isFollowerActive(), DurationFormatUtils.formatDurationHMS( - TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())), - context.getPeerInfo(info.getId()).isVoting())); - } - - builder.followerInfoList(followerInfoList); + if (getCurrentBehavior() instanceof AbstractLeader leader) { + builder.followerInfoList(leader.getFollowerIds().stream() + .map(leader::getFollower) + .map(info -> new FollowerInfo(info.getId(), info.getNextIndex(), info.getMatchIndex(), + info.isFollowerActive(), DurationFormatUtils.formatDurationHMS( + TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())), + context.getPeerInfo(info.getId()).isVoting())) + .collect(ImmutableList.toImmutableList())); } sender().tell(builder.build(), self()); @@ -516,7 +512,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { if (!Objects.equals(lastLeaderId, currentBehavior.getLeaderId()) || oldBehaviorState.getLeaderPayloadVersion() != currentBehavior.getLeaderPayloadVersion()) { if (roleChangeNotifier.isPresent()) { - roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(), + roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(), currentBehavior.getLeaderPayloadVersion()), getSelf()); } @@ -533,7 +529,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { if (roleChangeNotifier.isPresent() && (oldBehavior == null || oldBehavior.state() != currentBehavior.state())) { - roleChangeNotifier.get().tell(new RoleChanged(getId(), oldBehaviorStateName , + roleChangeNotifier.orElseThrow().tell(new RoleChanged(getId(), oldBehaviorStateName , currentBehavior.state().name()), getSelf()); } } @@ -625,8 +621,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { if (wasAppended && hasFollowers()) { // Send log entry for replication. 
- getCurrentBehavior().handleMessage(getSelf(), new Replicate(clientActor, identifier, replicatedLogEntry, - !batchHint)); + getCurrentBehavior().handleMessage(getSelf(), + new Replicate(replicatedLogEntry.getIndex(), !batchHint, clientActor, identifier)); } } @@ -963,7 +959,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor { this.lastValidLeaderId = lastValidLeaderId; this.lastLeaderId = lastLeaderId; this.behavior = requireNonNull(behavior); - this.leaderPayloadVersion = behavior.getLeaderPayloadVersion(); + leaderPayloadVersion = behavior.getLeaderPayloadVersion(); } @Override diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java index 8ba0f48d72..a27bb9c395 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java @@ -16,14 +16,12 @@ import akka.actor.ActorSystem; import akka.actor.Props; import akka.cluster.Cluster; import com.google.common.annotations.VisibleForTesting; -import java.util.ArrayList; +import com.google.common.collect.ImmutableList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.LongSupplier; @@ -110,7 +108,7 @@ public class RaftActorContextImpl implements RaftActorContext { this.lastApplied = lastApplied; this.configParams = requireNonNull(configParams); this.persistenceProvider = requireNonNull(persistenceProvider); - this.log = requireNonNull(logger); + log = requireNonNull(logger); this.applyStateConsumer = requireNonNull(applyStateConsumer); fileBackedOutputStreamFactory = new FileBackedOutputStreamFactory( @@ -219,7 +217,7 @@ public class RaftActorContextImpl implements RaftActorContext { @Override public Logger getLogger() { - return this.log; + return log; } @Override @@ -256,34 +254,27 @@ public class RaftActorContextImpl implements RaftActorContext { @Override public void updatePeerIds(final ServerConfigurationPayload serverConfig) { - votingMember = true; - boolean foundSelf = false; - Set currentPeers = new HashSet<>(this.getPeerIds()); - for (ServerInfo server : serverConfig.getServerConfig()) { - if (getId().equals(server.getId())) { - foundSelf = true; - if (!server.isVoting()) { - votingMember = false; - } + boolean newVotingMember = false; + var currentPeers = new HashSet<>(getPeerIds()); + for (var server : serverConfig.getServerConfig()) { + if (getId().equals(server.peerId())) { + newVotingMember = server.isVoting(); } else { - VotingState votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING; - if (!currentPeers.contains(server.getId())) { - this.addToPeers(server.getId(), null, votingState); + final var votingState = server.isVoting() ? 
VotingState.VOTING : VotingState.NON_VOTING; + if (currentPeers.contains(server.peerId())) { + getPeerInfo(server.peerId()).setVotingState(votingState); + currentPeers.remove(server.peerId()); } else { - this.getPeerInfo(server.getId()).setVotingState(votingState); - currentPeers.remove(server.getId()); + addToPeers(server.peerId(), null, votingState); } } } for (String peerIdToRemove : currentPeers) { - this.removePeer(peerIdToRemove); - } - - if (!foundSelf) { - votingMember = false; + removePeer(peerIdToRemove); } + votingMember = newVotingMember; log.debug("{}: Updated server config: isVoting: {}, peers: {}", id, votingMember, peerInfoMap.values()); setDynamicServerConfigurationInUse(); @@ -367,7 +358,7 @@ public class RaftActorContextImpl implements RaftActorContext { @Override public void setDynamicServerConfigurationInUse() { - this.dynamicServerConfiguration = true; + dynamicServerConfiguration = true; } @Override @@ -375,9 +366,9 @@ public class RaftActorContextImpl implements RaftActorContext { if (!isDynamicServerConfigurationInUse()) { return null; } - Collection peers = getPeers(); - List newConfig = new ArrayList<>(peers.size() + 1); - for (PeerInfo peer: peers) { + final var peers = getPeers(); + final var newConfig = ImmutableList.builderWithExpectedSize(peers.size() + (includeSelf ? 1 : 0)); + for (PeerInfo peer : peers) { newConfig.add(new ServerInfo(peer.getId(), peer.isVoting())); } @@ -385,7 +376,7 @@ public class RaftActorContextImpl implements RaftActorContext { newConfig.add(new ServerInfo(getId(), votingMember)); } - return new ServerConfigurationPayload(newConfig); + return new ServerConfigurationPayload(newConfig.build()); } @Override @@ -413,7 +404,7 @@ public class RaftActorContextImpl implements RaftActorContext { } void setCurrentBehavior(final RaftActorBehavior behavior) { - this.currentBehavior = requireNonNull(behavior); + currentBehavior = requireNonNull(behavior); } @Override diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java index 0bd8638260..846ef22bb0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java @@ -13,8 +13,7 @@ import akka.japi.Procedure; import org.opendaylight.controller.cluster.DataPersistenceProvider; import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider; import org.opendaylight.controller.cluster.PersistentDataProvider; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload; +import org.opendaylight.controller.cluster.raft.messages.PersistentPayload; /** * The DelegatingPersistentDataProvider used by RaftActor to override the configured persistent provider to @@ -42,33 +41,19 @@ class RaftActorDelegatingPersistentDataProvider extends DelegatingPersistentData } private void doPersist(final T entry, final Procedure procedure, final boolean async) { - if (getDelegate().isRecoveryApplicable()) { - persistSuper(entry, procedure, async); - } else { - if (entry instanceof ReplicatedLogEntry) { - Payload payload = 
((ReplicatedLogEntry)entry).getData(); - if (payload instanceof PersistentPayload) { - // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes - // on recovery if data persistence is later enabled. - if (async) { - persistentProvider.persistAsync(payload, p -> procedure.apply(entry)); - } else { - persistentProvider.persist(payload, p -> procedure.apply(entry)); - } - } else { - persistSuper(entry, procedure, async); - } + if (!getDelegate().isRecoveryApplicable() && entry instanceof ReplicatedLogEntry replicatedLogEntry + && replicatedLogEntry.getData() instanceof PersistentPayload payload) { + // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes on recovery + // if data persistence is later enabled. + if (async) { + persistentProvider.persistAsync(payload, p -> procedure.apply(entry)); } else { - persistSuper(entry, procedure, async); + persistentProvider.persist(payload, p -> procedure.apply(entry)); } - } - } - - private void persistSuper(final T object, final Procedure procedure, final boolean async) { - if (async) { - super.persistAsync(object, procedure); + } else if (async) { + super.persistAsync(entry, procedure); } else { - super.persist(object, procedure); + super.persist(entry, procedure); } } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java index c3d5af55cd..3aeaff6d89 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java @@ -89,7 +89,7 @@ public class RaftActorLeadershipTransferCohort { Optional roleChangeNotifier = raftActor.getRoleChangeNotifier(); if (roleChangeNotifier.isPresent()) { - roleChangeNotifier.get().tell(raftActor.newLeaderStateChanged(context.getId(), null, + roleChangeNotifier.orElseThrow().tell(raftActor.newLeaderStateChanged(context.getId(), null, currentBehavior.getLeaderPayloadVersion()), raftActor.self()); } @@ -122,9 +122,9 @@ public class RaftActorLeadershipTransferCohort { void doTransfer() { RaftActorBehavior behavior = raftActor.getCurrentBehavior(); // Sanity check... 
- if (behavior instanceof Leader) { + if (behavior instanceof Leader leader) { isTransferring = true; - ((Leader)behavior).transferLeadership(this); + leader.transferLeadership(this); } else { LOG.debug("{}: No longer the leader - skipping transfer", raftActor.persistenceId()); finish(true); diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java index 1f9b93acd7..4df0e7b58b 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java @@ -8,8 +8,8 @@ package org.opendaylight.controller.cluster.raft; import org.eclipse.jdt.annotation.Nullable; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; /** * Interface for a class that participates in raft actor persistence recovery. diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java index 10375f9406..389e8dfd8f 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java @@ -14,6 +14,7 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; import org.opendaylight.controller.cluster.PersistentDataProvider; import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot; +import org.opendaylight.controller.cluster.raft.messages.PersistentPayload; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries; import org.opendaylight.controller.cluster.raft.persisted.EmptyState; @@ -22,7 +23,6 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPay import org.opendaylight.controller.cluster.raft.persisted.Snapshot; import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State; import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload; import org.slf4j.Logger; /** @@ -46,7 +46,7 @@ class RaftActorRecoverySupport { RaftActorRecoverySupport(final RaftActorContext context, final RaftActorRecoveryCohort cohort) { this.context = context; this.cohort = cohort; - this.log = context.getLogger(); + log = context.getLogger(); } boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) { @@ -59,19 +59,18 @@ class RaftActorRecoverySupport { } boolean recoveryComplete = false; - if (message instanceof UpdateElectionTerm) { - context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), - ((UpdateElectionTerm) message).getVotedFor()); - } else if (message instanceof SnapshotOffer) { - onRecoveredSnapshot((SnapshotOffer) message); - } else if (message instanceof ReplicatedLogEntry) { - 
onRecoveredJournalLogEntry((ReplicatedLogEntry) message); - } else if (message instanceof ApplyJournalEntries) { - onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex()); - } else if (message instanceof DeleteEntries) { - onDeleteEntries((DeleteEntries) message); - } else if (message instanceof ServerConfigurationPayload) { - context.updatePeerIds((ServerConfigurationPayload)message); + if (message instanceof UpdateElectionTerm updateElectionTerm) { + context.getTermInformation().update(updateElectionTerm.getCurrentTerm(), updateElectionTerm.getVotedFor()); + } else if (message instanceof SnapshotOffer snapshotOffer) { + onRecoveredSnapshot(snapshotOffer); + } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) { + onRecoveredJournalLogEntry(replicatedLogEntry); + } else if (message instanceof ApplyJournalEntries applyJournalEntries) { + onRecoveredApplyLogEntries(applyJournalEntries.getToIndex()); + } else if (message instanceof DeleteEntries deleteEntries) { + onDeleteEntries(deleteEntries); + } else if (message instanceof ServerConfigurationPayload serverConfigurationPayload) { + context.updatePeerIds(serverConfigurationPayload); } else if (message instanceof RecoveryCompleted) { recoveryComplete = true; onRecoveryCompletedMessage(persistentProvider); @@ -254,7 +253,7 @@ class RaftActorRecoverySupport { final SnapshotManager snapshotManager = context.getSnapshotManager(); if (snapshotManager.capture(logEntry, -1)) { log.info("Capturing snapshot, resetting timer for the next recovery snapshot interval."); - this.recoverySnapshotTimer.reset().start(); + recoverySnapshotTimer.reset().start(); } else { log.info("SnapshotManager is not able to capture snapshot at this time. It will be retried " + "again with the next recovered entry."); @@ -262,7 +261,7 @@ class RaftActorRecoverySupport { } private boolean shouldTakeRecoverySnapshot() { - return this.recoverySnapshotTimer != null && this.recoverySnapshotTimer.elapsed(TimeUnit.SECONDS) + return recoverySnapshotTimer != null && recoverySnapshotTimer.elapsed(TimeUnit.SECONDS) >= context.getConfigParams().getRecoverySnapshotIntervalSeconds(); } @@ -338,6 +337,6 @@ class RaftActorRecoverySupport { } private static boolean isMigratedSerializable(final Object message) { - return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated(); + return message instanceof MigratedSerializable migrated && migrated.isMigrated(); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java index 29641cb00e..fd2cd419d7 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java @@ -12,11 +12,10 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; import akka.actor.ActorSelection; import akka.actor.Cancellable; +import com.google.common.collect.ImmutableList; import java.util.ArrayDeque; -import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Queue; import java.util.UUID; @@ -28,6 +27,7 @@ import 
org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader; import org.opendaylight.controller.cluster.raft.messages.AddServer; import org.opendaylight.controller.cluster.raft.messages.AddServerReply; import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.messages.RemoveServer; import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply; import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply; @@ -36,7 +36,6 @@ import org.opendaylight.controller.cluster.raft.messages.ServerRemoved; import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; import org.opendaylight.controller.cluster.raft.persisted.ServerInfo; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.concepts.Identifier; import org.opendaylight.yangtools.util.AbstractUUIDIdentifier; import org.slf4j.Logger; @@ -64,27 +63,27 @@ class RaftActorServerConfigurationSupport { RaftActorServerConfigurationSupport(final RaftActor raftActor) { this.raftActor = raftActor; - this.raftContext = raftActor.getRaftActorContext(); + raftContext = raftActor.getRaftActorContext(); } boolean handleMessage(final Object message, final ActorRef sender) { - if (message instanceof AddServer) { - onAddServer((AddServer) message, sender); + if (message instanceof AddServer addServer) { + onAddServer(addServer, sender); return true; - } else if (message instanceof RemoveServer) { - onRemoveServer((RemoveServer) message, sender); + } else if (message instanceof RemoveServer removeServer) { + onRemoveServer(removeServer, sender); return true; - } else if (message instanceof ChangeServersVotingStatus) { - onChangeServersVotingStatus((ChangeServersVotingStatus) message, sender); + } else if (message instanceof ChangeServersVotingStatus changeServersVotingStatus) { + onChangeServersVotingStatus(changeServersVotingStatus, sender); return true; - } else if (message instanceof ServerOperationTimeout) { - currentOperationState.onServerOperationTimeout((ServerOperationTimeout) message); + } else if (message instanceof ServerOperationTimeout serverOperationTimeout) { + currentOperationState.onServerOperationTimeout(serverOperationTimeout); return true; - } else if (message instanceof UnInitializedFollowerSnapshotReply) { - currentOperationState.onUnInitializedFollowerSnapshotReply((UnInitializedFollowerSnapshotReply) message); + } else if (message instanceof UnInitializedFollowerSnapshotReply uninitFollowerSnapshotReply) { + currentOperationState.onUnInitializedFollowerSnapshotReply(uninitFollowerSnapshotReply); return true; - } else if (message instanceof ApplyState) { - return onApplyState((ApplyState) message); + } else if (message instanceof ApplyState applyState) { + return onApplyState(applyState); } else if (message instanceof SnapshotComplete) { currentOperationState.onSnapshotComplete(); return false; @@ -748,7 +747,7 @@ class RaftActorServerConfigurationSupport { } private boolean updateLocalPeerInfo() { - List newServerInfoList = newServerInfoList(); + final var newServerInfoList = newServerInfoList(); // Check if new voting state would leave us with no voting members. 
boolean atLeastOneVoting = false; @@ -765,28 +764,28 @@ class RaftActorServerConfigurationSupport { } raftContext.updatePeerIds(new ServerConfigurationPayload(newServerInfoList)); - if (raftActor.getCurrentBehavior() instanceof AbstractLeader) { - AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior(); + if (raftActor.getCurrentBehavior() instanceof AbstractLeader leader) { leader.updateMinReplicaCount(); } return true; } - private List newServerInfoList() { - Map serverVotingStatusMap = changeVotingStatusContext.getOperation() - .getServerVotingStatusMap(); - List newServerInfoList = new ArrayList<>(); - for (String peerId: raftContext.getPeerIds()) { - newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId) - ? serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting())); + private ImmutableList newServerInfoList() { + final var serverVotingStatusMap = changeVotingStatusContext.getOperation().getServerVotingStatusMap(); + final var peerInfos = raftContext.getPeers(); + final var newServerInfoList = ImmutableList.builderWithExpectedSize(peerInfos.size() + 1); + for (var peerInfo : peerInfos) { + final var peerId = peerInfo.getId(); + final var voting = serverVotingStatusMap.get(peerId); + newServerInfoList.add(new ServerInfo(peerId, voting != null ? voting : peerInfo.isVoting())); } - newServerInfoList.add(new ServerInfo(raftContext.getId(), serverVotingStatusMap.containsKey( - raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId()) - : raftContext.isVotingMember())); + final var myId = raftContext.getId(); + final var myVoting = serverVotingStatusMap.get(myId); + newServerInfoList.add(new ServerInfo(myId, myVoting != null ? myVoting : raftContext.isVotingMember())); - return newServerInfoList; + return newServerInfoList.build(); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java index bc96713dc4..e7344d9b4f 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java @@ -60,18 +60,18 @@ class RaftActorSnapshotMessageSupport { } boolean handleSnapshotMessage(final Object message, final ActorRef sender) { - if (message instanceof ApplySnapshot) { - onApplySnapshot((ApplySnapshot) message); - } else if (message instanceof SaveSnapshotSuccess) { - onSaveSnapshotSuccess((SaveSnapshotSuccess) message); - } else if (message instanceof SaveSnapshotFailure) { - onSaveSnapshotFailure((SaveSnapshotFailure) message); - } else if (message instanceof CaptureSnapshotReply) { - onCaptureSnapshotReply((CaptureSnapshotReply) message); + if (message instanceof ApplySnapshot applySnapshot) { + onApplySnapshot(applySnapshot); + } else if (message instanceof SaveSnapshotSuccess saveSnapshotSuccess) { + onSaveSnapshotSuccess(saveSnapshotSuccess); + } else if (message instanceof SaveSnapshotFailure saveSnapshotFailure) { + onSaveSnapshotFailure(saveSnapshotFailure); + } else if (message instanceof CaptureSnapshotReply captureSnapshotReply) { + onCaptureSnapshotReply(captureSnapshotReply); } else if (COMMIT_SNAPSHOT.equals(message)) { context.getSnapshotManager().commit(-1, -1); - } else if (message instanceof 
GetSnapshot) { - onGetSnapshot(sender, (GetSnapshot) message); + } else if (message instanceof GetSnapshot getSnapshot) { + onGetSnapshot(sender, getSnapshot); } else if (message instanceof SnapshotComplete) { log.debug("{}: SnapshotComplete received", context.getId()); } else { diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java index 7876ea7cf5..a09a4aa2cb 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java @@ -13,13 +13,14 @@ package org.opendaylight.controller.cluster.raft; * @author Thomas Pantelis */ public final class RaftVersions { - public static final short HELIUM_VERSION = 0; - public static final short LITHIUM_VERSION = 1; - public static final short BORON_VERSION = 3; + // HELIUM_VERSION = 0 + // LITHIUM_VERSION = 1 + // BORON_VERSION = 3 public static final short FLUORINE_VERSION = 4; - public static final short CURRENT_VERSION = FLUORINE_VERSION; + public static final short ARGON_VERSION = 5; + public static final short CURRENT_VERSION = ARGON_VERSION; private RaftVersions() { - + // Hidden on purpose } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java index 1348ffca91..360f6b6903 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java @@ -8,7 +8,7 @@ package org.opendaylight.controller.cluster.raft; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.Payload; /** * Represents one entry in the replicated log. @@ -42,6 +42,15 @@ public interface ReplicatedLogEntry { */ int size(); + /** + * Return the estimate of serialized size of this entry when passed through serialization. The estimate needs to + * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of + * uncertainty. + * + * @return An estimate of serialized size. + */ + int serializedSize(); + /** * Checks if persistence is pending for this entry. 
* diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java index 8037fb8d73..57e6140fc9 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java @@ -68,7 +68,7 @@ public class SnapshotManager implements SnapshotState { */ public SnapshotManager(final RaftActorContext context, final Logger logger) { this.context = context; - this.log = logger; + log = logger; } public boolean isApplying() { @@ -195,7 +195,7 @@ public class SnapshotManager implements SnapshotState { newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, mandatoryTrim); } - private class AbstractSnapshotState implements SnapshotState { + private abstract class AbstractSnapshotState implements SnapshotState { @Override public boolean isCapturing() { @@ -282,8 +282,7 @@ public class SnapshotManager implements SnapshotState { } } - private class Idle extends AbstractSnapshotState { - + private final class Idle extends AbstractSnapshotState { @Override public boolean isCapturing() { return false; @@ -307,12 +306,12 @@ public class SnapshotManager implements SnapshotState { log.debug("{}: lastSequenceNumber prior to capture: {}", persistenceId(), lastSequenceNumber); - SnapshotManager.this.currentState = CREATING; + currentState = CREATING; try { createSnapshotProcedure.accept(Optional.ofNullable(installSnapshotStream)); } catch (Exception e) { - SnapshotManager.this.currentState = IDLE; + currentState = IDLE; log.error("Error creating snapshot", e); return false; } @@ -338,7 +337,7 @@ public class SnapshotManager implements SnapshotState { @Override public void apply(final ApplySnapshot toApply) { - SnapshotManager.this.applySnapshot = toApply; + applySnapshot = toApply; lastSequenceNumber = context.getPersistenceProvider().getLastSequenceNumber(); @@ -346,7 +345,7 @@ public class SnapshotManager implements SnapshotState { context.getPersistenceProvider().saveSnapshot(toApply.getSnapshot()); - SnapshotManager.this.currentState = PERSISTING; + currentState = PERSISTING; } @Override @@ -360,8 +359,7 @@ public class SnapshotManager implements SnapshotState { } } - private class Creating extends AbstractSnapshotState { - + private final class Creating extends AbstractSnapshotState { @Override public void persist(final Snapshot.State snapshotState, final Optional installSnapshotStream, final long totalMemory) { @@ -440,7 +438,8 @@ public class SnapshotManager implements SnapshotState { if (installSnapshotStream.isPresent()) { if (context.getId().equals(currentBehavior.getLeaderId())) { try { - ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.get()).asByteSource(); + ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.orElseThrow()) + .asByteSource(); currentBehavior.handleMessage(context.getActor(), new SendInstallSnapshot(snapshot, snapshotBytes)); } catch (IOException e) { @@ -448,12 +447,12 @@ public class SnapshotManager implements SnapshotState { context.getId(), e); } } else { - ((FileBackedOutputStream)installSnapshotStream.get()).cleanup(); + ((FileBackedOutputStream)installSnapshotStream.orElseThrow()).cleanup(); } } captureSnapshot = null; - SnapshotManager.this.currentState = PERSISTING; + currentState = 
PERSISTING; } @Override @@ -463,8 +462,7 @@ public class SnapshotManager implements SnapshotState { } - private class Persisting extends AbstractSnapshotState { - + private final class Persisting extends AbstractSnapshotState { @Override @SuppressWarnings("checkstyle:IllegalCatch") public void commit(final long sequenceNumber, final long timeStamp) { @@ -525,7 +523,7 @@ public class SnapshotManager implements SnapshotState { private void snapshotComplete() { lastSequenceNumber = -1; applySnapshot = null; - SnapshotManager.this.currentState = IDLE; + currentState = IDLE; context.getActor().tell(SnapshotComplete.INSTANCE, context.getActor()); } @@ -543,15 +541,15 @@ public class SnapshotManager implements SnapshotState { long getTerm(); } - static class LastAppliedTermInformationReader implements TermInformationReader { + static final class LastAppliedTermInformationReader implements TermInformationReader { private long index; private long term; LastAppliedTermInformationReader init(final ReplicatedLog log, final long originalIndex, final ReplicatedLogEntry lastLogEntry, final boolean hasFollowers) { ReplicatedLogEntry entry = log.get(originalIndex); - this.index = -1L; - this.term = -1L; + index = -1L; + term = -1L; if (!hasFollowers) { if (lastLogEntry != null) { // since we have persisted the last-log-entry to persistent journal before the capture, @@ -571,23 +569,23 @@ public class SnapshotManager implements SnapshotState { @Override public long getIndex() { - return this.index; + return index; } @Override public long getTerm() { - return this.term; + return term; } } - private static class ReplicatedToAllTermInformationReader implements TermInformationReader { + private static final class ReplicatedToAllTermInformationReader implements TermInformationReader { private long index; private long term; ReplicatedToAllTermInformationReader init(final ReplicatedLog log, final long originalIndex) { ReplicatedLogEntry entry = log.get(originalIndex); - this.index = -1L; - this.term = -1L; + index = -1L; + term = -1L; if (entry != null) { index = entry.getIndex(); @@ -599,12 +597,12 @@ public class SnapshotManager implements SnapshotState { @Override public long getIndex() { - return this.index; + return index; } @Override public long getTerm() { - return this.term; + return term; } } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java index f16e5e2b59..93b5f04df3 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java @@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.raft; import static java.util.Objects.requireNonNull; import akka.actor.Cancellable; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import scala.concurrent.duration.FiniteDuration; /** @@ -27,10 +28,12 @@ abstract class TimedRunnable implements Runnable { private final Cancellable cancelTimer; private boolean canRun = true; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", + justification = "https://github.com/spotbugs/spotbugs/issues/1867") TimedRunnable(final FiniteDuration timeout, final RaftActor actor) { cancelTimer = requireNonNull(actor).getContext().system().scheduler() - .scheduleOnce(requireNonNull(timeout), actor.self(), 
(Runnable) this::cancel, - actor.getContext().system().dispatcher(), actor.self()); + .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel, + actor.getContext().system().dispatcher(), actor.self()); } @Override diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java deleted file mode 100644 index 67f3ed92ae..0000000000 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2017 Inocybe Technologies and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.raft.base.messages; - -import static java.util.Objects.requireNonNull; - -import java.io.Externalizable; -import java.io.ObjectInput; -import java.io.ObjectOutput; - -/** - * Abstract base that implements Externalizable with no-op methods that is intended for classes that use the - * externalizable proxy pattern but have no data to serialize and read-resolve to a static instance. - * - * @author Thomas Pantelis - */ -public abstract class EmptyExternalizableProxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private final Object readResolveTo; - - protected EmptyExternalizableProxy(final Object readResolveTo) { - this.readResolveTo = requireNonNull(readResolveTo); - } - - @Override - public void writeExternal(final ObjectOutput out) { - } - - @Override - public void readExternal(final ObjectInput in) { - } - - protected Object readResolve() { - return readResolveTo; - } -} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java index c58d86354a..edd4986a47 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java @@ -5,40 +5,11 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.raft.base.messages; import akka.actor.ActorRef; -import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.yangtools.concepts.Identifier; -public class Replicate { - private final ActorRef clientActor; - private final Identifier identifier; - private final ReplicatedLogEntry replicatedLogEntry; - private final boolean sendImmediate; - - public Replicate(ActorRef clientActor, Identifier identifier, ReplicatedLogEntry replicatedLogEntry, - boolean sendImmediate) { - this.clientActor = clientActor; - this.identifier = identifier; - this.replicatedLogEntry = replicatedLogEntry; - this.sendImmediate = sendImmediate; - } - - public ActorRef getClientActor() { - return clientActor; - } - - public Identifier getIdentifier() { - return identifier; - 
} - - public ReplicatedLogEntry getReplicatedLogEntry() { - return replicatedLogEntry; - } - - public boolean isSendImmediate() { - return sendImmediate; - } +public record Replicate(long logIndex, boolean sendImmediate, ActorRef clientActor, Identifier identifier) { + // Nothing else here } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java index b212250dd4..2b76844819 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java @@ -16,25 +16,18 @@ import java.io.Serializable; * @author Thomas Pantelis */ public final class TimeoutNow implements Serializable, ControlMessage { + @java.io.Serial private static final long serialVersionUID = 1L; + public static final TimeoutNow INSTANCE = new TimeoutNow(); private TimeoutNow() { // Hidden on purpose } - private Object writeReplace() { - return new Proxy(); - } - - private static class Proxy extends EmptyExternalizableProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - super(INSTANCE); - } + @java.io.Serial + @SuppressWarnings("static-method") + private Object readResolve() { + return INSTANCE; } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java index 6560ad76c3..7514dccff4 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java @@ -20,7 +20,6 @@ import java.io.ObjectOutputStream; import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -34,11 +33,11 @@ import org.opendaylight.controller.cluster.io.SharedFileBackedOutputStream; import org.opendaylight.controller.cluster.messaging.MessageSlicer; import org.opendaylight.controller.cluster.messaging.SliceOptions; import org.opendaylight.controller.cluster.raft.ClientRequestTracker; -import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl; import org.opendaylight.controller.cluster.raft.FollowerLogInformation; import org.opendaylight.controller.cluster.raft.PeerInfo; import org.opendaylight.controller.cluster.raft.RaftActorContext; import org.opendaylight.controller.cluster.raft.RaftState; +import org.opendaylight.controller.cluster.raft.RaftVersions; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.VotingState; import org.opendaylight.controller.cluster.raft.base.messages.ApplyState; @@ -48,6 +47,7 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat; import 
org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot; import org.opendaylight.controller.cluster.raft.messages.AppendEntries; import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply; +import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply; import org.opendaylight.controller.cluster.raft.messages.RaftRPC; @@ -56,8 +56,6 @@ import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply; import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import scala.concurrent.duration.FiniteDuration; /** @@ -110,7 +108,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { super(context, state); appendEntriesMessageSlicer = MessageSlicer.builder().logContext(logName()) - .messageSliceSize(context.getConfigParams().getSnapshotChunkSize()) + .messageSliceSize(context.getConfigParams().getMaximumMessageSliceSize()) .expireStateAfterInactivity(context.getConfigParams().getElectionTimeOutInterval().toMillis() * 3, TimeUnit.MILLISECONDS).build(); @@ -166,7 +164,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { followerToLog.remove(followerId); } - public void updateMinReplicaCount() { + public final void updateMinReplicaCount() { int numVoting = 0; for (PeerInfo peer: context.getPeers()) { if (peer.isVoting()) { @@ -221,6 +219,13 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { return this; } + final var followerRaftVersion = appendEntriesReply.getRaftVersion(); + if (followerRaftVersion < RaftVersions.FLUORINE_VERSION) { + log.warn("{}: handleAppendEntriesReply - ignoring reply from follower {} raft version {}", logName(), + followerId, followerRaftVersion); + return this; + } + final long lastActivityNanos = followerLogInformation.nanosSinceLastActivity(); if (lastActivityNanos > context.getConfigParams().getElectionTimeOutInterval().toNanos()) { log.warn("{} : handleAppendEntriesReply delayed beyond election timeout, " @@ -231,7 +236,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { followerLogInformation.markFollowerActive(); followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion()); - followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion()); + followerLogInformation.setRaftVersion(followerRaftVersion); followerLogInformation.setNeedsLeaderAddress(appendEntriesReply.isNeedsLeaderAddress()); long followerLastLogIndex = appendEntriesReply.getLogLastIndex(); @@ -442,15 +447,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { * @return the ClientRequestTracker or null if none available */ private ClientRequestTracker removeClientRequestTracker(final long logIndex) { - final Iterator it = trackers.iterator(); + final var it = trackers.iterator(); while (it.hasNext()) { - final ClientRequestTracker t = it.next(); - if (t.getIndex() == logIndex) { + final var tracker = it.next(); + if (tracker.logIndex() == logIndex) { it.remove(); - return t; + return 
tracker; } } - return null; } @@ -460,17 +464,16 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { // If it does that means the leader wasn't dropped before the transaction applied. // That means that this transaction can be safely applied as a local transaction since we // have the ClientRequestTracker. - final ClientRequestTracker tracker = removeClientRequestTracker(entry.getIndex()); + final var tracker = removeClientRequestTracker(entry.getIndex()); if (tracker != null) { - return new ApplyState(tracker.getClientActor(), tracker.getIdentifier(), entry); + return new ApplyState(tracker.clientActor(), tracker.identifier(), entry); } // Tracker is missing, this means that we switched behaviours between replicate and applystate // and became the leader again,. We still want to apply this as a local modification because // we have resumed leadership with that log entry having been committed. - final Payload payload = entry.getData(); - if (payload instanceof IdentifiablePayload) { - return new ApplyState(null, ((IdentifiablePayload) payload).getIdentifier(), entry); + if (entry.getData() instanceof IdentifiablePayload identifiable) { + return new ApplyState(null, identifiable.getIdentifier(), entry); } return new ApplyState(null, null, entry); @@ -493,47 +496,45 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { return this; } - if (message instanceof RaftRPC) { - RaftRPC rpc = (RaftRPC) message; - // If RPC request or response contains term T > currentTerm: - // set currentTerm = T, convert to follower (§5.1) - // This applies to all RPC messages and responses - if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) { - log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower", - logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm()); - - context.getTermInformation().updateAndPersist(rpc.getTerm(), null); - - // This is a special case. Normally when stepping down as leader we don't process and reply to the - // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a - // RequestVote, process the RequestVote before switching to Follower. This enables the requesting - // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower - // state and starting a new election and grabbing leadership back before the other candidate node can - // start a new election due to lack of responses. This case would only occur if there isn't a majority - // of other nodes available that can elect the requesting candidate. Since we're transferring - // leadership, we should make every effort to get the requesting node elected. 
- if (rpc instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) { - log.debug("{}: Leadership transfer in progress - processing RequestVote", logName()); - super.handleMessage(sender, rpc); - } - - return internalSwitchBehavior(RaftState.Follower); + // If RPC request or response contains term T > currentTerm: + // set currentTerm = T, convert to follower (§5.1) + // This applies to all RPC messages and responses + if (message instanceof RaftRPC rpc && rpc.getTerm() > context.getTermInformation().getCurrentTerm() + && shouldUpdateTerm(rpc)) { + + log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower", + logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm()); + + context.getTermInformation().updateAndPersist(rpc.getTerm(), null); + + // This is a special case. Normally when stepping down as leader we don't process and reply to the + // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a + // RequestVote, process the RequestVote before switching to Follower. This enables the requesting + // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower + // state and starting a new election and grabbing leadership back before the other candidate node can + // start a new election due to lack of responses. This case would only occur if there isn't a majority + // of other nodes available that can elect the requesting candidate. Since we're transferring + // leadership, we should make every effort to get the requesting node elected. + if (rpc instanceof RequestVote requestVote && context.getRaftActorLeadershipTransferCohort() != null) { + log.debug("{}: Leadership transfer in progress - processing RequestVote", logName()); + requestVote(sender, requestVote); } + + return internalSwitchBehavior(RaftState.Follower); } if (message instanceof SendHeartBeat) { beforeSendHeartbeat(); sendHeartBeat(); scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval()); - } else if (message instanceof SendInstallSnapshot) { - SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message; + } else if (message instanceof SendInstallSnapshot sendInstallSnapshot) { setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(), sendInstallSnapshot.getSnapshotBytes())); sendInstallSnapshot(); - } else if (message instanceof Replicate) { - replicate((Replicate) message); - } else if (message instanceof InstallSnapshotReply) { - handleInstallSnapshotReply((InstallSnapshotReply) message); + } else if (message instanceof Replicate replicate) { + replicate(replicate); + } else if (message instanceof InstallSnapshotReply installSnapshotReply) { + handleInstallSnapshotReply(installSnapshotReply); } else if (message instanceof CheckConsensusReached) { possiblyUpdateCommitIndex(); } else { @@ -573,7 +574,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { if (installSnapshotState.isLastChunk(reply.getChunkIndex())) { //this was the last chunk reply - long followerMatchIndex = snapshotHolder.get().getLastIncludedIndex(); + long followerMatchIndex = snapshotHolder.orElseThrow().getLastIncludedIndex(); followerLogInformation.setMatchIndex(followerMatchIndex); followerLogInformation.setNextIndex(followerMatchIndex + 1); followerLogInformation.clearLeaderInstallSnapshotState(); @@ -641,17 +642,16 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { } private void replicate(final Replicate 
replicate) { - long logIndex = replicate.getReplicatedLogEntry().getIndex(); + final long logIndex = replicate.logIndex(); - log.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}, isSendImmediate: {}", logName(), - replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass(), - replicate.isSendImmediate()); + log.debug("{}: Replicate message: identifier: {}, logIndex: {}, isSendImmediate: {}", logName(), + replicate.identifier(), logIndex, replicate.sendImmediate()); // Create a tracker entry we will use this later to notify the // client actor - if (replicate.getClientActor() != null) { - trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(), - logIndex)); + final var clientActor = replicate.clientActor(); + if (clientActor != null) { + trackers.add(new ClientRequestTracker(logIndex, clientActor, replicate.identifier())); } boolean applyModificationToState = !context.anyVotingPeers() @@ -662,7 +662,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { applyLogToStateMachine(logIndex); } - if (replicate.isSendImmediate() && !followerToLog.isEmpty()) { + if (replicate.sendImmediate() && !followerToLog.isEmpty()) { sendAppendEntries(0, false); } } @@ -776,14 +776,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { // Try to get all the entries in the journal but not exceeding the max data size for a single AppendEntries // message. int maxEntries = (int) context.getReplicatedLog().size(); - final int maxDataSize = context.getConfigParams().getSnapshotChunkSize(); + final int maxDataSize = context.getConfigParams().getMaximumMessageSliceSize(); final long followerNextIndex = followerLogInfo.getNextIndex(); List entries = context.getReplicatedLog().getFrom(followerNextIndex, maxEntries, maxDataSize); // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If // that is the case, then we need to slice it into smaller chunks. - if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) { + if (entries.size() != 1 || entries.get(0).getData().serializedSize() <= maxDataSize) { // Don't need to slice. return entries; } @@ -904,10 +904,10 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { } boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(), - this.getReplicatedToAllIndex(), followerId); + getReplicatedToAllIndex(), followerId); if (captureInitiated) { followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState( - context.getConfigParams().getSnapshotChunkSize(), logName())); + context.getConfigParams().getMaximumMessageSliceSize(), logName())); } return captureInitiated; @@ -949,14 +949,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { if (snapshotHolder.isPresent()) { LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState(); if (installSnapshotState == null) { - installSnapshotState = new LeaderInstallSnapshotState(context.getConfigParams().getSnapshotChunkSize(), - logName()); + installSnapshotState = new LeaderInstallSnapshotState( + context.getConfigParams().getMaximumMessageSliceSize(), logName()); followerLogInfo.setLeaderInstallSnapshotState(installSnapshotState); } try { // Ensure the snapshot bytes are set - this is a no-op. 
- installSnapshotState.setSnapshotBytes(snapshotHolder.get().getSnapshotBytes()); + installSnapshotState.setSnapshotBytes(snapshotHolder.orElseThrow().getSnapshotBytes()); if (!installSnapshotState.canSendNextChunk()) { return; @@ -981,7 +981,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { } catch (IOException e) { log.warn("{}: Unable to send chunk: {}/{}. Reseting snapshot progress. Snapshot state: {}", logName(), installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks(), - installSnapshotState); + installSnapshotState, e); installSnapshotState.reset(); } } @@ -995,14 +995,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { installSnapshotState.startChunkTimer(); followerActor.tell( new InstallSnapshot(currentTerm(), context.getId(), - snapshotHolder.get().getLastIncludedIndex(), - snapshotHolder.get().getLastIncludedTerm(), + snapshotHolder.orElseThrow().getLastIncludedIndex(), + snapshotHolder.orElseThrow().getLastIncludedTerm(), snapshotChunk, chunkIndex, installSnapshotState.getTotalChunks(), OptionalInt.of(installSnapshotState.getLastChunkHashCode()), - serverConfig - ).toSerializable(followerLogInfo.getRaftVersion()), + serverConfig, + followerLogInfo.getRaftVersion()), actor() ); } @@ -1124,8 +1124,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior { private final ByteSource snapshotBytes; SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) { - this.lastIncludedTerm = snapshot.getLastAppliedTerm(); - this.lastIncludedIndex = snapshot.getLastAppliedIndex(); + lastIncludedTerm = snapshot.getLastAppliedTerm(); + lastIncludedIndex = snapshot.getLastAppliedIndex(); this.snapshotBytes = snapshotBytes; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java index fd2fbd332c..055a053500 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java @@ -15,8 +15,8 @@ import akka.cluster.Cluster; import akka.cluster.Member; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Optional; -import java.util.Random; import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import org.opendaylight.controller.cluster.raft.RaftActorContext; import org.opendaylight.controller.cluster.raft.RaftState; @@ -70,26 +70,19 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { AbstractRaftActorBehavior(final RaftActorContext context, final RaftState state) { this.context = requireNonNull(context); this.state = requireNonNull(state); - this.log = context.getLogger(); + log = context.getLogger(); logName = String.format("%s (%s)", context.getId(), state); } public static RaftActorBehavior createBehavior(final RaftActorContext context, final RaftState state) { - switch (state) { - case Candidate: - return new Candidate(context); - case Follower: - return new Follower(context); - case IsolatedLeader: - return new IsolatedLeader(context); - case Leader: - return new Leader(context); - case PreLeader: - return new PreLeader(context); - default: - throw new 
IllegalArgumentException("Unhandled state " + state); - } + return switch (state) { + case Candidate -> new Candidate(context); + case Follower -> new Follower(context); + case IsolatedLeader -> new IsolatedLeader(context); + case Leader -> new Leader(context); + case PreLeader -> new PreLeader(context); + }; } @Override @@ -212,10 +205,8 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { // the log with the later term is more up-to-date. If the logs // end with the same term, then whichever log is longer is // more up-to-date. - if (requestVote.getLastLogTerm() > lastTerm()) { - candidateLatest = true; - } else if (requestVote.getLastLogTerm() == lastTerm() - && requestVote.getLastLogIndex() >= lastIndex()) { + if (requestVote.getLastLogTerm() > lastTerm() + || requestVote.getLastLogTerm() == lastTerm() && requestVote.getLastLogIndex() >= lastIndex()) { candidateLatest = true; } @@ -247,7 +238,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { * @return a random election duration */ protected FiniteDuration electionDuration() { - long variance = new Random().nextInt(context.getConfigParams().getElectionTimeVariance()); + long variance = ThreadLocalRandom.current().nextInt(context.getConfigParams().getElectionTimeVariance()); return context.getConfigParams().getElectionTimeOutInterval().$plus( new FiniteDuration(variance, TimeUnit.MILLISECONDS)); } @@ -270,6 +261,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { * * @param interval the duration after which we should trigger a new election */ + // Non-final for testing protected void scheduleElection(final FiniteDuration interval) { stopElection(); @@ -301,7 +293,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { * * @return the actor */ - protected ActorRef actor() { + protected final ActorRef actor() { return context.getActor(); } @@ -419,14 +411,14 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { @Override public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) { - if (message instanceof AppendEntries) { - return appendEntries(sender, (AppendEntries) message); - } else if (message instanceof AppendEntriesReply) { - return handleAppendEntriesReply(sender, (AppendEntriesReply) message); - } else if (message instanceof RequestVote) { - return requestVote(sender, (RequestVote) message); - } else if (message instanceof RequestVoteReply) { - return handleRequestVoteReply(sender, (RequestVoteReply) message); + if (message instanceof AppendEntries appendEntries) { + return appendEntries(sender, appendEntries); + } else if (message instanceof AppendEntriesReply appendEntriesReply) { + return handleAppendEntriesReply(sender, appendEntriesReply); + } else if (message instanceof RequestVote requestVote) { + return requestVote(sender, requestVote); + } else if (message instanceof RequestVoteReply requestVoteReply) { + return handleRequestVoteReply(sender, requestVoteReply); } else { return null; } @@ -447,12 +439,12 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { return this; } - log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), this.state(), + log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), state(), newBehavior.state(), context.getTermInformation().getCurrentTerm()); try { close(); } catch (RuntimeException e) { - log.error("{}: Failed to close behavior : {}", 
logName(), this.state(), e); + log.error("{}: Failed to close behavior : {}", logName(), state(), e); } return newBehavior; } @@ -496,25 +488,24 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { } } - protected String getId() { + protected final String getId() { return context.getId(); } // Check whether we should update the term. In case of half-connected nodes, we want to ignore RequestVote // messages, as the candidate is not able to receive our response. protected boolean shouldUpdateTerm(final RaftRPC rpc) { - if (!(rpc instanceof RequestVote)) { + if (!(rpc instanceof RequestVote requestVote)) { return true; } - final RequestVote requestVote = (RequestVote) rpc; log.debug("{}: Found higher term in RequestVote rpc, verifying whether it's safe to update term.", logName()); final Optional maybeCluster = context.getCluster(); if (!maybeCluster.isPresent()) { return true; } - final Cluster cluster = maybeCluster.get(); + final Cluster cluster = maybeCluster.orElseThrow(); final Set unreachable = cluster.state().getUnreachable(); log.debug("{}: Cluster state: {}", logName(), unreachable); diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java index a8762ec76e..77f7a06c49 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java @@ -5,13 +5,11 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.raft.behaviors; import akka.actor.ActorRef; import akka.actor.ActorSelection; -import java.util.ArrayList; -import java.util.Collection; +import com.google.common.collect.ImmutableList; import org.opendaylight.controller.cluster.raft.PeerInfo; import org.opendaylight.controller.cluster.raft.RaftActorContext; import org.opendaylight.controller.cluster.raft.RaftState; @@ -44,22 +42,19 @@ import scala.concurrent.duration.FiniteDuration; *
  • If election timeout elapses: start new election * */ -public class Candidate extends AbstractRaftActorBehavior { - - private int voteCount; - +public final class Candidate extends AbstractRaftActorBehavior { + private final ImmutableList votingPeers; private final int votesRequired; - private final Collection votingPeers = new ArrayList<>(); + private int voteCount; public Candidate(final RaftActorContext context) { super(context, RaftState.Candidate); - for (PeerInfo peer: context.getPeers()) { - if (peer.isVoting()) { - votingPeers.add(peer.getId()); - } - } + votingPeers = context.getPeers().stream() + .filter(PeerInfo::isVoting) + .map(PeerInfo::getId) + .collect(ImmutableList.toImmutableList()); log.debug("{}: Election: Candidate has following voting peers: {}", logName(), votingPeers); @@ -75,12 +70,12 @@ public class Candidate extends AbstractRaftActorBehavior { } @Override - public final String getLeaderId() { + public String getLeaderId() { return null; } @Override - public final short getLeaderPayloadVersion() { + public short getLeaderPayloadVersion() { return -1; } @@ -134,7 +129,7 @@ public class Candidate extends AbstractRaftActorBehavior { @Override - final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) { + ApplyState getApplyStateFor(final ReplicatedLogEntry entry) { throw new IllegalStateException("A candidate should never attempt to apply " + entry); } @@ -158,9 +153,7 @@ public class Candidate extends AbstractRaftActorBehavior { return this; } - if (message instanceof RaftRPC) { - - RaftRPC rpc = (RaftRPC) message; + if (message instanceof RaftRPC rpc) { log.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc, context.getTermInformation().getCurrentTerm()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java new file mode 100644 index 0000000000..79c605a528 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.behaviors; + +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + +/** + * Serialization proxy for {@link FollowerIdentifier}. 
+ */ +final class FI implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private String value; + + @SuppressWarnings("checkstyle:RedundantModifier") + public FI() { + // For Externalizable + } + + FI(final String value) { + this.value = requireNonNull(value); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeObject(value); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + value = (String) in.readObject(); + } + + @java.io.Serial + private Object readResolve() { + return new FollowerIdentifier(value); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java index 288ce32a64..9dd630aade 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java @@ -16,8 +16,8 @@ import akka.cluster.Member; import akka.cluster.MemberStatus; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.IOException; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Optional; @@ -53,6 +53,7 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot; * convert to candidate * */ +// Non-final for testing public class Follower extends AbstractRaftActorBehavior { private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18; @@ -69,11 +70,13 @@ public class Follower extends AbstractRaftActorBehavior { this(context, null, (short)-1); } + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", + justification = "electionDuration() is not final for Candidate override") public Follower(final RaftActorContext context, final String initialLeaderId, final short initialLeaderPayloadVersion) { super(context, RaftState.Follower); - this.leaderId = initialLeaderId; - this.leaderPayloadVersion = initialLeaderPayloadVersion; + leaderId = initialLeaderId; + leaderPayloadVersion = initialLeaderPayloadVersion; initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams() .getSyncIndexThreshold()); @@ -162,12 +165,11 @@ public class Follower extends AbstractRaftActorBehavior { leaderId = appendEntries.getLeaderId(); leaderPayloadVersion = appendEntries.getPayloadVersion(); - if (appendEntries.getLeaderAddress().isPresent()) { - final String address = appendEntries.getLeaderAddress().get(); - log.debug("New leader address: {}", address); - - context.setPeerAddress(leaderId, address); - context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, address); + final var leaderAddress = appendEntries.leaderAddress(); + if (leaderAddress != null) { + log.debug("New leader address: {}", leaderAddress); + context.setPeerAddress(leaderId, leaderAddress); + context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, leaderAddress); } // First check if the logs are in sync or not @@ -324,8 +326,8 @@ public class Follower extends AbstractRaftActorBehavior { shouldCaptureSnapshot.compareAndSet(false, context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex())); - 
if (entry.getData() instanceof ServerConfigurationPayload) { - context.updatePeerIds((ServerConfigurationPayload)entry.getData()); + if (entry.getData() instanceof ServerConfigurationPayload serverConfiguration) { + context.updatePeerIds(serverConfiguration); } } @@ -452,12 +454,11 @@ public class Follower extends AbstractRaftActorBehavior { return this; } - if (!(message instanceof RaftRPC)) { + if (!(message instanceof RaftRPC rpc)) { // The rest of the processing requires the message to be a RaftRPC return null; } - final RaftRPC rpc = (RaftRPC) message; // If RPC request or response contains term T > currentTerm: // set currentTerm = T, convert to follower (§5.1) // This applies to all RPC messages and responses @@ -468,14 +469,14 @@ public class Follower extends AbstractRaftActorBehavior { context.getTermInformation().updateAndPersist(rpc.getTerm(), null); } - if (rpc instanceof InstallSnapshot) { - handleInstallSnapshot(sender, (InstallSnapshot) rpc); + if (rpc instanceof InstallSnapshot installSnapshot) { + handleInstallSnapshot(sender, installSnapshot); restartLastLeaderMessageTimer(); scheduleElection(electionDuration()); return this; } - if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) { + if (!(rpc instanceof RequestVote requestVote) || canGrantVote(requestVote)) { restartLastLeaderMessageTimer(); scheduleElection(electionDuration()); } @@ -548,7 +549,7 @@ public class Follower extends AbstractRaftActorBehavior { Address leaderAddress = leaderActor.anchorPath().address(); - CurrentClusterState state = cluster.get().state(); + CurrentClusterState state = cluster.orElseThrow().state(); Set unreachable = state.getUnreachable(); log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress, @@ -586,7 +587,7 @@ public class Follower extends AbstractRaftActorBehavior { return false; } - final Cluster cluster = maybeCluster.get(); + final Cluster cluster = maybeCluster.orElseThrow(); final Member selfMember = cluster.selfMember(); final CurrentClusterState state = cluster.state(); @@ -597,7 +598,7 @@ public class Follower extends AbstractRaftActorBehavior { + "all members {} self member: {}", logName(), unreachable, members, selfMember); // no unreachable peers means we cannot be isolated - if (unreachable.size() == 0) { + if (unreachable.isEmpty()) { return false; } @@ -607,11 +608,7 @@ public class Follower extends AbstractRaftActorBehavior { membersToCheck.removeAll(unreachable); // check if the only member not unreachable is us - if (membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember)) { - return true; - } - - return false; + return membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember); } private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) { @@ -638,7 +635,7 @@ public class Follower extends AbstractRaftActorBehavior { Snapshot snapshot = Snapshot.create( context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()), - new ArrayList<>(), + List.of(), installSnapshot.getLastIncludedIndex(), installSnapshot.getLastIncludedTerm(), installSnapshot.getLastIncludedIndex(), @@ -672,8 +669,7 @@ public class Follower extends AbstractRaftActorBehavior { } catch (IOException e) { log.debug("{}: Exception in InstallSnapshot of follower", logName(), e); - sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), - -1, false), actor()); + sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), 
-1, false), actor()); closeSnapshotTracker(); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java index 32c6da4b52..2586f2091e 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java @@ -7,10 +7,6 @@ */ package org.opendaylight.controller.cluster.raft.behaviors; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import org.opendaylight.yangtools.util.AbstractStringIdentifier; /** @@ -18,44 +14,16 @@ import org.opendaylight.yangtools.util.AbstractStringIdentifier; * * @author Thomas Pantelis */ -class FollowerIdentifier extends AbstractStringIdentifier { +final class FollowerIdentifier extends AbstractStringIdentifier { + @java.io.Serial private static final long serialVersionUID = 1L; - FollowerIdentifier(String followerId) { + FollowerIdentifier(final String followerId) { super(followerId); } + @java.io.Serial private Object writeReplace() { - return new Proxy(this); - } - - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private FollowerIdentifier identifier; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(FollowerIdentifier identifier) { - this.identifier = identifier; - } - - @Override - public void writeExternal(ObjectOutput out) throws IOException { - out.writeObject(identifier.getValue()); - } - - @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - identifier = new FollowerIdentifier((String) in.readObject()); - } - - private Object readResolve() { - return identifier; - } + return new FI(getValue()); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java index 3534ac5cf1..0e29352052 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java @@ -139,7 +139,7 @@ public class Leader extends AbstractLeader { final Optional requestedFollowerIdOptional = leadershipTransferContext.transferCohort.getRequestedFollowerId(); - if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.get().equals(followerId)) { + if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.orElseThrow().equals(followerId)) { // we want to transfer leadership to specific follower return; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java index 
cc4caa32eb..a2617dc639 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java @@ -38,9 +38,9 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { private final int snapshotChunkSize; private final String logName; private ByteSource snapshotBytes; - private int offset = INITIAL_OFFSET; + private long offset = INITIAL_OFFSET; // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset - private int replyReceivedForOffset = -1; + private long replyReceivedForOffset = -1; // if replyStatus is false, the previous chunk is attempted private boolean replyStatus = false; private int chunkIndex = FIRST_CHUNK_INDEX; @@ -49,7 +49,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { private int nextChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE; private long snapshotSize; private InputStream snapshotInputStream; - private Stopwatch chunkTimer = Stopwatch.createUnstarted(); + private final Stopwatch chunkTimer = Stopwatch.createUnstarted(); private byte[] currentChunk = null; LeaderInstallSnapshotState(final int snapshotChunkSize, final String logName) { @@ -75,8 +75,8 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { chunkIndex = FIRST_CHUNK_INDEX; } - int incrementOffset() { - // if offset is -1 doesnt matter whether it was the initial value or reset, move the offset to 0 to begin with + private long incrementOffset() { + // if offset is -1 doesn't matter whether it was the initial value or reset, move the offset to 0 to begin with if (offset == INITIAL_OFFSET) { offset = 0; } else { @@ -139,7 +139,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { byte[] getNextChunk() throws IOException { // increment offset to indicate next chunk is in flight, canSendNextChunk() wont let us hit this again until, // markSendStatus() is called with either success or failure - int start = incrementOffset(); + final var start = incrementOffset(); if (replyStatus || currentChunk == null) { int size = snapshotChunkSize; if (snapshotChunkSize > snapshotSize) { @@ -149,11 +149,11 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { } currentChunk = new byte[size]; - int numRead = snapshotInputStream.read(currentChunk); + final var numRead = snapshotInputStream.read(currentChunk); if (numRead != size) { throw new IOException(String.format( - "The # of bytes read from the input stream, %d," - + "does not match the expected # %d", numRead, size)); + "The # of bytes read from the input stream, %d, does not match the expected # %d", + numRead, size)); } nextChunkHashCode = Arrays.hashCode(currentChunk); @@ -183,7 +183,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { try { snapshotInputStream = snapshotBytes.openStream(); } catch (IOException e) { - throw new RuntimeException(e); + throw new IllegalStateException(e); } } @@ -198,7 +198,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable { try { snapshotInputStream.close(); } catch (IOException e) { - LOG.warn("{}: Error closing snapshot stream", logName); + LOG.warn("{}: Error closing snapshot stream", logName, e); } snapshotInputStream = null; diff --git 
a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java index 1538bed74c..c3b75161b0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java @@ -48,11 +48,12 @@ class SnapshotTracker implements AutoCloseable { * @param lastChunkHashCode the optional hash code for the chunk * @return true if this is the last chunk is received * @throws InvalidChunkException if the chunk index is invalid or out of order + * @throws IOException if there is a problem writing to the stream */ boolean addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt maybeLastChunkHashCode) - throws InvalidChunkException, IOException { + throws IOException { log.debug("addChunk: chunkIndex={}, lastChunkIndex={}, collectedChunks.size={}, lastChunkHashCode={}", - chunkIndex, lastChunkIndex, count, this.lastChunkHashCode); + chunkIndex, lastChunkIndex, count, lastChunkHashCode); if (sealed) { throw new InvalidChunkException("Invalid chunk received with chunkIndex " + chunkIndex @@ -63,10 +64,10 @@ class SnapshotTracker implements AutoCloseable { throw new InvalidChunkException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex); } - if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.getAsInt() != this.lastChunkHashCode) { + if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.orElseThrow() != lastChunkHashCode) { throw new InvalidChunkException("The hash code of the recorded last chunk does not match " - + "the senders hash code, expected " + this.lastChunkHashCode + " was " - + maybeLastChunkHashCode.getAsInt()); + + "the senders hash code, expected " + lastChunkHashCode + " was " + + maybeLastChunkHashCode.orElseThrow()); } bufferedStream.write(chunk); @@ -74,7 +75,7 @@ class SnapshotTracker implements AutoCloseable { count += chunk.length; sealed = chunkIndex == totalChunks; lastChunkIndex = chunkIndex; - this.lastChunkHashCode = Arrays.hashCode(chunk); + lastChunkHashCode = Arrays.hashCode(chunk); return sealed; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java index b670243b42..7fba245bf2 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.raft.client.messages; import akka.dispatch.ControlMessage; import java.io.Serializable; -import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizableProxy; /** * Message sent to a raft actor to shutdown gracefully. 
If it's the leader it will transfer leadership to a @@ -19,25 +18,18 @@ import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizabl * @author Thomas Pantelis */ public final class Shutdown implements Serializable, ControlMessage { + @java.io.Serial private static final long serialVersionUID = 1L; + public static final Shutdown INSTANCE = new Shutdown(); private Shutdown() { // Hidden on purpose } - private Object writeReplace() { - return new Proxy(); - } - - private static class Proxy extends EmptyExternalizableProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - super(INSTANCE); - } + @java.io.Serial + @SuppressWarnings("static-method") + private Object readResolve() { + return INSTANCE; } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java new file mode 100644 index 0000000000..491ca3fb0a --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.common.collect.ImmutableList; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.controller.cluster.raft.RaftVersions; +import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; +import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Argon serialization proxy for {@link AppendEntries}. 
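
AE relies on the WritableObjects helpers visible in this hunk: two related longs, such as prevLogTerm and prevLogIndex, share a single header byte, so small values stay compact on the wire. A standalone sketch of that pairing, assuming only yangtools-concepts on the classpath (the class and method names below are invented):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class PairedLongSketch {
        private PairedLongSketch() {
            // static helpers only
        }

        static byte[] write(final long prevLogTerm, final long prevLogIndex) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                WritableObjects.writeLongs(out, prevLogTerm, prevLogIndex);
            }
            return bytes.toByteArray();
        }

        static long[] read(final byte[] data) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(data))) {
                final byte hdr = WritableObjects.readLongHeader(in);
                return new long[] {
                    WritableObjects.readFirstLong(in, hdr),   // first value of the pair
                    WritableObjects.readSecondLong(in, hdr)   // second value of the pair
                };
            }
        }
    }
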
+ */ +final class AE implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private AppendEntries appendEntries; + + @SuppressWarnings("checkstyle:RedundantModifier") + public AE() { + // For Externalizable + } + + AE(final AppendEntries appendEntries) { + this.appendEntries = requireNonNull(appendEntries); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeShort(appendEntries.getLeaderRaftVersion()); + WritableObjects.writeLong(out, appendEntries.getTerm()); + out.writeObject(appendEntries.getLeaderId()); + + WritableObjects.writeLongs(out, appendEntries.getPrevLogTerm(), appendEntries.getPrevLogIndex()); + WritableObjects.writeLongs(out, appendEntries.getLeaderCommit(), appendEntries.getReplicatedToAllIndex()); + + out.writeShort(appendEntries.getPayloadVersion()); + + final var entries = appendEntries.getEntries(); + out.writeInt(entries.size()); + for (var e : entries) { + WritableObjects.writeLongs(out, e.getIndex(), e.getTerm()); + out.writeObject(e.getData()); + } + + out.writeObject(appendEntries.leaderAddress()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + short leaderRaftVersion = in.readShort(); + long term = WritableObjects.readLong(in); + String leaderId = (String) in.readObject(); + + byte hdr = WritableObjects.readLongHeader(in); + long prevLogTerm = WritableObjects.readFirstLong(in, hdr); + long prevLogIndex = WritableObjects.readSecondLong(in, hdr); + + hdr = WritableObjects.readLongHeader(in); + long leaderCommit = WritableObjects.readFirstLong(in, hdr); + long replicatedToAllIndex = WritableObjects.readSecondLong(in, hdr); + short payloadVersion = in.readShort(); + + int size = in.readInt(); + var entries = ImmutableList.builderWithExpectedSize(size); + for (int i = 0; i < size; i++) { + hdr = WritableObjects.readLongHeader(in); + entries.add(new SimpleReplicatedLogEntry(WritableObjects.readFirstLong(in, hdr), + WritableObjects.readSecondLong(in, hdr), (Payload) in.readObject())); + } + + String leaderAddress = (String)in.readObject(); + + appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit, + replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion, + leaderAddress); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(appendEntries); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java new file mode 100644 index 0000000000..6aa2ed8636 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.controller.cluster.raft.RaftVersions; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link AppendEntriesReply}. + */ +final class AR implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + // Flag bits + private static final int SUCCESS = 0x10; + private static final int FORCE_INSTALL_SNAPSHOT = 0x20; + private static final int NEEDS_LEADER_ADDRESS = 0x40; + + private AppendEntriesReply appendEntriesReply; + + @SuppressWarnings("checkstyle:RedundantModifier") + public AR() { + // For Externalizable + } + + AR(final AppendEntriesReply appendEntriesReply) { + this.appendEntriesReply = requireNonNull(appendEntriesReply); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeShort(appendEntriesReply.getRaftVersion()); + + int flags = 0; + if (appendEntriesReply.isSuccess()) { + flags |= SUCCESS; + } + if (appendEntriesReply.isForceInstallSnapshot()) { + flags |= FORCE_INSTALL_SNAPSHOT; + } + if (appendEntriesReply.isNeedsLeaderAddress()) { + flags |= NEEDS_LEADER_ADDRESS; + } + WritableObjects.writeLong(out, appendEntriesReply.getTerm(), flags); + + out.writeObject(appendEntriesReply.getFollowerId()); + + WritableObjects.writeLongs(out, appendEntriesReply.getLogLastIndex(), appendEntriesReply.getLogLastTerm()); + + out.writeShort(appendEntriesReply.getPayloadVersion()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + short raftVersion = in.readShort(); + + byte hdr = WritableObjects.readLongHeader(in); + final int flags = WritableObjects.longHeaderFlags(hdr); + + long term = WritableObjects.readLongBody(in, hdr); + String followerId = (String) in.readObject(); + + hdr = WritableObjects.readLongHeader(in); + long logLastIndex = WritableObjects.readFirstLong(in, hdr); + long logLastTerm = WritableObjects.readSecondLong(in, hdr); + + short payloadVersion = in.readShort(); + + appendEntriesReply = new AppendEntriesReply(followerId, term, getFlag(flags, SUCCESS), logLastIndex, + logLastTerm, payloadVersion, getFlag(flags, FORCE_INSTALL_SNAPSHOT), getFlag(flags, NEEDS_LEADER_ADDRESS), + raftVersion, RaftVersions.CURRENT_VERSION); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(appendEntriesReply); + } + + private static boolean getFlag(final int flags, final int bit) { + return (flags & bit) != 0; + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java index 76edc54990..038ad48b8e 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java +++ 
b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java @@ -8,6 +8,7 @@ package org.opendaylight.controller.cluster.raft.messages; public abstract class AbstractRaftRPC implements RaftRPC { + @java.io.Serial private static final long serialVersionUID = -6061342433962854822L; // term @@ -23,5 +24,6 @@ public abstract class AbstractRaftRPC implements RaftRPC { } // All implementations must use Externalizable Proxy pattern + @java.io.Serial abstract Object writeReplace(); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java index 6bdb7a499e..892ea3356a 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java @@ -10,24 +10,23 @@ package org.opendaylight.controller.cluster.raft.messages; import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; import java.io.Externalizable; import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.util.ArrayList; import java.util.List; -import java.util.Optional; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.raft.RaftVersions; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; /** * Invoked by leader to replicate log entries (§5.3); also used as heartbeat (§5.2). */ public final class AppendEntries extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = 1L; // So that follower can redirect clients @@ -56,7 +55,7 @@ public final class AppendEntries extends AbstractRaftRPC { private final String leaderAddress; - private AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex, + AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex, final long prevLogTerm, @NonNull final List entries, final long leaderCommit, final long replicatedToAllIndex, final short payloadVersion, final short recipientRaftVersion, final short leaderRaftVersion, @Nullable final String leaderAddress) { @@ -117,8 +116,8 @@ public final class AppendEntries extends AbstractRaftRPC { return payloadVersion; } - public Optional getLeaderAddress() { - return Optional.ofNullable(leaderAddress); + public @Nullable String leaderAddress() { + return leaderAddress; } public short getLeaderRaftVersion() { @@ -141,13 +140,14 @@ public final class AppendEntries extends AbstractRaftRPC { @Override Object writeReplace() { - return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new Proxy(this); + return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new AE(this); } /** * Fluorine version that adds the leader address. 
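
Note that the gate in writeReplace() above flipped direction: ProxyV2 is now the legacy form kept only for peers at or below the Fluorine wire version, while newer peers receive the compact AE proxy. The same selection, in isolation (the helper name is invented):

    import org.opendaylight.controller.cluster.raft.RaftVersions;

    final class ProxySelectionSketch {
        private ProxySelectionSketch() {
            // static helper only
        }

        static boolean useLegacyFluorineProxy(final short recipientRaftVersion) {
            // Older peers only understand the Fluorine-era proxies; everyone else gets AE/AR/etc.
            return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION;
        }
    }
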
*/ private static class ProxyV2 implements Externalizable { + @java.io.Serial private static final long serialVersionUID = 1L; private AppendEntries appendEntries; @@ -195,80 +195,19 @@ public final class AppendEntries extends AbstractRaftRPC { short payloadVersion = in.readShort(); int size = in.readInt(); - List entries = new ArrayList<>(size); + var entries = ImmutableList.builderWithExpectedSize(size); for (int i = 0; i < size; i++) { entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject())); } String leaderAddress = (String)in.readObject(); - appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit, + appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit, replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion, leaderAddress); } - private Object readResolve() { - return appendEntries; - } - } - - /** - * Pre-Fluorine version. - */ - @Deprecated - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private AppendEntries appendEntries; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(final AppendEntries appendEntries) { - this.appendEntries = appendEntries; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(appendEntries.getTerm()); - out.writeObject(appendEntries.leaderId); - out.writeLong(appendEntries.prevLogTerm); - out.writeLong(appendEntries.prevLogIndex); - out.writeLong(appendEntries.leaderCommit); - out.writeLong(appendEntries.replicatedToAllIndex); - out.writeShort(appendEntries.payloadVersion); - - out.writeInt(appendEntries.entries.size()); - for (ReplicatedLogEntry e: appendEntries.entries) { - out.writeLong(e.getIndex()); - out.writeLong(e.getTerm()); - out.writeObject(e.getData()); - } - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - long term = in.readLong(); - String leaderId = (String) in.readObject(); - long prevLogTerm = in.readLong(); - long prevLogIndex = in.readLong(); - long leaderCommit = in.readLong(); - long replicatedToAllIndex = in.readLong(); - short payloadVersion = in.readShort(); - - int size = in.readInt(); - List entries = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject())); - } - - appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit, - replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, RaftVersions.BORON_VERSION, null); - } - + @java.io.Serial private Object readResolve() { return appendEntries; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java index ef2469790b..033a19a7b2 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java +++ 
b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java @@ -18,6 +18,7 @@ import org.opendaylight.controller.cluster.raft.RaftVersions; * Reply for the AppendEntries message. */ public final class AppendEntriesReply extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = -7487547356392536683L; // true if follower contained entry matching @@ -59,7 +60,7 @@ public final class AppendEntriesReply extends AbstractRaftRPC { needsLeaderAddress, RaftVersions.CURRENT_VERSION, recipientRaftVersion); } - private AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex, + AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex, final long logLastTerm, final short payloadVersion, final boolean forceInstallSnapshot, final boolean needsLeaderAddress, final short raftVersion, final short recipientRaftVersion) { super(term); @@ -117,13 +118,14 @@ public final class AppendEntriesReply extends AbstractRaftRPC { @Override Object writeReplace() { - return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new Proxy(this); + return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new AR(this); } /** * Fluorine version that adds the needsLeaderAddress flag. */ private static class Proxy2 implements Externalizable { + @java.io.Serial private static final long serialVersionUID = 1L; private AppendEntriesReply appendEntriesReply; @@ -168,57 +170,7 @@ public final class AppendEntriesReply extends AbstractRaftRPC { RaftVersions.CURRENT_VERSION); } - private Object readResolve() { - return appendEntriesReply; - } - } - - /** - * Pre-Fluorine version. - */ - @Deprecated - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private AppendEntriesReply appendEntriesReply; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(final AppendEntriesReply appendEntriesReply) { - this.appendEntriesReply = appendEntriesReply; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeShort(appendEntriesReply.raftVersion); - out.writeLong(appendEntriesReply.getTerm()); - out.writeObject(appendEntriesReply.followerId); - out.writeBoolean(appendEntriesReply.success); - out.writeLong(appendEntriesReply.logLastIndex); - out.writeLong(appendEntriesReply.logLastTerm); - out.writeShort(appendEntriesReply.payloadVersion); - out.writeBoolean(appendEntriesReply.forceInstallSnapshot); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - short raftVersion = in.readShort(); - long term = in.readLong(); - String followerId = (String) in.readObject(); - boolean success = in.readBoolean(); - long logLastIndex = in.readLong(); - long logLastTerm = in.readLong(); - short payloadVersion = in.readShort(); - boolean forceInstallSnapshot = in.readBoolean(); - - appendEntriesReply = new AppendEntriesReply(followerId, term, success, logLastIndex, logLastTerm, - payloadVersion, forceInstallSnapshot, false, raftVersion, RaftVersions.CURRENT_VERSION); - } - + @java.io.Serial private Object readResolve() { return appendEntriesReply; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java new file mode 100644 index 0000000000..e9d95d84e7 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link InstallSnapshotReply}. + */ +final class IR implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + // Flags + private static final int SUCCESS = 0x10; + + private InstallSnapshotReply installSnapshotReply; + + @SuppressWarnings("checkstyle:RedundantModifier") + public IR() { + // For Externalizable + } + + IR(final InstallSnapshotReply installSnapshotReply) { + this.installSnapshotReply = requireNonNull(installSnapshotReply); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, installSnapshotReply.getTerm(), installSnapshotReply.isSuccess() ? 
SUCCESS : 0); + out.writeObject(installSnapshotReply.getFollowerId()); + out.writeInt(installSnapshotReply.getChunkIndex()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + final byte hdr = WritableObjects.readLongHeader(in); + final int flags = WritableObjects.longHeaderFlags(hdr); + + long term = WritableObjects.readLongBody(in, hdr); + String followerId = (String) in.readObject(); + int chunkIndex = in.readInt(); + + installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, (flags & SUCCESS) != 0); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(installSnapshotReply); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java new file mode 100644 index 0000000000..3247bb241e --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Optional; +import java.util.OptionalInt; +import org.opendaylight.controller.cluster.raft.RaftVersions; +import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link InstallSnapshot}. 
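
IS, like AR, IR and VR, folds its boolean flags (LAST_CHUNK_HASHCODE, SERVER_CONFIG) into the spare bits of the WritableObjects long header rather than writing separate booleans. A self-contained sketch of that trick, using only calls that appear in this patch (the class and constant below are invented):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class HeaderFlagSketch {
        private static final int SOME_FLAG = 0x10;

        private HeaderFlagSketch() {
            // static helpers only
        }

        static byte[] write(final long term, final boolean flagSet) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // The flag rides in the header of the encoded long, costing no extra bytes.
                WritableObjects.writeLong(out, term, flagSet ? SOME_FLAG : 0);
            }
            return bytes.toByteArray();
        }

        static String read(final byte[] data) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(data))) {
                final byte hdr = WritableObjects.readLongHeader(in);
                final boolean flagSet = (WritableObjects.longHeaderFlags(hdr) & SOME_FLAG) != 0;
                final long term = WritableObjects.readLongBody(in, hdr);
                return "term=" + term + ", flag=" + flagSet;
            }
        }
    }
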
+ */ +final class IS implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + // Flags + private static final int LAST_CHUNK_HASHCODE = 0x10; + private static final int SERVER_CONFIG = 0x20; + + private InstallSnapshot installSnapshot; + + @SuppressWarnings("checkstyle:RedundantModifier") + public IS() { + // For Externalizable + } + + IS(final InstallSnapshot installSnapshot) { + this.installSnapshot = requireNonNull(installSnapshot); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + int flags = 0; + final var lastChunkHashCode = installSnapshot.getLastChunkHashCode(); + if (lastChunkHashCode.isPresent()) { + flags |= LAST_CHUNK_HASHCODE; + } + final var serverConfig = installSnapshot.getServerConfig(); + if (serverConfig.isPresent()) { + flags |= SERVER_CONFIG; + } + + WritableObjects.writeLong(out, installSnapshot.getTerm(), flags); + out.writeObject(installSnapshot.getLeaderId()); + WritableObjects.writeLongs(out, installSnapshot.getLastIncludedIndex(), installSnapshot.getLastIncludedTerm()); + out.writeInt(installSnapshot.getChunkIndex()); + out.writeInt(installSnapshot.getTotalChunks()); + + if (lastChunkHashCode.isPresent()) { + out.writeInt(lastChunkHashCode.orElseThrow()); + } + if (serverConfig.isPresent()) { + out.writeObject(serverConfig.orElseThrow()); + } + + out.writeObject(installSnapshot.getData()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + byte hdr = WritableObjects.readLongHeader(in); + final int flags = WritableObjects.longHeaderFlags(hdr); + + long term = WritableObjects.readLongBody(in, hdr); + String leaderId = (String) in.readObject(); + + hdr = WritableObjects.readLongHeader(in); + long lastIncludedIndex = WritableObjects.readFirstLong(in, hdr); + long lastIncludedTerm = WritableObjects.readSecondLong(in, hdr); + int chunkIndex = in.readInt(); + int totalChunks = in.readInt(); + + OptionalInt lastChunkHashCode = getFlag(flags, LAST_CHUNK_HASHCODE) ? OptionalInt.of(in.readInt()) + : OptionalInt.empty(); + Optional serverConfig = getFlag(flags, SERVER_CONFIG) + ? 
Optional.of((ServerConfigurationPayload)in.readObject()) : Optional.empty(); + + byte[] data = (byte[])in.readObject(); + + installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, + chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(installSnapshot); + } + + private static boolean getFlag(final int flags, final int bit) { + return (flags & bit) != 0; + } +} + diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/IdentifiablePayload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java similarity index 80% rename from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/IdentifiablePayload.java rename to opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java index a323fba817..a6034c55cc 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/IdentifiablePayload.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java @@ -5,11 +5,12 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - -package org.opendaylight.controller.cluster.raft.protobuff.client.messages; +package org.opendaylight.controller.cluster.raft.messages; import org.opendaylight.yangtools.concepts.Identifiable; import org.opendaylight.yangtools.concepts.Identifier; public abstract class IdentifiablePayload extends Payload implements Identifiable { + @java.io.Serial + private static final long serialVersionUID = 1L; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java index 60c54f7fd0..3cd470f6cc 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java @@ -7,6 +7,7 @@ */ package org.opendaylight.controller.cluster.raft.messages; +import com.google.common.annotations.VisibleForTesting; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.Externalizable; import java.io.IOException; @@ -14,12 +15,14 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.util.Optional; import java.util.OptionalInt; +import org.opendaylight.controller.cluster.raft.RaftVersions; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; /** * Message sent from a leader to install a snapshot chunk on a follower. 
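
The chunkIndex/totalChunks/lastChunkHashCode fields carried by this message pair up with LeaderInstallSnapshotState and SnapshotTracker earlier in the patch: the leader slices the snapshot into fixed-size chunks and the follower cross-checks each chunk against the hash of the previous one. A rough, standalone sketch of that arithmetic (not lifted verbatim from the patch):

    import java.util.Arrays;

    final class ChunkingSketch {
        private ChunkingSketch() {
            // static helpers only
        }

        static int totalChunks(final long snapshotSize, final int chunkSize) {
            // Ceiling division: the last chunk may be shorter than chunkSize.
            return (int) ((snapshotSize + chunkSize - 1) / chunkSize);
        }

        static boolean previousChunkMatches(final byte[] previousChunk, final int reportedLastChunkHashCode) {
            // Mirrors the consistency check SnapshotTracker.addChunk() performs before accepting a chunk.
            return Arrays.hashCode(previousChunk) == reportedLastChunkHashCode;
        }
    }
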
*/ public final class InstallSnapshot extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = 1L; private final String leaderId; @@ -32,13 +35,16 @@ public final class InstallSnapshot extends AbstractRaftRPC { private final OptionalInt lastChunkHashCode; @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via writeReplace()") private final Optional serverConfig; + private final short recipientRaftVersion; - @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] " - + "object but this is OK since this class is merely a DTO and does not process byte[] internally. " - + "Also it would be inefficient to create a copy as the byte[] could be large.") + @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = """ + Stores a reference to an externally mutable byte[] object but this is OK since this class is merely a DTO and \ + does not process byte[] internally. Also it would be inefficient to create a copy as the byte[] could be \ + large.""") public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex, final long lastIncludedTerm, final byte[] data, final int chunkIndex, final int totalChunks, - final OptionalInt lastChunkHashCode, final Optional serverConfig) { + final OptionalInt lastChunkHashCode, final Optional serverConfig, + final short recipientRaftVersion) { super(term); this.leaderId = leaderId; this.lastIncludedIndex = lastIncludedIndex; @@ -48,13 +54,15 @@ public final class InstallSnapshot extends AbstractRaftRPC { this.totalChunks = totalChunks; this.lastChunkHashCode = lastChunkHashCode; this.serverConfig = serverConfig; + this.recipientRaftVersion = recipientRaftVersion; } + @VisibleForTesting public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex, final long lastIncludedTerm, final byte[] data, final int chunkIndex, final int totalChunks) { this(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, chunkIndex, totalChunks, OptionalInt.empty(), - Optional.empty()); + Optional.empty(), RaftVersions.CURRENT_VERSION); } public String getLeaderId() { @@ -69,9 +77,10 @@ public final class InstallSnapshot extends AbstractRaftRPC { return lastIncludedTerm; } - @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but " - + "this is OK since this class is merely a DTO and does not process the byte[] internally. " - + "Also it would be inefficient to create a return copy as the byte[] could be large.") + @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = """ + Exposes a mutable object stored in a field but this is OK since this class is merely a DTO and does not \ + process the byte[] internally. Also it would be inefficient to create a return copy as the byte[] could be \ + large.""") public byte[] getData() { return data; } @@ -92,10 +101,6 @@ public final class InstallSnapshot extends AbstractRaftRPC { return serverConfig; } - public Object toSerializable(final short version) { - return this; - } - @Override public String toString() { return "InstallSnapshot [term=" + getTerm() + ", leaderId=" + leaderId + ", lastIncludedIndex=" @@ -106,10 +111,11 @@ public final class InstallSnapshot extends AbstractRaftRPC { @Override Object writeReplace() { - return new Proxy(this); + return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? 
new Proxy(this) : new IS(this); } private static class Proxy implements Externalizable { + @java.io.Serial private static final long serialVersionUID = 1L; private InstallSnapshot installSnapshot; @@ -135,12 +141,12 @@ public final class InstallSnapshot extends AbstractRaftRPC { out.writeByte(installSnapshot.lastChunkHashCode.isPresent() ? 1 : 0); if (installSnapshot.lastChunkHashCode.isPresent()) { - out.writeInt(installSnapshot.lastChunkHashCode.getAsInt()); + out.writeInt(installSnapshot.lastChunkHashCode.orElseThrow()); } out.writeByte(installSnapshot.serverConfig.isPresent() ? 1 : 0); if (installSnapshot.serverConfig.isPresent()) { - out.writeObject(installSnapshot.serverConfig.get()); + out.writeObject(installSnapshot.serverConfig.orElseThrow()); } out.writeObject(installSnapshot.data); @@ -162,9 +168,10 @@ public final class InstallSnapshot extends AbstractRaftRPC { byte[] data = (byte[])in.readObject(); installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, - chunkIndex, totalChunks, lastChunkHashCode, serverConfig); + chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION); } + @java.io.Serial private Object readResolve() { return installSnapshot; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java index 693fe7e7b8..ed8b280081 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java @@ -7,12 +7,8 @@ */ package org.opendaylight.controller.cluster.raft.messages; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - public final class InstallSnapshotReply extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = 642227896390779503L; // The followerId - this will be used to figure out which follower is @@ -50,44 +46,6 @@ public final class InstallSnapshotReply extends AbstractRaftRPC { @Override Object writeReplace() { - return new Proxy(this); - } - - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private InstallSnapshotReply installSnapshotReply; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(final InstallSnapshotReply installSnapshotReply) { - this.installSnapshotReply = installSnapshotReply; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(installSnapshotReply.getTerm()); - out.writeObject(installSnapshotReply.followerId); - out.writeInt(installSnapshotReply.chunkIndex); - out.writeBoolean(installSnapshotReply.success); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - long term = in.readLong(); - String followerId = (String) in.readObject(); - int chunkIndex = in.readInt(); - boolean success = in.readBoolean(); - - installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, success); - } - - private Object readResolve() { - return installSnapshotReply; - } + return new IR(this); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java new file mode 100644 index 0000000000..c75385ab1e --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import java.io.Serializable; + +/** + * An instance of a {@link Payload} class is meant to be used as the Payload for {@link AppendEntries}. + * + *
    + * When an actor which is derived from RaftActor attempts to persistData it must pass an instance of the Payload class. + * Similarly when state needs to be applied to the derived RaftActor it will be passed an instance of the Payload class. + */ +public abstract class Payload implements Serializable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + /** + * Return the estimate of in-memory size of this payload. + * + * @return An estimate of the in-memory size of this payload. + */ + public abstract int size(); + + /** + * Return the estimate of serialized size of this payload when passed through serialization. The estimate needs to + * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of + * uncertainty. + * + * @return An estimate of serialized size. + */ + public abstract int serializedSize(); + + /** + * Return the serialization proxy for this object. + * + * @return Serialization proxy + */ + @java.io.Serial + protected abstract Object writeReplace(); +} diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/PersistentPayload.java similarity index 88% rename from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java rename to opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/PersistentPayload.java index 828ed2cbac..f327fd50c0 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/PersistentPayload.java @@ -5,7 +5,7 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ -package org.opendaylight.controller.cluster.raft.protobuff.client.messages; +package org.opendaylight.controller.cluster.raft.messages; /** * This is a tagging interface for a Payload implementation that needs to always be persisted regardless of diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java new file mode 100644 index 0000000000..b75f1b7dab --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link RequestVote}. 
+ */ +final class RV implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private RequestVote requestVote; + + @SuppressWarnings("checkstyle:RedundantModifier") + public RV() { + // For Externalizable + } + + RV(final RequestVote requestVote) { + this.requestVote = requireNonNull(requestVote); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, requestVote.getTerm()); + out.writeObject(requestVote.getCandidateId()); + WritableObjects.writeLongs(out, requestVote.getLastLogIndex(), requestVote.getLastLogTerm()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + long term = WritableObjects.readLong(in); + String candidateId = (String) in.readObject(); + + final byte hdr = WritableObjects.readLongHeader(in); + long lastLogIndex = WritableObjects.readFirstLong(in, hdr); + long lastLogTerm = WritableObjects.readSecondLong(in, hdr); + + requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm); + } + + @java.io.Serial + private Object readResolve() { + return requestVote; + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java index b23c76d06f..2b33a12950 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java @@ -7,15 +7,11 @@ */ package org.opendaylight.controller.cluster.raft.messages; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - /** * Invoked by candidates to gather votes (§5.2). */ public final class RequestVote extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = -6967509186297108657L; // candidate requesting vote @@ -57,44 +53,6 @@ public final class RequestVote extends AbstractRaftRPC { @Override Object writeReplace() { - return new Proxy(this); - } - - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private RequestVote requestVote; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(final RequestVote requestVote) { - this.requestVote = requestVote; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(requestVote.getTerm()); - out.writeObject(requestVote.candidateId); - out.writeLong(requestVote.lastLogIndex); - out.writeLong(requestVote.lastLogTerm); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - long term = in.readLong(); - String candidateId = (String) in.readObject(); - long lastLogIndex = in.readLong(); - long lastLogTerm = in.readLong(); - - requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm); - } - - private Object readResolve() { - return requestVote; - } + return new RV(this); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java index 2554c17fd8..01fd9abe2e 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java @@ -7,12 +7,8 @@ */ package org.opendaylight.controller.cluster.raft.messages; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - public final class RequestVoteReply extends AbstractRaftRPC { + @java.io.Serial private static final long serialVersionUID = 8427899326488775660L; // true means candidate received vote @@ -34,40 +30,6 @@ public final class RequestVoteReply extends AbstractRaftRPC { @Override Object writeReplace() { - return new Proxy(this); - } - - private static class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private RequestVoteReply requestVoteReply; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - } - - Proxy(final RequestVoteReply requestVoteReply) { - this.requestVoteReply = requestVoteReply; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(requestVoteReply.getTerm()); - out.writeBoolean(requestVoteReply.voteGranted); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - long term = in.readLong(); - boolean voteGranted = in.readBoolean(); - - requestVoteReply = new RequestVoteReply(term, voteGranted); - } - - private Object readResolve() { - return requestVoteReply; - } + return new VR(this); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java new file mode 100644 index 0000000000..d5a489bd97 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.messages; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link RequestVoteReply}. + */ +final class VR implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + // Flags + private static final int VOTE_GRANTED = 0x10; + + private RequestVoteReply requestVoteReply; + + @SuppressWarnings("checkstyle:RedundantModifier") + public VR() { + // For Externalizable + } + + VR(final RequestVoteReply requestVoteReply) { + this.requestVoteReply = requireNonNull(requestVoteReply); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, requestVoteReply.getTerm(), requestVoteReply.isVoteGranted() ? VOTE_GRANTED : 0); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + final byte hdr = WritableObjects.readLongHeader(in); + requestVoteReply = new RequestVoteReply(WritableObjects.readLongBody(in, hdr), + (WritableObjects.longHeaderFlags(hdr) & VOTE_GRANTED) != 0); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(requestVoteReply); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java new file mode 100644 index 0000000000..4e39e9884c --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link ApplyJournalEntries}. 
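For illustration only (this code is not part of the change): the VR proxy above packs the vote-granted bit into the spare header bits of the compact term encoding rather than writing a separate boolean. A minimal round-trip sketch using the same WritableObjects calls; the class name, the GRANTED constant and the stream setup are illustrative assumptions:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class FlaggedLongRoundTrip {
        // Hypothetical flag bit, analogous to VOTE_GRANTED above
        private static final int GRANTED = 0x10;

        static void demo(final long term, final boolean granted) throws IOException {
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                // The term value and the flag are written together in one compact encoding
                WritableObjects.writeLong(out, term, granted ? GRANTED : 0);
            }
            try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                final byte hdr = WritableObjects.readLongHeader(in);
                final long readTerm = WritableObjects.readLongBody(in, hdr);
                final boolean readGranted = (WritableObjects.longHeaderFlags(hdr) & GRANTED) != 0;
                assert readTerm == term && readGranted == granted;
            }
        }
    }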
+ */ +final class AJE implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private ApplyJournalEntries applyEntries; + + @SuppressWarnings("checkstyle:RedundantModifier") + public AJE() { + // For Externalizable + } + + AJE(final ApplyJournalEntries applyEntries) { + this.applyEntries = requireNonNull(applyEntries); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, applyEntries.getToIndex()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + applyEntries = new ApplyJournalEntries(WritableObjects.readLong(in)); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(applyEntries); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java index 3c0a8ac700..30da667c26 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java @@ -8,10 +8,6 @@ package org.opendaylight.controller.cluster.raft.persisted; import akka.dispatch.ControlMessage; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.io.Serializable; /** @@ -22,38 +18,8 @@ import java.io.Serializable; * * @author Thomas Pantelis */ -public class ApplyJournalEntries implements Serializable, ControlMessage { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private ApplyJournalEntries applyEntries; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final ApplyJournalEntries applyEntries) { - this.applyEntries = applyEntries; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(applyEntries.toIndex); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - applyEntries = new ApplyJournalEntries(in.readLong()); - } - - private Object readResolve() { - return applyEntries; - } - } - +public final class ApplyJournalEntries implements Serializable, ControlMessage { + @java.io.Serial private static final long serialVersionUID = 1L; private final long toIndex; @@ -66,12 +32,13 @@ public class ApplyJournalEntries implements Serializable, ControlMessage { return toIndex; } - private Object writeReplace() { - return new Proxy(this); - } - @Override public String toString() { return "ApplyJournalEntries [toIndex=" + toIndex + "]"; } + + @java.io.Serial + private Object writeReplace() { + return new AJE(this); + } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java new file mode 100644 index 0000000000..6bd34c25ca --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link DeleteEntries}. 
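For illustration only (not part of the change): the proxies introduced here (RV, VR, AJE, DE, LE, SS, UT) all rely on the standard Java serialization hand-off, where ObjectOutputStream invokes writeReplace() on the enclosing class and ObjectInputStream invokes readResolve() on the proxy. A minimal sketch of that idiom, with hypothetical Box/BoxProxy names:

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    final class Box implements Serializable {
        private static final long serialVersionUID = 1L;
        final long value;

        Box(final long value) {
            this.value = value;
        }

        // Called by ObjectOutputStream: the proxy, not Box itself, is written to the stream
        private Object writeReplace() {
            return new BoxProxy(this);
        }
    }

    final class BoxProxy implements Externalizable {
        private static final long serialVersionUID = 1L;
        private Box box;

        public BoxProxy() {
            // For Externalizable
        }

        BoxProxy(final Box box) {
            this.box = box;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeLong(box.value);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            box = new Box(in.readLong());
        }

        // Called by ObjectInputStream: the reconstructed Box replaces the proxy
        private Object readResolve() {
            return box;
        }
    }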
+ */ +final class DE implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private DeleteEntries deleteEntries; + + @SuppressWarnings("checkstyle:RedundantModifier") + public DE() { + // For Externalizable + } + + DE(final DeleteEntries deleteEntries) { + this.deleteEntries = requireNonNull(deleteEntries); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, deleteEntries.getFromIndex()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + deleteEntries = new DeleteEntries(WritableObjects.readLong(in)); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(deleteEntries); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java index 57f5af3a03..8b4eb8388a 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java @@ -7,10 +7,6 @@ */ package org.opendaylight.controller.cluster.raft.persisted; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.io.Serializable; /** @@ -18,38 +14,8 @@ import java.io.Serializable; * * @author Thomas Pantelis */ -public class DeleteEntries implements Serializable { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private DeleteEntries deleteEntries; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final DeleteEntries deleteEntries) { - this.deleteEntries = deleteEntries; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(deleteEntries.fromIndex); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException { - deleteEntries = new DeleteEntries(in.readLong()); - } - - private Object readResolve() { - return deleteEntries; - } - } - +public final class DeleteEntries implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; private final long fromIndex; @@ -62,12 +28,13 @@ public class DeleteEntries implements Serializable { return fromIndex; } - private Object writeReplace() { - return new Proxy(this); - } - @Override public String toString() { return "DeleteEntries [fromIndex=" + fromIndex + "]"; } + + @java.io.Serial + private Object writeReplace() { + return new DE(this); + } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java index aee90ace41..9939e2f2b0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java @@ -13,13 +13,16 @@ package org.opendaylight.controller.cluster.raft.persisted; * @author Thomas Pantelis */ public final class EmptyState implements Snapshot.State { + @java.io.Serial private static final long serialVersionUID = 1L; public static final EmptyState INSTANCE = new EmptyState(); private EmptyState() { + // Hidden on purpose } + @java.io.Serial @SuppressWarnings("static-method") private Object readResolve() { return INSTANCE; diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java new file mode 100644 index 0000000000..7e609ab35c --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.controller.cluster.raft.messages.Payload; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link SimpleReplicatedLogEntry}. 
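For illustration only (not part of the change): WritableObjects packs one or two longs behind a single header byte, so small magnitudes take far fewer than the fixed 16 bytes the removed Proxy spent on index and term. A minimal round-trip sketch of the calls the LE proxy below uses; the class name and the stream setup are illustrative assumptions:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class TwoLongsRoundTrip {
        static void demo() throws IOException {
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                // Two longs share one header byte; small values use only a few bytes
                WritableObjects.writeLongs(out, 42, 7);
            }
            try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                final byte hdr = WritableObjects.readLongHeader(in);
                final long index = WritableObjects.readFirstLong(in, hdr);
                final long term = WritableObjects.readSecondLong(in, hdr);
                assert index == 42 && term == 7;
            }
        }
    }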
+ */ +final class LE implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private long index; + private long term; + private Payload data; + + @SuppressWarnings("checkstyle:RedundantModifier") + public LE() { + // For Externalizable + } + + // For size estimation only, use full bit size + LE(final Void dummy) { + index = Long.MIN_VALUE; + term = Long.MIN_VALUE; + data = null; + } + + LE(final SimpleReplicatedLogEntry logEntry) { + index = logEntry.getIndex(); + term = logEntry.getTerm(); + data = logEntry.getData(); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLongs(out, index, term); + out.writeObject(data); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + final byte hdr = WritableObjects.readLongHeader(in); + index = WritableObjects.readFirstLong(in, hdr); + term = WritableObjects.readSecondLong(in, hdr); + data = (Payload) in.readObject(); + } + + @java.io.Serial + private Object readResolve() { + return new SimpleReplicatedLogEntry(index, term, data); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java new file mode 100644 index 0000000000..0e75d88fff --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +/** + * Marker interface for serializable objects which have been migrated. It implements {@link MigratedSerializable} and + * always returns {@code true} from {@link #isMigrated()}. This interface is marked as deprecated , as any of its users + * should also be marked as deprecated. + */ +@Deprecated +public interface LegacySerializable extends MigratedSerializable { + @Override + @Deprecated(forRemoval = true) + default boolean isMigrated() { + return true; + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java new file mode 100644 index 0000000000..a041f2f571 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import java.io.Serializable; + +/** + * Serialization proxy for {@link NoopPayload}. 
+ */ +// There is no need for Externalizable +final class NP implements Serializable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + @java.io.Serial + private Object readResolve() { + return NoopPayload.INSTANCE; + } +} + diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java index 46628c6078..0f076c55d7 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java @@ -8,8 +8,9 @@ package org.opendaylight.controller.cluster.raft.persisted; import akka.dispatch.ControlMessage; -import java.io.Serializable; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; +import org.apache.commons.lang3.SerializationUtils; +import org.eclipse.jdt.annotation.NonNull; +import org.opendaylight.controller.cluster.raft.messages.Payload; /** * Payload used for no-op log entries that are put into the journal by the PreLeader in order to commit @@ -17,22 +18,17 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa * * @author Thomas Pantelis */ -public final class NoopPayload extends Payload implements Serializable, ControlMessage { - public static final NoopPayload INSTANCE = new NoopPayload(); - - // There is no need for Externalizable - private static final class Proxy implements Serializable { - private static final long serialVersionUID = 1L; - - private Object readResolve() { - return INSTANCE; - } - } - +public final class NoopPayload extends Payload implements ControlMessage { + @java.io.Serial private static final long serialVersionUID = 1L; - private static final Proxy PROXY = new Proxy(); + private static final @NonNull NP PROXY = new NP(); + // Estimate to how big the proxy is. Note this includes object stream overhead, so it is a bit conservative + private static final int PROXY_SIZE = SerializationUtils.serialize(PROXY).length; + + public static final @NonNull NoopPayload INSTANCE = new NoopPayload(); private NoopPayload() { + // Hidden on purpose } @Override @@ -40,7 +36,13 @@ public final class NoopPayload extends Payload implements Serializable, ControlM return 0; } - private Object writeReplace() { + @Override + public int serializedSize() { + return PROXY_SIZE; + } + + @Override + protected Object writeReplace() { return PROXY; } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java new file mode 100644 index 0000000000..0523d08351 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.common.collect.ImmutableList; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; +import org.opendaylight.controller.cluster.raft.messages.Payload; +import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Externalizable proxy for {@link Snapshot}. + */ +final class SS implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private Snapshot snapshot; + + @SuppressWarnings("checkstyle:RedundantModifier") + public SS() { + // For Externalizable + } + + SS(final Snapshot snapshot) { + this.snapshot = requireNonNull(snapshot); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLongs(out, snapshot.getLastIndex(), snapshot.getLastTerm()); + WritableObjects.writeLongs(out, snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm()); + WritableObjects.writeLong(out, snapshot.getElectionTerm()); + out.writeObject(snapshot.getElectionVotedFor()); + out.writeObject(snapshot.getServerConfiguration()); + + final var unAppliedEntries = snapshot.getUnAppliedEntries(); + out.writeInt(unAppliedEntries.size()); + for (var e : unAppliedEntries) { + WritableObjects.writeLongs(out, e.getIndex(), e.getTerm()); + out.writeObject(e.getData()); + } + + out.writeObject(snapshot.getState()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + byte hdr = WritableObjects.readLongHeader(in); + long lastIndex = WritableObjects.readFirstLong(in, hdr); + long lastTerm = WritableObjects.readSecondLong(in, hdr); + + hdr = WritableObjects.readLongHeader(in); + long lastAppliedIndex = WritableObjects.readFirstLong(in, hdr); + long lastAppliedTerm = WritableObjects.readSecondLong(in, hdr); + long electionTerm = WritableObjects.readLong(in); + String electionVotedFor = (String) in.readObject(); + ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject(); + + int size = in.readInt(); + var unAppliedEntries = ImmutableList.builderWithExpectedSize(size); + for (int i = 0; i < size; i++) { + hdr = WritableObjects.readLongHeader(in); + unAppliedEntries.add(new SimpleReplicatedLogEntry( + WritableObjects.readFirstLong(in, hdr), WritableObjects.readSecondLong(in, hdr), + (Payload) in.readObject())); + } + + State state = (State) in.readObject(); + + snapshot = Snapshot.create(state, unAppliedEntries.build(), lastIndex, lastTerm, lastAppliedIndex, + lastAppliedTerm, electionTerm, electionVotedFor, serverConfig); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(snapshot); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java index 
055984229b..dbb64f1d82 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java @@ -15,12 +15,10 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.ObjectOutputStream; -import java.io.Serializable; -import java.util.ArrayList; import java.util.List; import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload; +import org.opendaylight.controller.cluster.raft.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.PersistentPayload; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -29,8 +27,9 @@ import org.slf4j.LoggerFactory; * * @author Thomas Pantelis */ -public final class ServerConfigurationPayload extends Payload implements PersistentPayload, Serializable { +public final class ServerConfigurationPayload extends Payload implements PersistentPayload { private static final class Proxy implements Externalizable { + @java.io.Serial private static final long serialVersionUID = 1L; private List serverConfig; @@ -43,35 +42,39 @@ public final class ServerConfigurationPayload extends Payload implements Persist } Proxy(final ServerConfigurationPayload payload) { - this.serverConfig = payload.getServerConfig(); + serverConfig = payload.getServerConfig(); } @Override public void writeExternal(final ObjectOutput out) throws IOException { out.writeInt(serverConfig.size()); - for (ServerInfo i : serverConfig) { - out.writeObject(i.getId()); - out.writeBoolean(i.isVoting()); + for (var serverInfo : serverConfig) { + out.writeObject(serverInfo.peerId()); + out.writeBoolean(serverInfo.isVoting()); } } @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { final int size = in.readInt(); - serverConfig = new ArrayList<>(size); + + final var builder = ImmutableList.builderWithExpectedSize(size); for (int i = 0; i < size; ++i) { final String id = (String) in.readObject(); final boolean voting = in.readBoolean(); - serverConfig.add(new ServerInfo(id, voting)); + builder.add(new ServerInfo(id, voting)); } + serverConfig = builder.build(); } + @java.io.Serial private Object readResolve() { return new ServerConfigurationPayload(serverConfig); } } private static final Logger LOG = LoggerFactory.getLogger(ServerConfigurationPayload.class); + @java.io.Serial private static final long serialVersionUID = 1L; @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class " @@ -90,6 +93,11 @@ public final class ServerConfigurationPayload extends Payload implements Persist @Override public int size() { + return serializedSize(); + } + + @Override + public int serializedSize() { if (serializedSize < 0) { try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { try (ObjectOutputStream out = new ObjectOutputStream(bos)) { @@ -112,21 +120,9 @@ public final class ServerConfigurationPayload extends Payload implements Persist } @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null) { - return false; - } - - if (getClass() != obj.getClass()) { - return false; - } - - ServerConfigurationPayload other = 
(ServerConfigurationPayload) obj; - return serverConfig.equals(other.serverConfig); + public boolean equals(final Object obj) { + return this == obj || obj instanceof ServerConfigurationPayload other + && serverConfig.equals(other.serverConfig); } @Override @@ -134,7 +130,8 @@ public final class ServerConfigurationPayload extends Payload implements Persist return "ServerConfigurationPayload [serverConfig=" + serverConfig + "]"; } - private Object writeReplace() { + @Override + protected Object writeReplace() { return new Proxy(this); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java index 6e1ca82f77..de70e17d36 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java @@ -17,47 +17,8 @@ import org.eclipse.jdt.annotation.NonNull; * * @author Thomas Pantelis */ -public final class ServerInfo { - private final String id; - private final boolean isVoting; - - public ServerInfo(@NonNull String id, boolean isVoting) { - this.id = requireNonNull(id); - this.isVoting = isVoting; - } - - public @NonNull String getId() { - return id; - } - - public boolean isVoting() { - return isVoting; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Boolean.hashCode(isVoting); - result = prime * result + id.hashCode(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof ServerInfo)) { - return false; - } - - final ServerInfo other = (ServerInfo) obj; - return isVoting == other.isVoting && id.equals(other.id); - } - - @Override - public String toString() { - return "ServerInfo [id=" + id + ", isVoting=" + isVoting + "]"; +public record ServerInfo(@NonNull String peerId, boolean isVoting) { + public ServerInfo { + requireNonNull(peerId); } } \ No newline at end of file diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java index 4c07e6b812..610d53a9e7 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java @@ -9,13 +9,10 @@ package org.opendaylight.controller.cluster.raft.persisted; import static java.util.Objects.requireNonNull; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.io.Serializable; +import org.apache.commons.lang3.SerializationUtils; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.Payload; /** * A {@link ReplicatedLogEntry} implementation. 
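For illustration only (not part of the change): in the hunks around this point, SimpleReplicatedLogEntry replaces the hand-computed estimatedSerializedSize() with a serializedSize() that adds the payload's serialized size to a PROXY_SIZE constant, measured once by serializing a worst-case LE instance. A minimal sketch of that measurement approach, with hypothetical Example/ExampleProxy names:

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    // Hypothetical stand-in for the real proxy; Long.MIN_VALUE mirrors the LE(Void)
    // constructor, which matters once variable-length encoding is involved
    final class ExampleProxy implements Serializable {
        private static final long serialVersionUID = 1L;
        private final long index = Long.MIN_VALUE;
        private final long term = Long.MIN_VALUE;
    }

    final class Example {
        // Measured once per class load; includes object-stream overhead, so it is conservative
        private static final int PROXY_SIZE = SerializationUtils.serialize(new ExampleProxy()).length;

        static int serializedSize(final int payloadSerializedSize) {
            return PROXY_SIZE + payloadSerializedSize;
        }
    }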
@@ -23,45 +20,10 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa * @author Thomas Pantelis */ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Serializable { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private ReplicatedLogEntry replicatedLogEntry; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final ReplicatedLogEntry replicatedLogEntry) { - this.replicatedLogEntry = replicatedLogEntry; - } - - static int estimatedSerializedSize(final ReplicatedLogEntry replicatedLogEntry) { - return 8 /* index */ + 8 /* term */ + replicatedLogEntry.getData().size() - + 400 /* estimated extra padding for class info */; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(replicatedLogEntry.getIndex()); - out.writeLong(replicatedLogEntry.getTerm()); - out.writeObject(replicatedLogEntry.getData()); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - replicatedLogEntry = new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()); - } - - private Object readResolve() { - return replicatedLogEntry; - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; + // Estimate to how big the proxy is. Note this includes object stream overhead, so it is a bit conservative. + private static final int PROXY_SIZE = SerializationUtils.serialize(new LE((Void) null)).length; private final long index; private final long term; @@ -98,7 +60,12 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria @Override public int size() { - return getData().size(); + return payload.size(); + } + + @Override + public int serializedSize() { + return PROXY_SIZE + payload.serializedSize(); } @Override @@ -111,14 +78,6 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria persistencePending = pending; } - private Object writeReplace() { - return new Proxy(this); - } - - public int estimatedSerializedSize() { - return Proxy.estimatedSerializedSize(this); - } - @Override public int hashCode() { final int prime = 31; @@ -131,20 +90,17 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - SimpleReplicatedLogEntry other = (SimpleReplicatedLogEntry) obj; - return index == other.index && term == other.term && payload.equals(other.payload); + return this == obj || obj instanceof SimpleReplicatedLogEntry other && index == other.index + && term == other.term && payload.equals(other.payload); } @Override public String toString() { return "SimpleReplicatedLogEntry [index=" + index + ", term=" + term + ", payload=" + payload + "]"; } + + @java.io.Serial + private Object writeReplace() { + return new LE(this); + } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java 
b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java index ca6e6dff30..250551a780 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.raft.persisted; -import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import akka.actor.ExtendedActorSystem; @@ -45,11 +44,12 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer { } @Override - public byte[] toBinary(Object obj) { - checkArgument(obj instanceof SimpleReplicatedLogEntry, "Unsupported object type %s", obj.getClass()); + public byte[] toBinary(final Object obj) { + if (!(obj instanceof SimpleReplicatedLogEntry replicatedLogEntry)) { + throw new IllegalArgumentException("Unsupported object type " + obj.getClass()); + } - SimpleReplicatedLogEntry replicatedLogEntry = (SimpleReplicatedLogEntry)obj; - final int estimatedSerializedSize = replicatedLogEntry.estimatedSerializedSize(); + final int estimatedSerializedSize = replicatedLogEntry.serializedSize(); final ByteArrayOutputStream bos = new ByteArrayOutputStream(estimatedSerializedSize); SerializationUtils.serialize(replicatedLogEntry, bos); @@ -62,7 +62,7 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer { } @Override - public Object fromBinaryJava(byte[] bytes, Class manifest) { + public Object fromBinaryJava(final byte[] bytes, final Class manifest) { try (ClassLoaderObjectInputStream is = new ClassLoaderObjectInputStream(system.dynamicAccess().classLoader(), new ByteArrayInputStream(bytes))) { return is.readObject(); diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java index 091009e2bd..81d2331bb4 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java @@ -7,24 +7,16 @@ */ package org.opendaylight.controller.cluster.raft.persisted; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.io.Serializable; -import java.util.ArrayList; import java.util.List; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; /** * Represents a snapshot of the raft data. * * @author Thomas Pantelis */ -// Not final for mocking -public class Snapshot implements Serializable { - +public final class Snapshot implements Serializable { /** * Implementations of this interface are used as the state payload for a snapshot. * @@ -42,70 +34,7 @@ public class Snapshot implements Serializable { } } - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private Snapshot snapshot; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. 
It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final Snapshot snapshot) { - this.snapshot = snapshot; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(snapshot.lastIndex); - out.writeLong(snapshot.lastTerm); - out.writeLong(snapshot.lastAppliedIndex); - out.writeLong(snapshot.lastAppliedTerm); - out.writeLong(snapshot.electionTerm); - out.writeObject(snapshot.electionVotedFor); - out.writeObject(snapshot.serverConfig); - - out.writeInt(snapshot.unAppliedEntries.size()); - for (ReplicatedLogEntry e: snapshot.unAppliedEntries) { - out.writeLong(e.getIndex()); - out.writeLong(e.getTerm()); - out.writeObject(e.getData()); - } - - out.writeObject(snapshot.state); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - long lastIndex = in.readLong(); - long lastTerm = in.readLong(); - long lastAppliedIndex = in.readLong(); - long lastAppliedTerm = in.readLong(); - long electionTerm = in.readLong(); - String electionVotedFor = (String) in.readObject(); - ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject(); - - int size = in.readInt(); - List unAppliedEntries = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - unAppliedEntries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), - (Payload) in.readObject())); - } - - State state = (State) in.readObject(); - - snapshot = Snapshot.create(state, unAppliedEntries, lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, - electionTerm, electionVotedFor, serverConfig); - } - - private Object readResolve() { - return snapshot; - } - } - + @java.io.Serial private static final long serialVersionUID = 1L; private final State state; @@ -118,7 +47,7 @@ public class Snapshot implements Serializable { private final String electionVotedFor; private final ServerConfigurationPayload serverConfig; - Snapshot(final State state, final List unAppliedEntries, final long lastIndex, + private Snapshot(final State state, final List unAppliedEntries, final long lastIndex, final long lastTerm, final long lastAppliedIndex, final long lastAppliedTerm, final long electionTerm, final String electionVotedFor, final ServerConfigurationPayload serverConfig) { this.state = state; @@ -160,7 +89,7 @@ public class Snapshot implements Serializable { } public long getLastIndex() { - return this.lastIndex; + return lastIndex; } public long getElectionTerm() { @@ -175,10 +104,6 @@ public class Snapshot implements Serializable { return serverConfig; } - private Object writeReplace() { - return new Proxy(this); - } - @Override public String toString() { return "Snapshot [lastIndex=" + lastIndex + ", lastTerm=" + lastTerm + ", lastAppliedIndex=" + lastAppliedIndex @@ -186,4 +111,9 @@ public class Snapshot implements Serializable { + ", state=" + state + ", electionTerm=" + electionTerm + ", electionVotedFor=" + electionVotedFor + ", ServerConfigPayload=" + serverConfig + "]"; } + + @java.io.Serial + private Object writeReplace() { + return new SS(this); + } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java new file mode 100644 index 0000000000..0fc6f6d618 --- /dev/null +++ 
b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.opendaylight.yangtools.concepts.WritableObjects; + +/** + * Serialization proxy for {@link UpdateElectionTerm}. + */ +final class UT implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private UpdateElectionTerm updateElectionTerm; + + @SuppressWarnings("checkstyle:RedundantModifier") + public UT() { + // For Externalizable + } + + UT(final UpdateElectionTerm updateElectionTerm) { + this.updateElectionTerm = requireNonNull(updateElectionTerm); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + WritableObjects.writeLong(out, updateElectionTerm.getCurrentTerm()); + out.writeObject(updateElectionTerm.getVotedFor()); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + updateElectionTerm = new UpdateElectionTerm(WritableObjects.readLong(in), (String) in.readObject()); + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(updateElectionTerm); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java index 939d893a2e..3ef7acbea3 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java @@ -7,48 +7,13 @@ */ package org.opendaylight.controller.cluster.raft.persisted; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; import java.io.Serializable; /** * Message class to persist election term information. */ -public class UpdateElectionTerm implements Serializable { - private static final class Proxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private UpdateElectionTerm updateElectionTerm; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final UpdateElectionTerm updateElectionTerm) { - this.updateElectionTerm = updateElectionTerm; - } - - @Override - public void writeExternal(final ObjectOutput out) throws IOException { - out.writeLong(updateElectionTerm.currentTerm); - out.writeObject(updateElectionTerm.votedFor); - } - - @Override - public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - updateElectionTerm = new UpdateElectionTerm(in.readLong(), (String) in.readObject()); - } - - private Object readResolve() { - return updateElectionTerm; - } - } - +public final class UpdateElectionTerm implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; private final long currentTerm; @@ -67,13 +32,14 @@ public class UpdateElectionTerm implements Serializable { return votedFor; } - private Object writeReplace() { - return new Proxy(this); - } - @Override public String toString() { return "UpdateElectionTerm [currentTerm=" + currentTerm + ", votedFor=" + votedFor + "]"; } + + @java.io.Serial + private Object writeReplace() { + return new UT(this); + } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java index d8d0ce5772..f59598876d 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.raft; -import static akka.pattern.Patterns.ask; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -17,16 +16,15 @@ import akka.actor.PoisonPill; import akka.actor.Terminated; import akka.dispatch.Dispatchers; import akka.dispatch.Mailboxes; +import akka.pattern.Patterns; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; import akka.util.Timeout; import com.google.common.base.Stopwatch; -import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.Uninterruptibles; import java.io.OutputStream; import java.time.Duration; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -44,10 +42,10 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat; import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior; import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState; import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal; import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore; import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor; @@ -126,7 
+124,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest TestRaftActor(final Builder builder) { super(builder); - this.collectorActor = builder.collectorActor; + collectorActor = builder.collectorActor; } public void startDropMessages(final Class msgClass) { @@ -148,26 +146,23 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest @SuppressWarnings({ "rawtypes", "unchecked", "checkstyle:IllegalCatch" }) @Override public void handleCommand(final Object message) { - if (message instanceof MockPayload) { - MockPayload payload = (MockPayload) message; + if (message instanceof MockPayload payload) { super.persistData(collectorActor, new MockIdentifier(payload.toString()), payload, false); return; } - if (message instanceof ServerConfigurationPayload) { - super.persistData(collectorActor, new MockIdentifier("serverConfig"), (Payload) message, false); + if (message instanceof ServerConfigurationPayload payload) { + super.persistData(collectorActor, new MockIdentifier("serverConfig"), payload, false); return; } - if (message instanceof SetPeerAddress) { - setPeerAddress(((SetPeerAddress) message).getPeerId(), - ((SetPeerAddress) message).getPeerAddress()); + if (message instanceof SetPeerAddress setPeerAddress) { + setPeerAddress(setPeerAddress.getPeerId(), setPeerAddress.getPeerAddress()); return; } - if (message instanceof TestPersist) { - persistData(((TestPersist) message).getActorRef(), ((TestPersist) message).getIdentifier(), - ((TestPersist) message).getPayload(), false); + if (message instanceof TestPersist testPersist) { + persistData(testPersist.getActorRef(), testPersist.getIdentifier(), testPersist.getPayload(), false); return; } @@ -190,9 +185,9 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest @Override @SuppressWarnings("checkstyle:IllegalCatch") public void createSnapshot(final ActorRef actorRef, final Optional installSnapshotStream) { - MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState())); + MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState())); if (installSnapshotStream.isPresent()) { - SerializationUtils.serialize(snapshotState, installSnapshotStream.get()); + SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow()); } actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef); @@ -214,13 +209,14 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest } public Builder collectorActor(final ActorRef newCollectorActor) { - this.collectorActor = newCollectorActor; + collectorActor = newCollectorActor; return this; } } } - protected static final int SNAPSHOT_CHUNK_SIZE = 100; + // FIXME: this is an arbitrary limit. 
Document interactions and/or improve them to improve maintainability + protected static final int MAXIMUM_MESSAGE_SLICE_SIZE = 700; protected final Logger testLog = LoggerFactory.getLogger(getClass()); @@ -242,16 +238,16 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest protected String follower2Id = factory.generateActorId("follower"); protected TestActorRef follower2Actor; protected ActorRef follower2CollectorActor; - protected RaftActorBehavior follower2; + protected RaftActorBehavior follower2; protected RaftActorContext follower2Context; - protected ImmutableMap peerAddresses; + protected Map peerAddresses; protected long initialTerm = 5; protected long currentTerm; protected int snapshotBatchCount = 4; - protected int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE; + protected int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE; protected List expSnapshotState = new ArrayList<>(); @@ -269,7 +265,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest configParams.setSnapshotBatchCount(snapshotBatchCount); configParams.setSnapshotDataThresholdPercentage(70); configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS)); - configParams.setSnapshotChunkSize(snapshotChunkSize); + configParams.setMaximumMessageSliceSize(maximumMessageSliceSize); return configParams; } @@ -287,7 +283,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest protected TestActorRef newTestRaftActor(final String id, final Map newPeerAddresses, final ConfigParams configParams) { return newTestRaftActor(id, TestRaftActor.newBuilder().peerAddresses(newPeerAddresses != null - ? newPeerAddresses : Collections.emptyMap()).config(configParams)); + ? newPeerAddresses : Map.of()).config(configParams)); } protected TestActorRef newTestRaftActor(final String id, final TestRaftActor.Builder builder) { @@ -423,7 +419,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest Stopwatch sw = Stopwatch.createStarted(); while (sw.elapsed(TimeUnit.SECONDS) <= 5) { try { - OnDemandRaftState raftState = (OnDemandRaftState)Await.result(ask(raftActor, + OnDemandRaftState raftState = (OnDemandRaftState)Await.result(Patterns.ask(raftActor, GetOnDemandRaftState.INSTANCE, timeout), timeout.duration()); verifier.accept(raftState); return; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java index 983b26da9c..65ac83d0d0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java @@ -145,25 +145,32 @@ public class AbstractReplicatedLogImplTest { from = replicatedLogImpl.getFrom(0, 20, ReplicatedLog.NO_MAX_SIZE); assertEquals(4, from.size()); assertEquals("A", from.get(0).getData().toString()); + assertEquals("B", from.get(1).getData().toString()); + assertEquals("C", from.get(2).getData().toString()); assertEquals("D", from.get(3).getData().toString()); + // Pre-calculate sizing information for use with capping + final int sizeB = from.get(1).serializedSize(); + final int sizeC = from.get(2).serializedSize(); + final int sizeD = from.get(3).serializedSize(); + from = 
replicatedLogImpl.getFrom(1, 2, ReplicatedLog.NO_MAX_SIZE); assertEquals(2, from.size()); assertEquals("B", from.get(0).getData().toString()); assertEquals("C", from.get(1).getData().toString()); - from = replicatedLogImpl.getFrom(1, 3, 2); + from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC); assertEquals(2, from.size()); assertEquals("B", from.get(0).getData().toString()); assertEquals("C", from.get(1).getData().toString()); - from = replicatedLogImpl.getFrom(1, 3, 3); + from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC + sizeD); assertEquals(3, from.size()); assertEquals("B", from.get(0).getData().toString()); assertEquals("C", from.get(1).getData().toString()); assertEquals("D", from.get(2).getData().toString()); - from = replicatedLogImpl.getFrom(1, 2, 3); + from = replicatedLogImpl.getFrom(1, 2, sizeB + sizeC + sizeD); assertEquals(2, from.size()); assertEquals("B", from.get(0).getData().toString()); assertEquals("C", from.get(1).getData().toString()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java index e99215ddba..a565932a02 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java @@ -18,11 +18,10 @@ import akka.actor.Status; import akka.pattern.Patterns; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; -import com.google.common.collect.ImmutableMap; -import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import org.junit.Test; import org.opendaylight.controller.cluster.notifications.LeaderStateChanged; @@ -143,9 +142,9 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat private void createRaftActors() { testLog.info("createRaftActors starting"); - final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), -1, -1, -1, -1, + final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(), -1, -1, -1, -1, 1, null, new org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload( - Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true), + List.of(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true), new ServerInfo(follower2Id, true), new ServerInfo(follower3Id, false)))); InMemorySnapshotStore.addSnapshot(leaderId, snapshot); @@ -156,28 +155,28 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat follower1NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower1Id + "-notifier")); follower1Actor = newTestRaftActor(follower1Id, TestRaftActor.newBuilder().peerAddresses( - ImmutableMap.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id), + Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id), follower3Id, testActorPath(follower3Id))) .config(newFollowerConfigParams()).roleChangeNotifier(follower1NotifierActor)); follower2NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower2Id + "-notifier")); follower2Actor = 
newTestRaftActor(follower2Id,TestRaftActor.newBuilder().peerAddresses( - ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), + Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), follower3Id, testActorPath(follower3Id))) .config(newFollowerConfigParams()).roleChangeNotifier(follower2NotifierActor)); follower3NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower3Id + "-notifier")); follower3Actor = newTestRaftActor(follower3Id,TestRaftActor.newBuilder().peerAddresses( - ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), + Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), follower2Id, follower2Actor.path().toString())) .config(newFollowerConfigParams()).roleChangeNotifier(follower3NotifierActor)); - peerAddresses = ImmutableMap.builder() - .put(follower1Id, follower1Actor.path().toString()) - .put(follower2Id, follower2Actor.path().toString()) - .put(follower3Id, follower3Actor.path().toString()).build(); + peerAddresses = Map.of( + follower1Id, follower1Actor.path().toString(), + follower2Id, follower2Actor.path().toString(), + follower3Id, follower3Actor.path().toString()); leaderConfigParams = newLeaderConfigParams(); leaderConfigParams.setElectionTimeoutFactor(3); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java index 4b241f9576..83aebc37c3 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -28,8 +29,8 @@ import java.util.function.Function; import org.apache.commons.lang3.SerializationUtils; import org.opendaylight.controller.cluster.DataPersistenceProvider; import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.concepts.Identifier; public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort { @@ -52,14 +53,14 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, super(builder.id, builder.peerAddresses != null ? builder.peerAddresses : Collections.emptyMap(), Optional.ofNullable(builder.config), PAYLOAD_VERSION); state = Collections.synchronizedList(new ArrayList<>()); - this.actorDelegate = mock(RaftActor.class); - this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class); + actorDelegate = mock(RaftActor.class); + recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class); - this.snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort : + snapshotCohortDelegate = builder.snapshotCohort != null ? 
builder.snapshotCohort : mock(RaftActorSnapshotCohort.class); if (builder.dataPersistenceProvider == null) { - setPersistence(builder.persistent.isPresent() ? builder.persistent.get() : true); + setPersistence(builder.persistent.isPresent() ? builder.persistent.orElseThrow() : true); } else { setPersistence(builder.dataPersistenceProvider); } @@ -174,9 +175,9 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, } private void applySnapshotState(final Snapshot.State newState) { - if (newState instanceof MockSnapshotState) { + if (newState instanceof MockSnapshotState mockState) { state.clear(); - state.addAll(((MockSnapshotState)newState).getState()); + state.addAll(mockState.getState()); } } @@ -213,7 +214,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, } @Override public String persistenceId() { - return this.getId(); + return getId(); } protected void newBehavior(final RaftActorBehavior newBehavior) { @@ -243,15 +244,15 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, } public static List fromState(final Snapshot.State from) { - if (from instanceof MockSnapshotState) { - return ((MockSnapshotState)from).getState(); + if (from instanceof MockSnapshotState mockState) { + return mockState.getState(); } throw new IllegalStateException("Unexpected snapshot State: " + from); } public ReplicatedLog getReplicatedLog() { - return this.getRaftActorContext().getReplicatedLog(); + return getRaftActorContext().getReplicatedLog(); } @Override @@ -296,52 +297,52 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, } public T id(final String newId) { - this.id = newId; + id = newId; return self(); } public T peerAddresses(final Map newPeerAddresses) { - this.peerAddresses = newPeerAddresses; + peerAddresses = newPeerAddresses; return self(); } public T config(final ConfigParams newConfig) { - this.config = newConfig; + config = newConfig; return self(); } public T dataPersistenceProvider(final DataPersistenceProvider newDataPersistenceProvider) { - this.dataPersistenceProvider = newDataPersistenceProvider; + dataPersistenceProvider = newDataPersistenceProvider; return self(); } public T roleChangeNotifier(final ActorRef newRoleChangeNotifier) { - this.roleChangeNotifier = newRoleChangeNotifier; + roleChangeNotifier = newRoleChangeNotifier; return self(); } public T snapshotMessageSupport(final RaftActorSnapshotMessageSupport newSnapshotMessageSupport) { - this.snapshotMessageSupport = newSnapshotMessageSupport; + snapshotMessageSupport = newSnapshotMessageSupport; return self(); } public T restoreFromSnapshot(final Snapshot newRestoreFromSnapshot) { - this.restoreFromSnapshot = newRestoreFromSnapshot; + restoreFromSnapshot = newRestoreFromSnapshot; return self(); } public T persistent(final Optional newPersistent) { - this.persistent = newPersistent; + persistent = newPersistent; return self(); } public T pauseLeaderFunction(final Function newPauseLeaderFunction) { - this.pauseLeaderFunction = newPauseLeaderFunction; + pauseLeaderFunction = newPauseLeaderFunction; return self(); } public T snapshotCohort(final RaftActorSnapshotCohort newSnapshotCohort) { - this.snapshotCohort = newSnapshotCohort; + snapshotCohort = newSnapshotCohort; return self(); } @@ -371,10 +372,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, @Override public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (state == null ? 
0 : state.hashCode()); - return result; + return Objects.hash(state); } @Override @@ -389,11 +387,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, return false; } MockSnapshotState other = (MockSnapshotState) obj; - if (state == null) { - if (other.state != null) { - return false; - } - } else if (!state.equals(other.state)) { + if (!Objects.equals(state, other.state)) { return false; } return true; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java index 8c17e1e8e8..6d4ec22e3d 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java @@ -8,6 +8,8 @@ package org.opendaylight.controller.cluster.raft; +import static java.util.Objects.requireNonNull; + import akka.actor.ActorRef; import akka.actor.ActorSelection; import akka.actor.ActorSystem; @@ -19,16 +21,17 @@ import java.io.OutputStream; import java.io.Serializable; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.function.Consumer; import org.opendaylight.controller.cluster.DataPersistenceProvider; import org.opendaylight.controller.cluster.NonPersistentDataProvider; import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.ByteState; import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State; import org.opendaylight.controller.cluster.raft.policy.RaftPolicy; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,8 +59,8 @@ public class MockRaftActorContext extends RaftActorContextImpl { @Override public void update(final long newTerm, final String newVotedFor) { - this.currentTerm = newTerm; - this.votedFor = newVotedFor; + currentTerm = newTerm; + votedFor = newVotedFor; // TODO : Write to some persistent state } @@ -109,7 +112,7 @@ public class MockRaftActorContext extends RaftActorContextImpl { } @Override public ActorSystem getActorSystem() { - return this.system; + return system; } @Override public ActorSelection getPeerActorSelection(final String peerId) { @@ -200,21 +203,22 @@ public class MockRaftActorContext extends RaftActorContextImpl { } } - public static class MockPayload extends Payload implements Serializable { + public static final class MockPayload extends Payload { private static final long serialVersionUID = 3121380393130864247L; - private String value = ""; - private int size; + + private final String data; + private final int size; public MockPayload() { + this(""); } public MockPayload(final String data) { - this.value = data; - size = value.length(); + this(data, data.length()); } public MockPayload(final String data, final int size) { - this(data); + this.data = requireNonNull(data); this.size = size; } @@ -223,39 +227,46 @@ public class MockRaftActorContext extends RaftActorContextImpl { return size; } + @Override + public int serializedSize() { + return size; + } + @Override public String toString() 
{ - return value; + return data; } @Override public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (value == null ? 0 : value.hashCode()); - return result; + return data.hashCode(); } @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - MockPayload other = (MockPayload) obj; - if (value == null) { - if (other.value != null) { - return false; - } - } else if (!value.equals(other.value)) { - return false; - } - return true; + return this == obj || obj instanceof MockPayload other && Objects.equals(data, other.data) + && size == other.size; + } + + @Override + protected Object writeReplace() { + return new MockPayloadProxy(data, size); + } + } + + private static final class MockPayloadProxy implements Serializable { + private static final long serialVersionUID = 1L; + + private final String value; + private final int size; + + MockPayloadProxy(String value, int size) { + this.value = value; + this.size = size; + } + + Object readResolve() { + return new MockPayload(value, size); } } @@ -264,19 +275,19 @@ public class MockRaftActorContext extends RaftActorContextImpl { public MockReplicatedLogBuilder createEntries(final int start, final int end, final int term) { for (int i = start; i < end; i++) { - this.mockLog.append(new SimpleReplicatedLogEntry(i, term, + mockLog.append(new SimpleReplicatedLogEntry(i, term, new MockRaftActorContext.MockPayload(Integer.toString(i)))); } return this; } public MockReplicatedLogBuilder addEntry(final int index, final int term, final MockPayload payload) { - this.mockLog.append(new SimpleReplicatedLogEntry(index, term, payload)); + mockLog.append(new SimpleReplicatedLogEntry(index, term, payload)); return this; } public ReplicatedLog build() { - return this.mockLog; + return mockLog; } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java index 9f930fd71a..f875f891b1 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java @@ -10,10 +10,10 @@ package org.opendaylight.controller.cluster.raft; import static org.junit.Assert.assertEquals; import akka.actor.ActorRef; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; -import java.util.Arrays; +import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import org.junit.Test; import org.opendaylight.controller.cluster.notifications.LeaderStateChanged; @@ -291,7 +291,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati // // We also add another voting follower actor into the mix even though it shoildn't affect the // outcome. 
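The MockRaftActorContext hunk above replaces MockPayload's direct serialization with a writeReplace()/readResolve() proxy (MockPayloadProxy). A minimal stand-alone sketch of that pattern follows; the Token and Proxy names are illustrative placeholders, not project classes:

import java.io.Serializable;

// Value type whose serialized form is delegated to a small proxy object.
final class Token implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String data;
    private final int size;

    Token(final String data, final int size) {
        this.data = data;
        this.size = size;
    }

    // Invoked by Java serialization: the proxy is written out instead of Token itself.
    Object writeReplace() {
        return new Proxy(data, size);
    }

    private static final class Proxy implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String data;
        private final int size;

        Proxy(final String data, final int size) {
            this.data = data;
            this.size = size;
        }

        // Invoked on deserialization: rebuilds the real object through its constructor.
        Object readResolve() {
            return new Token(data, size);
        }
    }
}

Round-tripping a Token through an ObjectOutputStream/ObjectInputStream pair always goes back through the constructor, so invariants such as a non-null check live in one place.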
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false), new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm, @@ -305,13 +305,13 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams(); follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName()); follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses( - ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString())) + Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString())) .config(follower2ConfigParams).persistent(Optional.of(false))); TestRaftActor follower2Instance = follower2Actor.underlyingActor(); follower2Instance.waitForRecoveryComplete(); follower2CollectorActor = follower2Instance.collectorActor(); - peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(), + peerAddresses = Map.of(follower1Id, follower1Actor.path().toString(), follower2Id, follower2Actor.path().toString()); createNewLeaderActor(); @@ -399,7 +399,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati // Set up a persisted ServerConfigurationPayload with the leader voting and the follower non-voting. - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, persistedTerm, persistedServerConfig); @@ -411,11 +411,10 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams(); follower1Actor = newTestRaftActor(follower1Id, follower1Builder.peerAddresses( - ImmutableMap.of(leaderId, testActorPath(leaderId))).config(followerConfigParams) + Map.of(leaderId, testActorPath(leaderId))).config(followerConfigParams) .persistent(Optional.of(false))); - peerAddresses = ImmutableMap.builder() - .put(follower1Id, follower1Actor.path().toString()).build(); + peerAddresses = Map.of(follower1Id, follower1Actor.path().toString()); leaderConfigParams = newLeaderConfigParams(); leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses) @@ -436,16 +435,16 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati currentTerm = persistedTerm + 1; assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm()); - assertEquals("Leader server config", Sets.newHashSet(persistedServerConfig.getServerConfig()), - Sets.newHashSet(leaderContext.getPeerServerInfo(true).getServerConfig())); + assertEquals("Leader server config", Set.copyOf(persistedServerConfig.getServerConfig()), + Set.copyOf(leaderContext.getPeerServerInfo(true).getServerConfig())); assertEquals("Leader isVotingMember", true, leaderContext.isVotingMember()); // Verify follower's context after startup 
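The server-config assertions above move from Guava's Sets.newHashSet to Set.copyOf. Equality checks behave the same, but Set.copyOf returns an unmodifiable set and rejects null elements, which suits assertion-only usage. A small JDK-only illustration, with no project types involved:

import java.util.List;
import java.util.Set;

public final class SetCopyDemo {
    public static void main(final String[] args) {
        List<String> servers = List.of("leader", "follower1", "follower2");

        // Unmodifiable defensive copy; element order does not matter for equals().
        Set<String> config = Set.copyOf(servers);

        System.out.println(config.equals(Set.of("follower1", "follower2", "leader"))); // true

        try {
            config.add("follower3");
        } catch (UnsupportedOperationException e) {
            System.out.println("copy is unmodifiable"); // always reached
        }
    }
}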
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class); assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm()); - assertEquals("Follower server config", Sets.newHashSet(persistedServerConfig.getServerConfig()), - Sets.newHashSet(follower1Context.getPeerServerInfo(true).getServerConfig())); + assertEquals("Follower server config", Set.copyOf(persistedServerConfig.getServerConfig()), + Set.copyOf(follower1Context.getPeerServerInfo(true).getServerConfig())); assertEquals("FollowerisVotingMember", false, follower1Context.isVotingMember()); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java index 815b8d9d48..fabfc6c280 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java @@ -19,10 +19,9 @@ import static org.mockito.Mockito.verify; import akka.actor.Props; import akka.testkit.TestActorRef; -import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.MoreExecutors; -import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.junit.After; import org.junit.Test; @@ -84,7 +83,7 @@ public class RaftActorContextImplTest extends AbstractActorTest { DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl(); RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(), "test", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1, - new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")), configParams, + Map.of("peer1", "peerAddress1"), configParams, createProvider(), applyState -> { }, LOG, MoreExecutors.directExecutor()); context.setPeerAddress("peer1", "peerAddress1_1"); @@ -98,24 +97,24 @@ public class RaftActorContextImplTest extends AbstractActorTest { public void testUpdatePeerIds() { RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(), "self", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1, - new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")), + Map.of("peer1", "peerAddress1"), new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG, MoreExecutors.directExecutor()); - context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", false), + context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", false), new ServerInfo("peer2", true), new ServerInfo("peer3", false)))); verifyPeerInfo(context, "peer1", null); verifyPeerInfo(context, "peer2", true); verifyPeerInfo(context, "peer3", false); assertEquals("isVotingMember", false, context.isVotingMember()); - context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", true), + context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", true), new ServerInfo("peer2", true), new ServerInfo("peer3", true)))); verifyPeerInfo(context, "peer2", true); verifyPeerInfo(context, "peer3", true); assertEquals("isVotingMember", true, context.isVotingMember()); - context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("peer2", true), + 
context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("peer2", true), new ServerInfo("peer3", true)))); verifyPeerInfo(context, "peer2", true); verifyPeerInfo(context, "peer3", true); @@ -130,7 +129,7 @@ public class RaftActorContextImplTest extends AbstractActorTest { PeerInfo peerInfo = context.getPeerInfo(peerId); if (voting != null) { assertNotNull("Expected peer " + peerId, peerInfo); - assertEquals("getVotingState for " + peerId, voting.booleanValue() + assertEquals("getVotingState for " + peerId, voting ? VotingState.VOTING : VotingState.NON_VOTING, peerInfo.getVotingState()); } else { assertNull("Unexpected peer " + peerId, peerInfo); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java index 7ef3c3237b..22369d7887 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java @@ -21,8 +21,8 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import org.opendaylight.controller.cluster.DataPersistenceProvider; import org.opendaylight.controller.cluster.PersistentDataProvider; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload; +import org.opendaylight.controller.cluster.raft.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.PersistentPayload; /** * Unit tests for RaftActorDelegatingPersistentDataProvider. 
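The hunk below adds serializedSize() and a writeReplace() override to the test payloads, matching the relocated base class in org.opendaylight.controller.cluster.raft.messages. For orientation only, here is a stand-alone analogue of that shape; PayloadSketch is a simplified stand-in, not the actual Payload API:

import java.io.Serializable;

// Stand-in base class with the three members exercised in the hunk below.
abstract class PayloadSketch implements Serializable {
    private static final long serialVersionUID = 1L;

    // In-memory footprint used for journal accounting.
    abstract int size();

    // Size the payload would occupy once serialized.
    abstract int serializedSize();

    // Serialization hook; payloads that are never persisted may refuse it.
    protected abstract Object writeReplace();
}

final class EmptyTestPayload extends PayloadSketch {
    private static final long serialVersionUID = 1L;

    @Override
    int size() {
        return 0;
    }

    @Override
    int serializedSize() {
        return 0;
    }

    @Override
    protected Object writeReplace() {
        // Mirrors the test payload below: never serialized, so refuse loudly.
        throw new UnsupportedOperationException();
    }
}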
@@ -98,12 +98,28 @@ public class RaftActorDelegatingPersistentDataProviderTest { } static class TestNonPersistentPayload extends Payload { + @java.io.Serial + private static final long serialVersionUID = 1L; + @Override public int size() { return 0; } + + @Override + public int serializedSize() { + return 0; + } + + @Override + protected Object writeReplace() { + // Not needed + throw new UnsupportedOperationException(); + } } static class TestPersistentPayload extends TestNonPersistentPayload implements PersistentPayload { + @java.io.Serial + private static final long serialVersionUID = 1L; } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java index 3e66c708dd..cceea83740 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java @@ -53,6 +53,7 @@ import org.opendaylight.controller.cluster.DataPersistenceProvider; import org.opendaylight.controller.cluster.PersistentDataProvider; import org.opendaylight.controller.cluster.raft.MockRaftActor.MockSnapshotState; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; @@ -60,7 +61,6 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo; import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.controller.cluster.raft.utils.DoNothingActor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java index 140735b646..884b16c11e 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java @@ -22,20 +22,14 @@ import akka.dispatch.Dispatchers; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; import com.google.common.base.Stopwatch; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; import com.google.common.io.ByteSource; import com.google.common.util.concurrent.MoreExecutors; import java.io.OutputStream; import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import 
java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.SerializationUtils; import org.junit.After; @@ -157,7 +151,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { followerActorContext.setCurrentBehavior(follower); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()), + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()), followerActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -178,7 +172,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class); assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus()); - assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get()); + assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow()); // Verify ServerConfigurationPayload entry in leader's log @@ -204,10 +198,9 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // Verify new server config was applied in both followers - assertEquals("Follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds()); + assertEquals("Follower peers", Set.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds()); - assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, FOLLOWER_ID), - newFollowerActorContext.getPeerIds()); + assertEquals("New follower peers", Set.of(LEADER_ID, FOLLOWER_ID), newFollowerActorContext.getPeerIds()); assertEquals("Follower commit index", 3, followerActorContext.getCommitIndex()); assertEquals("Follower last applied index", 3, followerActorContext.getLastApplied()); @@ -239,8 +232,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { 0, 2, 1).build()); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -258,7 +251,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class); assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus()); - assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get()); + assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow()); // Verify ServerConfigurationPayload entry in leader's log @@ -278,7 +271,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // Verify new server config was applied in the new follower - assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds()); + assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds()); LOG.info("testAddServerWithNoExistingFollower ending"); } @@ -291,8 +284,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = 
actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -304,7 +297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class); assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus()); - assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get()); + assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow()); // Verify ServerConfigurationPayload entry in leader's log @@ -325,7 +318,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // Verify new server config was applied in the new follower - assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds()); + assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds()); assertNoneMatching(newFollowerCollectorActor, InstallSnapshot.class, 500); @@ -361,8 +354,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -410,8 +403,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // Verify ServerConfigurationPayload entry in the new follower expectMatching(newFollowerCollectorActor, ApplyState.class, 2); - assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID2), - newFollowerActorContext.getPeerIds()); + assertEquals("New follower peers", Set.of(LEADER_ID, NEW_SERVER_ID2), newFollowerActorContext.getPeerIds()); LOG.info("testAddServerWithOperationInProgress ending"); } @@ -424,8 +416,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -447,7 +439,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class); assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus()); - assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get()); + assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow()); expectFirstMatching(newFollowerCollectorActor, ApplySnapshot.class); @@ -471,8 +463,8 @@ public class 
RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -503,8 +495,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -552,8 +544,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -597,8 +589,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -631,7 +623,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef noLeaderActor = actorFactory.createTestActor( - MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID, + MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString())).config(configParams).persistent(Optional.of(false)) .props().withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -653,8 +645,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), - initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), + MockLeaderRaftActor.props(Map.of(), initialActorContext) + .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor(); @@ -688,7 +680,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // The first AddServer should succeed with OK even though 
consensus wasn't reached AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class); assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus()); - assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get()); + assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow()); // Verify ServerConfigurationPayload entry in leader's log verifyServerConfigurationPayloadEntry(leaderActorContext.getReplicatedLog(), votingServer(LEADER_ID), @@ -713,7 +705,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()), + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()), initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -737,14 +729,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID)); TestActorRef followerRaftActor = actorFactory.createTestActor( - MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID, + MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID, leaderActor.path().toString())).config(configParams).persistent(Optional.of(false)) .props().withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(FOLLOWER_ID)); followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete(); - followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.emptyList(), - -1, -1, (short)0), leaderActor); + followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor); followerRaftActor.tell(new AddServer(NEW_SERVER_ID, newFollowerRaftActor.path().toString(), true), testKit.getRef()); @@ -760,7 +751,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl(); configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef noLeaderActor = actorFactory.createTestActor( - MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID, + MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString())).config(configParams).persistent(Optional.of(false)) .props().withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -769,7 +760,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { noLeaderActor.underlyingActor()); ReplicatedLogEntry serverConfigEntry = new SimpleReplicatedLogEntry(1, 1, - new ServerConfigurationPayload(Collections.emptyList())); + new ServerConfigurationPayload(List.of())); boolean handled = support.handleMessage(new ApplyState(null, null, serverConfigEntry), ActorRef.noSender()); assertEquals("Message handled", true, handled); @@ -789,7 +780,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef leaderActor = actorFactory.createTestActor( - MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID, + 
MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString())).config(configParams).persistent(Optional.of(false)) .props().withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -809,7 +800,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()), + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()), initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -831,14 +822,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID)); TestActorRef followerRaftActor = actorFactory.createTestActor( - MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID, + MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID, leaderActor.path().toString())).config(configParams).persistent(Optional.of(false)) .props().withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(FOLLOWER_ID)); followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete(); - followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.emptyList(), - -1, -1, (short)0), leaderActor); + followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor); followerRaftActor.tell(new RemoveServer(FOLLOWER_ID), testKit.getRef()); expectFirstMatching(leaderActor, RemoveServer.class); @@ -862,7 +852,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { final String downNodeId = "downNode"; TestActorRef leaderActor = actorFactory.createTestActor(MockLeaderRaftActor.props( - ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), + Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -871,14 +861,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef follower1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower1Actor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE, follower1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId); ActorRef follower2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower2Actor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID, follower1ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE, follower2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId); @@ 
-922,7 +912,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { RaftActorContext initialActorContext = new MockRaftActorContext(); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActorPath), + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActorPath), initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -931,7 +921,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { final ActorRef followerCollector = actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector")); actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString()), + CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString()), configParams, NO_PERSISTENCE, followerCollector) .withDispatcher(Dispatchers.DefaultDispatcherId()), followerActorId); @@ -955,7 +945,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { LOG.info("testRemoveServerLeaderWithNoFollowers starting"); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(Collections.emptyMap(), + MockLeaderRaftActor.props(Map.of(), new MockRaftActorContext()).withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); @@ -980,7 +970,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext()) .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor()); @@ -988,20 +978,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef follower1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower1RaftActor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector) .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId); ActorRef follower2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower2RaftActor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector) .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId); // Send first ChangeServersVotingStatus message - leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)), + leaderActor.tell(new 
ChangeServersVotingStatus(Map.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)), testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus()); @@ -1027,7 +1017,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // Send second ChangeServersVotingStatus message - leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, true)), testKit.getRef()); + leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, true)), testKit.getRef()); reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus()); @@ -1059,7 +1049,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, + MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext()) .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor()); @@ -1067,20 +1057,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef follower1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower1RaftActor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector) .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId); ActorRef follower2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); final TestActorRef follower2RaftActor = actorFactory.createTestActor( - CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(), + CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(), FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector) .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId); // Send ChangeServersVotingStatus message - leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef()); + leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus()); @@ -1109,10 +1099,10 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { LOG.info("testChangeLeaderToNonVotingInSingleNode starting"); TestActorRef leaderActor = actorFactory.createTestActor( - MockLeaderRaftActor.props(ImmutableMap.of(), new MockRaftActorContext()) + MockLeaderRaftActor.props(Map.of(), new MockRaftActorContext()) .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID)); - leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, 
false)), testKit.getRef()); + leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.INVALID_REQUEST, reply.getStatus()); @@ -1134,7 +1124,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // via the server config. The server config will also contain 2 voting peers that are down (ie no // actors created). - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(node1ID, false), new ServerInfo(node2ID, false), new ServerInfo("downNode1", true), new ServerInfo("downNode2", true))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig); @@ -1149,14 +1139,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef node1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node1RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node1ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node1ID, Map.of(), configParams, PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID); CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor(); ActorRef node2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node2RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node2ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node2ID, Map.of(), configParams, PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID); CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor(); @@ -1183,7 +1173,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // First send the message such that node1 has no peer address for node2 - should fail. - ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true, + ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true, node2ID, true, "downNode1", false, "downNode2", false)); node1RaftActorRef.tell(changeServers, testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); @@ -1194,7 +1184,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { long term = node1RaftActor.getRaftActorContext().getTermInformation().getCurrentTerm(); node1RaftActorRef.tell(new AppendEntries(term, "downNode1", -1L, -1L, - Collections.emptyList(), 0, -1, (short)1), ActorRef.noSender()); + List.of(), 0, -1, (short)1), ActorRef.noSender()); // Wait for the ElectionTimeout to clear the leaderId. The leaderId must be null so on the next // ChangeServersVotingStatus message, it will try to elect a leader. @@ -1241,7 +1231,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID) ? 
actorFactory.createTestActorPath(node2ID) : null; - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(node1ID, false), new ServerInfo(node2ID, true))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig); @@ -1257,7 +1247,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef node1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node1RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node1ID, ImmutableMap.of(), configParams1, + CollectingMockRaftActor.props(node1ID, Map.of(), configParams1, PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID); final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor(); @@ -1267,7 +1257,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef node2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node2RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node2ID, ImmutableMap.of(), configParams2, + CollectingMockRaftActor.props(node2ID, Map.of(), configParams2, PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID); CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor(); @@ -1279,13 +1269,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { node2RaftActor.setDropMessageOfType(RequestVote.class); - ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true)); + ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true)); node1RaftActorRef.tell(changeServers, testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.NO_LEADER, reply.getStatus()); - assertEquals("Server config", ImmutableSet.of(nonVotingServer(node1ID), votingServer(node2ID)), - new HashSet<>(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig())); + assertEquals("Server config", Set.of(nonVotingServer(node1ID), votingServer(node2ID)), + Set.copyOf(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig())); assertEquals("getRaftState", RaftState.Follower, node1RaftActor.getRaftState()); LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout ending"); @@ -1307,7 +1297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { configParams.setElectionTimeoutFactor(3); configParams.setPeerAddressResolver(peerAddressResolver); - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(node1ID, false), new ServerInfo(node2ID, false))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig); @@ -1322,14 +1312,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef node1Collector = actorFactory.createActor( MessageCollectorActor.props(), 
actorFactory.generateActorId("collector")); TestActorRef node1RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node1ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node1ID, Map.of(), configParams, PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID); final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor(); ActorRef node2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node2RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node2ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node2ID, Map.of(), configParams, PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID); final CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor(); @@ -1339,7 +1329,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { // forward the request to node2. ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus( - ImmutableMap.of(node1ID, true, node2ID, true)); + Map.of(node1ID, true, node2ID, true)); node1RaftActorRef.tell(changeServers, testKit.getRef()); ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class); assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus()); @@ -1373,7 +1363,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID) ? actorFactory.createTestActorPath(node2ID) : null); - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo(node1ID, false), new ServerInfo(node2ID, true))); SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig); @@ -1385,14 +1375,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ActorRef node1Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node1RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node1ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node1ID, Map.of(), configParams, PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID); final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor(); ActorRef node2Collector = actorFactory.createActor( MessageCollectorActor.props(), actorFactory.generateActorId("collector")); TestActorRef node2RaftActorRef = actorFactory.createTestActor( - CollectingMockRaftActor.props(node2ID, ImmutableMap.of(), configParams, + CollectingMockRaftActor.props(node2ID, Map.of(), configParams, PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID); CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor(); @@ -1403,7 +1393,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { node2RaftActor.setDropMessageOfType(RequestVote.class); - ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true, + ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true, node2ID, true)); 
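The recurring substitution in this file is ImmutableMap.of to Map.of for the small, fixed peer-address and voting-status maps. For these uses the two are interchangeable, but Map.of additionally rejects null keys and values, rejects duplicate keys at construction, and leaves iteration order unspecified (Guava's ImmutableMap keeps insertion order). A brief JDK-only illustration with placeholder actor paths:

import java.util.Map;

public final class MapOfDemo {
    public static void main(final String[] args) {
        // Unmodifiable map of peer id -> actor path (paths are placeholders).
        Map<String, String> peers = Map.of(
            "follower1", "akka://test/user/follower1",
            "follower2", "akka://test/user/follower2");

        System.out.println(peers.get("follower1")); // akka://test/user/follower1

        try {
            Map.of("follower1", "a", "follower1", "b"); // duplicate key
        } catch (IllegalArgumentException e) {
            System.out.println("duplicate keys are rejected");
        }

        try {
            peers.put("follower3", "x");
        } catch (UnsupportedOperationException e) {
            System.out.println("result is unmodifiable");
        }
    }
}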
node1RaftActorRef.tell(changeServers, testKit.getRef()); @@ -1464,7 +1454,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ReplicatedLogEntry logEntry = log.get(log.lastIndex()); assertEquals("Last log entry payload class", ServerConfigurationPayload.class, logEntry.getData().getClass()); ServerConfigurationPayload payload = (ServerConfigurationPayload)logEntry.getData(); - assertEquals("Server config", ImmutableSet.copyOf(expected), new HashSet<>(payload.getServerConfig())); + assertEquals("Server config", Set.of(expected), Set.copyOf(payload.getServerConfig())); } private static RaftActorContextImpl newFollowerContext(final String id, @@ -1476,7 +1466,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { ElectionTermImpl termInfo = new ElectionTermImpl(noPersistence, id, LOG); termInfo.update(1, LEADER_ID); return new RaftActorContextImpl(actor, actor.underlyingActor().getContext(), - id, termInfo, -1, -1, ImmutableMap.of(LEADER_ID, ""), configParams, + id, termInfo, -1, -1, Map.of(LEADER_ID, ""), configParams, noPersistence, applyState -> actor.tell(applyState, actor), LOG, MoreExecutors.directExecutor()); } @@ -1486,7 +1476,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { AbstractMockRaftActor(final String id, final Map peerAddresses, final Optional config, final boolean persistent, final ActorRef collectorActor) { - super(builder().id(id).peerAddresses(peerAddresses).config(config.get()) + super(builder().id(id).peerAddresses(peerAddresses).config(config.orElseThrow()) .persistent(Optional.of(persistent))); this.collectorActor = collectorActor; } @@ -1573,9 +1563,9 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { @Override @SuppressWarnings("checkstyle:IllegalCatch") public void createSnapshot(final ActorRef actorRef, final Optional installSnapshotStream) { - MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState())); + MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState())); if (installSnapshotStream.isPresent()) { - SerializationUtils.serialize(snapshotState, installSnapshotStream.get()); + SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow()); } actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef); @@ -1591,7 +1581,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest { public static class MockNewFollowerRaftActor extends AbstractMockRaftActor { public MockNewFollowerRaftActor(final ConfigParams config, final ActorRef collectorActor) { - super(NEW_SERVER_ID, new HashMap<>(), Optional.of(config), NO_PERSISTENCE, collectorActor); + super(NEW_SERVER_ID, Map.of(), Optional.of(config), NO_PERSISTENCE, collectorActor); setPersistence(false); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java index 96e04df15a..fde56a9a21 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.timeout; import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import akka.actor.ActorRef; import akka.actor.PoisonPill; @@ -37,15 +38,10 @@ import akka.persistence.SnapshotOffer; import akka.protobuf.ByteString; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; -import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.Uninterruptibles; import java.io.ByteArrayOutputStream; import java.io.ObjectOutputStream; import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -141,22 +137,20 @@ public class RaftActorTest extends AbstractActorTest { // log entry. config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); - ImmutableMap peerAddresses = ImmutableMap.builder() - .put("member1", "address").build(); + Map peerAddresses = Map.of("member1", "address"); ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId, peerAddresses, config), persistenceId); kit.watch(followerActor); - List snapshotUnappliedEntries = new ArrayList<>(); - ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")); - snapshotUnappliedEntries.add(entry1); + List snapshotUnappliedEntries = List.of( + new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"))); int lastAppliedDuringSnapshotCapture = 3; int lastIndexDuringSnapshotCapture = 4; // 4 messages as part of snapshot, which are applied to state - MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList( + MockSnapshotState snapshotState = new MockSnapshotState(List.of( new MockRaftActorContext.MockPayload("A"), new MockRaftActorContext.MockPayload("B"), new MockRaftActorContext.MockPayload("C"), @@ -167,13 +161,9 @@ public class RaftActorTest extends AbstractActorTest { InMemorySnapshotStore.addSnapshot(persistenceId, snapshot); // add more entries after snapshot is taken - List entries = new ArrayList<>(); ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(5, 1, new MockRaftActorContext.MockPayload("F", 2)); ReplicatedLogEntry entry3 = new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("G", 3)); ReplicatedLogEntry entry4 = new SimpleReplicatedLogEntry(7, 1, new MockRaftActorContext.MockPayload("H", 4)); - entries.add(entry2); - entries.add(entry3); - entries.add(entry4); final int lastAppliedToState = 5; final int lastIndex = 7; @@ -199,7 +189,7 @@ public class RaftActorTest extends AbstractActorTest { mockRaftActor.waitForRecoveryComplete(); RaftActorContext context = mockRaftActor.getRaftActorContext(); - assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(), + assertEquals("Journal log size", snapshotUnappliedEntries.size() + 3, context.getReplicatedLog().size()); assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize()); assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex()); @@ -223,8 +213,7 @@ public class RaftActorTest extends AbstractActorTest { config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef ref = factory.createTestActor(MockRaftActor.props(persistenceId, - ImmutableMap.builder().put("member1", "address").build(), - config, createProvider()), persistenceId); + Map.of("member1", "address"), config, createProvider()), persistenceId); MockRaftActor mockRaftActor = ref.underlyingActor(); @@ -246,8 +235,7 @@ public 
class RaftActorTest extends AbstractActorTest { InMemoryJournal.addWriteMessagesCompleteLatch(persistenceId, 1); TestActorRef ref = factory.createTestActor(MockRaftActor.props(persistenceId, - ImmutableMap.builder().put("member1", "address").build(), - config, createProvider()) + Map.of("member1", "address"), config, createProvider()) .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId); InMemoryJournal.waitForWriteMessagesComplete(persistenceId); @@ -258,8 +246,7 @@ public class RaftActorTest extends AbstractActorTest { factory.killActor(ref, kit); config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); - ref = factory.createTestActor(MockRaftActor.props(persistenceId, - ImmutableMap.builder().put("member1", "address").build(), config, + ref = factory.createTestActor(MockRaftActor.props(persistenceId, Map.of("member1", "address"), config, createProvider()).withDispatcher(Dispatchers.DefaultDispatcherId()), factory.generateActorId("follower-")); @@ -284,7 +271,7 @@ public class RaftActorTest extends AbstractActorTest { config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId, - Collections.emptyMap(), config), persistenceId); + Map.of(), config), persistenceId); MockRaftActor mockRaftActor = mockActorRef.underlyingActor(); @@ -295,7 +282,7 @@ public class RaftActorTest extends AbstractActorTest { mockRaftActor.setRaftActorRecoverySupport(mockSupport); Snapshot snapshot = Snapshot.create(ByteState.of(new byte[]{1}), - Collections.emptyList(), 3, 1, 3, 1, -1, null, null); + List.of(), 3, 1, 3, 1, -1, null, null); SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot); mockRaftActor.handleRecover(snapshotOffer); @@ -336,28 +323,29 @@ public class RaftActorTest extends AbstractActorTest { // Wait for akka's recovery to complete so it doesn't interfere. 
mockRaftActor.waitForRecoveryComplete(); - ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class)); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class)); + ApplySnapshot applySnapshot = new ApplySnapshot( + Snapshot.create(null, null, 0, 0, 0, 0, 0, persistenceId, null)); + when(mockSupport.handleSnapshotMessage(same(applySnapshot), any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(applySnapshot); CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(ByteState.empty(), Optional.empty()); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class)); + when(mockSupport.handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(captureSnapshotReply); SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(new SnapshotMetadata("", 0L, 0L)); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class)); + when(mockSupport.handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(saveSnapshotSuccess); SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(new SnapshotMetadata("", 0L, 0L), new Throwable()); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class)); + when(mockSupport.handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(saveSnapshotFailure); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT), - any(ActorRef.class)); + when(mockSupport.handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT), + any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT); - doReturn(true).when(mockSupport).handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class)); + when(mockSupport.handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class))).thenReturn(true); mockRaftActor.handleCommand(GetSnapshot.INSTANCE); verify(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class)); @@ -381,7 +369,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); TestActorRef mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId, - Collections.emptyMap(), config, dataPersistenceProvider), persistenceId); + Map.of(), config, dataPersistenceProvider), persistenceId); MockRaftActor mockRaftActor = mockActorRef.underlyingActor(); @@ -405,7 +393,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); TestActorRef mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId, - Collections.emptyMap(), config, dataPersistenceProvider), persistenceId); + Map.of(), config, dataPersistenceProvider), persistenceId); MockRaftActor mockRaftActor = mockActorRef.underlyingActor(); @@ -518,7 +506,7 @@ public class RaftActorTest extends AbstractActorTest { String persistenceId = factory.generateActorId("notifier-"); factory.createActor(MockRaftActor.builder().id(persistenceId) - .peerAddresses(ImmutableMap.of("leader", "fake/path")) + .peerAddresses(Map.of("leader", "fake/path")) 
.config(config).roleChangeNotifier(notifierActor).props()); List matches = null; @@ -560,8 +548,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); - Map peerAddresses = new HashMap<>(); - peerAddresses.put(follower1Id, followerActor1.path().toString()); + Map peerAddresses = Map.of(follower1Id, followerActor1.path().toString()); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -607,7 +594,7 @@ public class RaftActorTest extends AbstractActorTest { assertEquals(8, leaderActor.getReplicatedLog().size()); - MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList( + MockSnapshotState snapshotState = new MockSnapshotState(List.of( new MockRaftActorContext.MockPayload("foo-0"), new MockRaftActorContext.MockPayload("foo-1"), new MockRaftActorContext.MockPayload("foo-2"), @@ -649,8 +636,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); - Map peerAddresses = new HashMap<>(); - peerAddresses.put(leaderId, leaderActor1.path().toString()); + Map peerAddresses = Map.of(leaderId, leaderActor1.path().toString()); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -683,15 +669,15 @@ public class RaftActorTest extends AbstractActorTest { assertEquals(6, followerActor.getReplicatedLog().size()); //fake snapshot on index 6 - List entries = Arrays.asList( - (ReplicatedLogEntry) new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6"))); + List entries = List.of( + new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6"))); followerActor.handleCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5, (short)0)); assertEquals(7, followerActor.getReplicatedLog().size()); //fake snapshot on index 7 assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state()); - entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(7, 1, + entries = List.of(new SimpleReplicatedLogEntry(7, 1, new MockRaftActorContext.MockPayload("foo-7"))); followerActor.handleCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6, (short) 0)); assertEquals(8, followerActor.getReplicatedLog().size()); @@ -699,7 +685,7 @@ public class RaftActorTest extends AbstractActorTest { assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state()); - ByteString snapshotBytes = fromObject(Arrays.asList( + ByteString snapshotBytes = fromObject(List.of( new MockRaftActorContext.MockPayload("foo-0"), new MockRaftActorContext.MockPayload("foo-1"), new MockRaftActorContext.MockPayload("foo-2"), @@ -716,8 +702,7 @@ public class RaftActorTest extends AbstractActorTest { assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log assertEquals(7, followerActor.getReplicatedLog().lastIndex()); - entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(8, 1, - new MockRaftActorContext.MockPayload("foo-7"))); + entries = List.of(new SimpleReplicatedLogEntry(8, 1, new MockRaftActorContext.MockPayload("foo-7"))); // send an additional entry 8 with leaderCommit = 7 followerActor.handleCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7, (short) 0)); @@ -740,9 +725,9 @@ public class RaftActorTest extends 
AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); - Map peerAddresses = new HashMap<>(); - peerAddresses.put(follower1Id, followerActor1.path().toString()); - peerAddresses.put(follower2Id, followerActor2.path().toString()); + Map peerAddresses = Map.of( + follower1Id, followerActor1.path().toString(), + follower2Id, followerActor2.path().toString()); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -791,7 +776,7 @@ public class RaftActorTest extends AbstractActorTest { assertEquals("Fake snapshot should not happen when Initiate is in progress", 5, leaderActor.getReplicatedLog().size()); - ByteString snapshotBytes = fromObject(Arrays.asList( + ByteString snapshotBytes = fromObject(List.of( new MockRaftActorContext.MockPayload("foo-0"), new MockRaftActorContext.MockPayload("foo-1"), new MockRaftActorContext.MockPayload("foo-2"), @@ -819,7 +804,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = createProvider(); - Map peerAddresses = ImmutableMap.builder().put("member1", "address").build(); + Map peerAddresses = Map.of("member1", "address"); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -863,7 +848,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = createProvider(); - Map peerAddresses = ImmutableMap.builder().put("member1", "address").build(); + Map peerAddresses = Map.of("member1", "address"); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -909,7 +894,7 @@ public class RaftActorTest extends AbstractActorTest { DataPersistenceProvider dataPersistenceProvider = createProvider(); - Map peerAddresses = ImmutableMap.builder().build(); + Map peerAddresses = Map.of(); TestActorRef mockActorRef = factory.createTestActor( MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId); @@ -963,8 +948,7 @@ public class RaftActorTest extends AbstractActorTest { public void testUpdateConfigParam() { DefaultConfigParamsImpl emptyConfig = new DefaultConfigParamsImpl(); String persistenceId = factory.generateActorId("follower-"); - ImmutableMap peerAddresses = - ImmutableMap.builder().put("member1", "address").build(); + Map peerAddresses = Map.of("member1", "address"); DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class); TestActorRef actorRef = factory.createTestActor( @@ -1029,7 +1013,7 @@ public class RaftActorTest extends AbstractActorTest { new MockRaftActorContext.MockPayload("C"))); TestActorRef raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId, - ImmutableMap.builder().put("member1", "address").build(), config) + Map.of("member1", "address"), config) .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId); MockRaftActor mockRaftActor = raftActorRef.underlyingActor(); @@ -1105,13 +1089,13 @@ public class RaftActorTest extends AbstractActorTest { DefaultConfigParamsImpl config = new DefaultConfigParamsImpl(); config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName()); - List snapshotUnappliedEntries = new ArrayList<>(); - snapshotUnappliedEntries.add(new 
SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"))); + List snapshotUnappliedEntries = List.of( + new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"))); int snapshotLastApplied = 3; int snapshotLastIndex = 4; - MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList( + MockSnapshotState snapshotState = new MockSnapshotState(List.of( new MockRaftActorContext.MockPayload("A"), new MockRaftActorContext.MockPayload("B"), new MockRaftActorContext.MockPayload("C"), @@ -1152,7 +1136,7 @@ public class RaftActorTest extends AbstractActorTest { // Test with data persistence disabled - snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), + snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(), -1, -1, -1, -1, 5, "member-1", null); persistenceId = factory.generateActorId("test-actor-"); @@ -1182,9 +1166,9 @@ public class RaftActorTest extends AbstractActorTest { DefaultConfigParamsImpl config = new DefaultConfigParamsImpl(); config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName()); - List state = Arrays.asList(new MockRaftActorContext.MockPayload("A")); + List state = List.of(new MockRaftActorContext.MockPayload("A")); Snapshot snapshot = Snapshot.create(ByteState.of(fromObject(state).toByteArray()), - Arrays.asList(), 5, 2, 5, 2, 2, "member-1", null); + List.of(), 5, 2, 5, 2, 2, "member-1", null); InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("B"))); @@ -1220,7 +1204,7 @@ public class RaftActorTest extends AbstractActorTest { String persistenceId = factory.generateActorId("test-actor-"); InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1, - new ServerConfigurationPayload(Arrays.asList(new ServerInfo(persistenceId, false))))); + new ServerConfigurationPayload(List.of(new ServerInfo(persistenceId, false))))); TestActorRef raftActorRef = factory.createTestActor(MockRaftActor.builder().id(persistenceId) .config(config).props().withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId); @@ -1254,7 +1238,7 @@ public class RaftActorTest extends AbstractActorTest { mockRaftActor.waitForInitializeBehaviorComplete(); - raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, Collections.emptyList(), + raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, List.of(), 0L, -1L, (short)1), ActorRef.noSender()); LeaderStateChanged leaderStateChange = MessageCollectorActor.expectFirstMatching( notifierActor, LeaderStateChanged.class); @@ -1287,7 +1271,7 @@ public class RaftActorTest extends AbstractActorTest { doReturn(true).when(mockPersistenceProvider).isRecoveryApplicable(); TestActorRef leaderActorRef = factory.createTestActor( - MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config, + MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config, mockPersistenceProvider), leaderId); MockRaftActor leaderActor = leaderActorRef.underlyingActor(); leaderActor.waitForInitializeBehaviorComplete(); @@ -1329,7 +1313,7 @@ public class RaftActorTest extends AbstractActorTest { config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS)); TestActorRef leaderActorRef = factory.createTestActor( - MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config), + MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config), leaderId); MockRaftActor 
leaderActor = leaderActorRef.underlyingActor(); leaderActor.waitForInitializeBehaviorComplete(); @@ -1369,8 +1353,7 @@ public class RaftActorTest extends AbstractActorTest { TestRaftActor.Builder builder = TestRaftActor.newBuilder() .id(leaderId) - .peerAddresses(ImmutableMap.of(followerId, - mockFollowerActorRef.path().toString())) + .peerAddresses(Map.of(followerId, mockFollowerActorRef.path().toString())) .config(config) .collectorActor(factory.createActor( MessageCollectorActor.props(), factory.generateActorId(leaderId + "-collector"))); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java index 6386d6c6ba..7d6b8988d3 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java @@ -7,6 +7,8 @@ */ package org.opendaylight.controller.cluster.raft; +import static org.junit.Assert.fail; + import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.pattern.Patterns; @@ -16,7 +18,6 @@ import akka.util.Timeout; import com.google.common.util.concurrent.Uninterruptibles; import java.util.Optional; import java.util.concurrent.TimeUnit; -import org.junit.Assert; import org.opendaylight.controller.cluster.raft.client.messages.FindLeader; import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply; import org.slf4j.Logger; @@ -31,7 +32,7 @@ public class RaftActorTestKit extends TestKit { public RaftActorTestKit(final ActorSystem actorSystem, final String actorName) { super(actorSystem); - raftActor = this.getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName); + raftActor = getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName); } public ActorRef getRaftActor() { @@ -65,6 +66,6 @@ public class RaftActorTestKit extends TestKit { Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS); } - Assert.fail("Leader not found for actorRef " + actorRef.path()); + fail("Leader not found for actorRef " + actorRef.path()); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java index 7004ca8878..b3da66c0ff 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java @@ -12,9 +12,8 @@ import static org.junit.Assert.assertEquals; import akka.actor.ActorRef; import akka.persistence.SaveSnapshotSuccess; import akka.testkit.TestActorRef; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import java.util.List; +import java.util.Map; import org.junit.Before; import org.junit.Test; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; @@ -43,7 +42,7 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat String persistenceId = factory.generateActorId("singleNode"); TestActorRef singleNodeActorRef = - newTestRaftActor(persistenceId, ImmutableMap.builder().build(), 
leaderConfigParams); + newTestRaftActor(persistenceId, Map.of(), leaderConfigParams); waitUntilLeader(singleNodeActorRef); @@ -75,8 +74,9 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat assertEquals("Last applied", 5, singleNodeContext.getLastApplied()); - assertEquals("Incorrect State after snapshot success is received ", Lists.newArrayList(payload0, payload1, - payload2, payload3, payload4, payload5), singleNodeActorRef.underlyingActor().getState()); + assertEquals("Incorrect State after snapshot success is received ", + List.of(payload0, payload1, payload2, payload3, payload4, payload5), + singleNodeActorRef.underlyingActor().getState()); InMemoryJournal.waitForWriteMessagesComplete(persistenceId); @@ -87,19 +87,17 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat assertEquals(1, persistedSnapshots.size()); List snapshottedState = MockRaftActor.fromState(persistedSnapshots.get(0).getState()); - assertEquals("Incorrect Snapshot", Lists.newArrayList(payload0, payload1, payload2, payload3), - snapshottedState); + assertEquals("Incorrect Snapshot", List.of(payload0, payload1, payload2, payload3), snapshottedState); //recovery logic starts killActor(singleNodeActorRef); - singleNodeActorRef = newTestRaftActor(persistenceId, - ImmutableMap.builder().build(), leaderConfigParams); + singleNodeActorRef = newTestRaftActor(persistenceId, Map.of(), leaderConfigParams); singleNodeActorRef.underlyingActor().waitForRecoveryComplete(); - assertEquals("Incorrect State after Recovery ", Lists.newArrayList(payload0, payload1, payload2, payload3, - payload4, payload5), singleNodeActorRef.underlyingActor().getState()); - + assertEquals("Incorrect State after Recovery ", + List.of(payload0, payload1, payload2, payload3, payload4, payload5), + singleNodeActorRef.underlyingActor().getState()); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java index ca53d2e6c5..f197ba29a0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java @@ -11,9 +11,6 @@ import static org.junit.Assert.assertEquals; import akka.actor.ActorRef; import akka.persistence.SaveSnapshotSuccess; -import com.google.common.collect.ImmutableMap; -import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Before; @@ -38,15 +35,12 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { @Before public void setup() { - follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId)), + follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams()); - Map leaderPeerAddresses = new HashMap<>(); - leaderPeerAddresses.put(follower1Id, follower1Actor.path().toString()); - leaderPeerAddresses.put(follower2Id, ""); - leaderConfigParams = newLeaderConfigParams(); - leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams); + leaderActor = newTestRaftActor(leaderId, Map.of(follower1Id, follower1Actor.path().toString(), follower2Id, ""), + leaderConfigParams); follower1CollectorActor = 
follower1Actor.underlyingActor().collectorActor(); leaderCollectorActor = leaderActor.underlyingActor().collectorActor(); @@ -96,7 +90,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { assertEquals("Leader commit index", 4, leaderContext.getCommitIndex()); assertEquals("Leader last applied", 4, leaderContext.getLastApplied()); - assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4), + assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4), leaderActor.underlyingActor().getState()); } @@ -135,7 +129,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { assertEquals("Leader commit index", 4, leaderContext.getCommitIndex()); assertEquals("Leader last applied", 4, leaderContext.getLastApplied()); - assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4), + assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4), leaderActor.underlyingActor().getState()); } @@ -146,8 +140,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { leader = leaderActor.underlyingActor().getCurrentBehavior(); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)), - newFollowerConfigParams()); + follower2Actor = newTestRaftActor(follower2Id, + Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams()); follower2CollectorActor = follower2Actor.underlyingActor().collectorActor(); leaderActor.tell(new SetPeerAddress(follower2Id, follower2Actor.path().toString()), ActorRef.noSender()); @@ -168,8 +162,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { InMemoryJournal.clear(); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)), - newFollowerConfigParams()); + follower2Actor = newTestRaftActor(follower2Id, + Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams()); TestRaftActor follower2Underlying = follower2Actor.underlyingActor(); follower2CollectorActor = follower2Underlying.collectorActor(); follower2Context = follower2Underlying.getRaftActorContext(); @@ -182,7 +176,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { // Wait for the follower to persist the snapshot. 
MessageCollectorActor.expectFirstMatching(follower2CollectorActor, SaveSnapshotSuccess.class); - final List expFollowerState = Arrays.asList(payload0, payload1, payload2); + final List expFollowerState = List.of(payload0, payload1, payload2); assertEquals("Follower commit index", 2, follower2Context.getCommitIndex()); assertEquals("Follower last applied", 2, follower2Context.getLastApplied()); @@ -191,7 +185,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { killActor(follower2Actor); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)), + follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams()); follower2Underlying = follower2Actor.underlyingActor(); @@ -244,9 +238,9 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest { reinstateLeaderActor(); - assertEquals("Leader last index", 5 , leaderActor.underlyingActor().getReplicatedLog().lastIndex()); - assertEquals(payload4, leaderActor.underlyingActor().getReplicatedLog().get(4).getData()); - assertEquals(payload5, leaderActor.underlyingActor().getReplicatedLog().get(5).getData()); + final var log = leaderActor.underlyingActor().getReplicatedLog(); + assertEquals("Leader last index", 5, log.lastIndex()); + assertEquals(List.of(payload4, payload5), List.of(log.get(4).getData(), log.get(5).getData())); } private void reinstateLeaderActor() { diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java index 5b8ec2ec15..542828b9d0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java @@ -17,7 +17,7 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import akka.japi.Procedure; import com.google.common.util.concurrent.MoreExecutors; -import java.util.Collections; +import java.util.Map; import java.util.function.Consumer; import org.junit.Before; import org.junit.Test; @@ -56,7 +56,7 @@ public class ReplicatedLogImplTest { @Before public void setup() { context = new RaftActorContextImpl(null, null, "test", - new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.emptyMap(), + new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(), configParams, mockPersistence, applyState -> { }, LOG, MoreExecutors.directExecutor()); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java index 050b0ddf35..70f67425c3 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java @@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft; import static org.junit.Assert.assertEquals; import akka.persistence.SaveSnapshotSuccess; -import com.google.common.collect.ImmutableMap; import java.util.List; +import java.util.Map; import 
org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; import org.opendaylight.controller.cluster.raft.base.messages.ApplyState; @@ -67,15 +67,15 @@ public class ReplicationAndSnapshotsIntegrationTest extends AbstractRaftActorInt DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams(); followerConfigParams.setSnapshotBatchCount(snapshotBatchCount); - follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id)), followerConfigParams); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId), follower1Id, testActorPath(follower1Id)), followerConfigParams); - peerAddresses = ImmutableMap.builder() - .put(follower1Id, follower1Actor.path().toString()) - .put(follower2Id, follower2Actor.path().toString()).build(); + peerAddresses = Map.of( + follower1Id, follower1Actor.path().toString(), + follower2Id, follower2Actor.path().toString()); leaderConfigParams = newLeaderConfigParams(); leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java index d6a53a0aee..f2658957e1 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java @@ -8,21 +8,20 @@ package org.opendaylight.controller.cluster.raft; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import akka.actor.ActorRef; import akka.persistence.SaveSnapshotSuccess; -import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.Uninterruptibles; -import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.SerializationUtils; import org.eclipse.jdt.annotation.Nullable; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot; @@ -61,15 +60,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(initialTerm, leaderId)); // Create the leader and 2 follower actors. 
- follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id)), newFollowerConfigParams()); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId), follower1Id, testActorPath(follower1Id)), newFollowerConfigParams()); - Map leaderPeerAddresses = ImmutableMap.builder() - .put(follower1Id, follower1Actor.path().toString()) - .put(follower2Id, follower2Actor.path().toString()).build(); + Map leaderPeerAddresses = Map.of( + follower1Id, follower1Actor.path().toString(), + follower2Id, follower2Actor.path().toString()); leaderConfigParams = newLeaderConfigParams(); leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams); @@ -86,7 +85,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A follower2 = follower2Actor.underlyingActor().getCurrentBehavior(); currentTerm = leaderContext.getTermInformation().getCurrentTerm(); - assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm); + assertTrue("Current term > " + initialTerm, currentTerm > initialTerm); leaderCollectorActor = leaderActor.underlyingActor().collectorActor(); follower1CollectorActor = follower1Actor.underlyingActor().collectorActor(); @@ -96,7 +95,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A } private void setupFollower2() { - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId), follower1Id, testActorPath(follower1Id)), newFollowerConfigParams()); follower2Context = follower2Actor.underlyingActor().getRaftActorContext(); @@ -169,7 +168,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // to catch it up because no snapshotting was done so the follower's next index was present in the log. InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor, InstallSnapshot.class); - Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot); + assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot); testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries complete"); } @@ -254,7 +253,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // Verify the leader did not try to install a snapshot to catch up follower 2. InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor, InstallSnapshot.class); - Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot); + assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot); // Ensure there's at least 1 more heartbeat. MessageCollectorActor.clearMessages(leaderCollectorActor); @@ -364,7 +363,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // Send a server config change to test that the install snapshot includes the server config. 
- ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of( new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false), new ServerInfo(follower2Id, false))); @@ -452,7 +451,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A setupFollower2(); - MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 5); + MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 1); follower2Actor.stop(); @@ -508,7 +507,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // Verify a snapshot is not triggered. CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class); - Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot); + assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot); expSnapshotState.add(payload1); @@ -581,7 +580,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3); captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class); - Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot); + assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot); // Verify the follower 1 applies the state. applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class); @@ -613,8 +612,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A /** * Resume the lagging follower 2 and verify it receives an install snapshot from the leader. */ - private void verifyInstallSnapshotToLaggingFollower(long lastAppliedIndex, - @Nullable ServerConfigurationPayload expServerConfig) { + private void verifyInstallSnapshotToLaggingFollower(final long lastAppliedIndex, + final @Nullable ServerConfigurationPayload expServerConfig) { testLog.info("verifyInstallSnapshotToLaggingFollower starting"); MessageCollectorActor.clearMessages(leaderCollectorActor); @@ -633,15 +632,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // This is OK - the next snapshot should delete it. In production, even if the system restarted // before another snapshot, they would both get applied which wouldn't hurt anything. List persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class); - Assert.assertTrue("Expected at least 1 persisted snapshots", persistedSnapshots.size() > 0); + assertFalse("Expected at least 1 persisted snapshots", persistedSnapshots.isEmpty()); Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1); verifySnapshot("Persisted", persistedSnapshot, currentTerm, lastAppliedIndex, currentTerm, lastAppliedIndex); List unAppliedEntry = persistedSnapshot.getUnAppliedEntries(); assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, unAppliedEntry.size()); int snapshotSize = SerializationUtils.serialize(persistedSnapshot.getState()).length; - final int expTotalChunks = snapshotSize / SNAPSHOT_CHUNK_SIZE - + (snapshotSize % SNAPSHOT_CHUNK_SIZE > 0 ? 1 : 0); + final int expTotalChunks = snapshotSize / MAXIMUM_MESSAGE_SLICE_SIZE + + (snapshotSize % MAXIMUM_MESSAGE_SLICE_SIZE > 0 ? 
1 : 0); InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, InstallSnapshot.class); @@ -660,7 +659,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A assertEquals("InstallSnapshotReply getTerm", currentTerm, installSnapshotReply.getTerm()); assertEquals("InstallSnapshotReply getChunkIndex", index++, installSnapshotReply.getChunkIndex()); assertEquals("InstallSnapshotReply getFollowerId", follower2Id, installSnapshotReply.getFollowerId()); - assertEquals("InstallSnapshotReply isSuccess", true, installSnapshotReply.isSuccess()); + assertTrue("InstallSnapshotReply isSuccess", installSnapshotReply.isSuccess()); } // Verify follower 2 applies the snapshot. @@ -683,18 +682,18 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A verifyLeadersTrimmedLog(lastAppliedIndex); if (expServerConfig != null) { - Set expServerInfo = new HashSet<>(expServerConfig.getServerConfig()); + Set expServerInfo = Set.copyOf(expServerConfig.getServerConfig()); assertEquals("Leader snapshot server config", expServerInfo, - new HashSet<>(persistedSnapshot.getServerConfiguration().getServerConfig())); + Set.copyOf(persistedSnapshot.getServerConfiguration().getServerConfig())); assertEquals("Follower 2 snapshot server config", expServerInfo, - new HashSet<>(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig())); + Set.copyOf(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig())); ServerConfigurationPayload follower2ServerConfig = follower2Context.getPeerServerInfo(true); assertNotNull("Follower 2 server config is null", follower2ServerConfig); assertEquals("Follower 2 server config", expServerInfo, - new HashSet<>(follower2ServerConfig.getServerConfig())); + Set.copyOf(follower2ServerConfig.getServerConfig())); } MessageCollectorActor.clearMessages(leaderCollectorActor); @@ -765,8 +764,9 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A // Verify the leaders's persisted journal log - it should only contain the last 2 ReplicatedLogEntries // added after the snapshot as the persisted journal should've been purged to the snapshot // sequence number. - verifyPersistedJournal(leaderId, Arrays.asList(new SimpleReplicatedLogEntry(5, currentTerm, payload5), - new SimpleReplicatedLogEntry(6, currentTerm, payload6))); + verifyPersistedJournal(leaderId, List.of( + new SimpleReplicatedLogEntry(5, currentTerm, payload5), + new SimpleReplicatedLogEntry(6, currentTerm, payload6))); // Verify the leaders's persisted journal contains an ApplyJournalEntries for at least the last entry index. List persistedApplyJournalEntries = @@ -779,8 +779,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A } } - Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 6), - found); + assertTrue("ApplyJournalEntries with index 6 not found in leader's persisted journal", found); // Verify follower 1 applies the 3 log entries. applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3); @@ -811,8 +810,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A /** * Kill the leader actor, reinstate it and verify the recovered journal. 
*/ - private void verifyLeaderRecoveryAfterReinstatement(long lastIndex, long snapshotIndex, - long firstJournalEntryIndex) { + private void verifyLeaderRecoveryAfterReinstatement(final long lastIndex, final long snapshotIndex, + final long firstJournalEntryIndex) { testLog.info("verifyLeaderRecoveryAfterReinstatement starting: lastIndex: {}, snapshotIndex: {}, " + "firstJournalEntryIndex: {}", lastIndex, snapshotIndex, firstJournalEntryIndex); @@ -845,8 +844,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A testLog.info("verifyLeaderRecoveryAfterReinstatement ending"); } - private void sendInitialPayloadsReplicatedToAllFollowers(String... data) { - + private void sendInitialPayloadsReplicatedToAllFollowers(final String... data) { // Send the payloads. for (String d: data) { expSnapshotState.add(sendPayloadData(leaderActor, d)); @@ -855,25 +853,27 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A int numEntries = data.length; // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond. - List applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, - ApplyState.class, numEntries); + final var leaderStates = MessageCollectorActor.expectMatching(leaderCollectorActor, + ApplyState.class, numEntries); for (int i = 0; i < expSnapshotState.size(); i++) { - MockPayload payload = expSnapshotState.get(i); - verifyApplyState(applyStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload); + final MockPayload payload = expSnapshotState.get(i); + verifyApplyState(leaderStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload); } // Verify follower 1 applies each log entry. - applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, numEntries); + final var follower1States = MessageCollectorActor.expectMatching(follower1CollectorActor, + ApplyState.class, numEntries); for (int i = 0; i < expSnapshotState.size(); i++) { - MockPayload payload = expSnapshotState.get(i); - verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload); + final MockPayload payload = expSnapshotState.get(i); + verifyApplyState(follower1States.get(i), null, null, currentTerm, i, payload); } // Verify follower 2 applies each log entry. - applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, numEntries); + final var follower2States = MessageCollectorActor.expectMatching(follower2CollectorActor, + ApplyState.class, numEntries); for (int i = 0; i < expSnapshotState.size(); i++) { - MockPayload payload = expSnapshotState.get(i); - verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload); + final MockPayload payload = expSnapshotState.get(i); + verifyApplyState(follower2States.get(i), null, null, currentTerm, i, payload); } // Ensure there's at least 1 more heartbeat. 
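Note on the expected-chunk arithmetic in verifyInstallSnapshotToLaggingFollower above: the test derives the number of InstallSnapshot chunks from the serialized snapshot size by ceiling division over the slice size (the constant renamed to MAXIMUM_MESSAGE_SLICE_SIZE in this patch). A minimal standalone sketch of that arithmetic follows; the class, method and variable names here are illustrative only and are not part of the patch:

    class ChunkCountSketch {
        // Ceiling division: how many InstallSnapshot chunks a snapshot of
        // snapshotSize bytes needs when each message carries at most sliceSize bytes.
        static int expectedChunkCount(final int snapshotSize, final int sliceSize) {
            return snapshotSize / sliceSize + (snapshotSize % sliceSize > 0 ? 1 : 0);
        }

        public static void main(final String[] args) {
            System.out.println(expectedChunkCount(45, 20)); // prints 3
            System.out.println(expectedChunkCount(40, 20)); // prints 2
        }
    }

With maximumMessageSliceSize set to 20 in ReplicationWithSlicedPayloadIntegrationTest below, a payload of size 21 exceeds the threshold and is sliced while a payload of size 19 is not, which is exactly what the large/small payload assertions in that test exercise.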
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java index fda95eaa1d..dcbc8179a6 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java @@ -9,8 +9,8 @@ package org.opendaylight.controller.cluster.raft; import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching; -import com.google.common.collect.ImmutableMap; import java.util.List; +import java.util.Map; import org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; import org.opendaylight.controller.cluster.raft.base.messages.ApplyState; @@ -29,19 +29,19 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct // Create the leader and 2 follower actors. - snapshotChunkSize = 20; + maximumMessageSliceSize = 20; DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams(); followerConfigParams.setSnapshotBatchCount(snapshotBatchCount); - follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id)), followerConfigParams); - follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId), + follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId), follower1Id, testActorPath(follower1Id)), followerConfigParams); - peerAddresses = ImmutableMap.builder() - .put(follower1Id, follower1Actor.path().toString()) - .put(follower2Id, follower2Actor.path().toString()).build(); + peerAddresses = Map.of( + follower1Id, follower1Actor.path().toString(), + follower2Id, follower2Actor.path().toString()); leaderConfigParams = newLeaderConfigParams(); leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams); @@ -58,11 +58,11 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct // Send a large payload that exceeds the size threshold and needs to be sliced. - MockPayload largePayload = sendPayloadData(leaderActor, "large", snapshotChunkSize + 1); + MockPayload largePayload = sendPayloadData(leaderActor, "large", maximumMessageSliceSize + 1); // Then send a small payload that does not need to be sliced. 
- MockPayload smallPayload = sendPayloadData(leaderActor, "normal", snapshotChunkSize - 1); + MockPayload smallPayload = sendPayloadData(leaderActor, "normal", maximumMessageSliceSize - 1); final List leaderApplyState = expectMatching(leaderCollectorActor, ApplyState.class, 2); verifyApplyState(leaderApplyState.get(0), leaderCollectorActor, diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java index 9463a113e6..aa4a44b97c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java @@ -5,7 +5,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.raft; import static org.junit.Assert.assertArrayEquals; @@ -26,7 +25,7 @@ import static org.mockito.Mockito.verify; import akka.actor.ActorRef; import akka.persistence.SnapshotSelectionCriteria; import java.io.OutputStream; -import java.util.Arrays; +import java.util.List; import java.util.Optional; import java.util.function.Consumer; import org.junit.After; @@ -257,7 +256,7 @@ public class SnapshotManagerTest extends AbstractActorTest { 8L, 2L, new MockRaftActorContext.MockPayload()); doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L); - doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L); + doReturn(List.of(lastLogEntry)).when(mockReplicatedLog).getFrom(9L); // when replicatedToAllIndex = -1 snapshotManager.capture(lastLogEntry, -1); @@ -275,7 +274,7 @@ public class SnapshotManagerTest extends AbstractActorTest { assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm()); assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex()); assertEquals("getState", snapshotState, snapshot.getState()); - assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries()); + assertEquals("getUnAppliedEntries", List.of(lastLogEntry), snapshot.getUnAppliedEntries()); assertEquals("electionTerm", mockElectionTerm.getCurrentTerm(), snapshot.getElectionTerm()); assertEquals("electionVotedFor", mockElectionTerm.getVotedFor(), snapshot.getElectionVotedFor()); @@ -378,7 +377,7 @@ public class SnapshotManagerTest extends AbstractActorTest { Optional installSnapshotStream = installSnapshotStreamCapture.getValue(); assertEquals("isPresent", true, installSnapshotStream.isPresent()); - installSnapshotStream.get().write(snapshotState.getBytes()); + installSnapshotStream.orElseThrow().write(snapshotState.getBytes()); snapshotManager.persist(snapshotState, installSnapshotStream, Runtime.getRuntime().totalMemory()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java index 50a3c98131..96f4fe8c6e 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java @@ -7,6 +7,8 @@ */ package org.opendaylight.controller.cluster.raft; 
+import static org.junit.Assert.assertTrue; + import akka.actor.Actor; import akka.actor.ActorIdentity; import akka.actor.ActorRef; @@ -23,10 +25,9 @@ import akka.util.Timeout; import com.google.common.base.Stopwatch; import com.google.common.util.concurrent.Uninterruptibles; import java.time.Duration; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.Await; @@ -49,7 +50,7 @@ public class TestActorFactory implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(TestActorFactory.class); private final ActorSystem system; - List createdActors = new LinkedList<>(); + private final List createdActors = new ArrayList<>(); private static int actorCount = 1; public TestActorFactory(final ActorSystem system) { @@ -152,7 +153,7 @@ public class TestActorFactory implements AutoCloseable { ActorSelection actorSelection = system.actorSelection(actorRef.path().toString()); Future future = Patterns.ask(actorSelection, new Identify(""), timeout); ActorIdentity reply = (ActorIdentity)Await.result(future, timeout.duration()); - Assert.assertTrue("Identify returned non-present", reply.getActorRef().isPresent()); + assertTrue("Identify returned non-present", reply.getActorRef().isPresent()); return; } catch (Exception | AssertionError e) { Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java index 26cdb22d8c..2a58dd1d4f 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java @@ -7,9 +7,10 @@ */ package org.opendaylight.controller.cluster.raft.base.messages; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,10 +19,11 @@ import org.junit.Test; * @author Thomas Pantelis */ public class TimeoutNowTest { - @Test public void test() { - TimeoutNow cloned = (TimeoutNow) SerializationUtils.clone(TimeoutNow.INSTANCE); + final var bytes = SerializationUtils.serialize(TimeoutNow.INSTANCE); + assertEquals(86, bytes.length); + final var cloned = SerializationUtils.deserialize(bytes); assertSame("Cloned instance", TimeoutNow.INSTANCE, cloned); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java index 7695d05133..3497840b38 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java @@ -32,12 +32,12 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import 
org.opendaylight.controller.cluster.raft.TestActorFactory; import org.opendaylight.controller.cluster.raft.messages.AppendEntries; import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.messages.RaftRPC; import org.opendaylight.controller.cluster.raft.messages.RequestVote; import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply; import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.policy.RaftPolicy; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal; import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore; import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java index a9305a6862..d2aa7d013c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.behaviors; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class FollowerIdentifierTest { - @Test public void testSerialization() { - FollowerIdentifier expected = new FollowerIdentifier("follower1"); - FollowerIdentifier cloned = (FollowerIdentifier) SerializationUtils.clone(expected); + final var expected = new FollowerIdentifier("follower1"); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(87, bytes.length); + final var cloned = (FollowerIdentifier) SerializationUtils.deserialize(bytes); assertEquals("cloned", expected, cloned); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java index a51134676a..8006d5a6ac 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java @@ -7,6 +7,7 @@ */ package org.opendaylight.controller.cluster.raft.behaviors; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -25,21 +26,15 @@ import akka.protobuf.ByteString; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; import com.google.common.base.Stopwatch; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteSource; import com.google.common.util.concurrent.Uninterruptibles; import java.io.OutputStream; -import 
java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.junit.After; -import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl; import org.opendaylight.controller.cluster.raft.MockRaftActor; @@ -157,7 +152,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { Uninterruptibles.sleepUninterruptibly(context.getConfigParams() .getElectionTimeOutInterval().toMillis() - 100, TimeUnit.MILLISECONDS); - follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), + follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short) 1)); Uninterruptibles.sleepUninterruptibly(130, TimeUnit.MILLISECONDS); @@ -166,7 +161,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { Uninterruptibles.sleepUninterruptibly(context.getConfigParams() .getElectionTimeOutInterval().toMillis() - 150, TimeUnit.MILLISECONDS); - follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), + follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short) 1)); Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS); @@ -221,10 +216,9 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.getReplicatedLog().append(newReplicatedLogEntry(1,100, "bar")); context.getReplicatedLog().setSnapshotIndex(99); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); - Assert.assertEquals(1, context.getReplicatedLog().size()); + assertEquals(1, context.getReplicatedLog().size()); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0); @@ -246,8 +240,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { MockRaftActorContext context = createActorContext(); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0); @@ -272,8 +265,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.getReplicatedLog().append(newReplicatedLogEntry(1, 100, "bar")); context.getReplicatedLog().setSnapshotIndex(99); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0); @@ -297,8 +289,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.getReplicatedLog().clear(0,2); context.getReplicatedLog().setSnapshotIndex(100); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0); @@ -323,8 +314,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.getReplicatedLog().clear(0,2); 
context.getReplicatedLog().setSnapshotIndex(100); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 105, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 105, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 105, 100, (short) 0); @@ -346,8 +336,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { MockRaftActorContext context = createActorContext(); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0); @@ -367,7 +356,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setCommitIndex(101); setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload("")); - entries = Arrays.asList(newReplicatedLogEntry(2, 101, "foo")); + entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0); @@ -394,8 +383,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { MockRaftActorContext context = createActorContext(); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0); @@ -415,8 +403,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { setLastLogEntry(context, 1, 100, new MockRaftActorContext.MockPayload("")); - entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // leader-2 is becoming the leader now and it says the commitIndex is 45 appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0); @@ -434,8 +421,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { MockRaftActorContext context = createActorContext(); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0); @@ -456,8 +442,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload("")); - entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0); @@ -474,8 +459,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { setLastLogEntry(context, 1, 100, new MockRaftActorContext.MockPayload("")); - entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // leader-2 is becoming the leader now and it says the commitIndex is 45 appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0); @@ -504,8 +488,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { new MockRaftActorContext.MockPayload("")); context.getReplicatedLog().setSnapshotIndex(99); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = 
List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0); @@ -527,13 +510,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { MockRaftActorContext context = createActorContext(); - AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, Collections.emptyList(), 101, -1, (short)0); + AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, List.of(), 101, -1, (short)0); follower = createBehavior(context); RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class); @@ -550,13 +533,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.getReplicatedLog().setSnapshotIndex(4); context.getReplicatedLog().setSnapshotTerm(3); - AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, Collections.emptyList(), 8, -1, (short)0); + AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, List.of(), 8, -1, (short)0); follower = createBehavior(context); RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class); @@ -587,9 +570,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Prepare the entries to be sent with AppendEntries - List entries = new ArrayList<>(); - entries.add(newReplicatedLogEntry(1, 3, "three")); - entries.add(newReplicatedLogEntry(1, 4, "four")); + List entries = List.of( + newReplicatedLogEntry(1, 3, "three"), newReplicatedLogEntry(1, 4, "four")); // Send appendEntries with the same term as was set on the receiver // before the new behavior was created (1 in this case) @@ -603,7 +585,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); assertEquals("Next index", 5, log.last().getIndex() + 1); assertEquals("Entry 3", entries.get(0), log.get(3)); @@ -639,9 +621,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Prepare the entries to be sent with AppendEntries - List entries = new ArrayList<>(); - entries.add(newReplicatedLogEntry(2, 2, "two-1")); - entries.add(newReplicatedLogEntry(2, 3, "three")); + List entries = List.of( + newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three")); // Send appendEntries with the same term as was set on the receiver // before the new behavior was created (1 in this case) @@ -653,7 +634,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); // The entry at index 2 will be found out-of-sync with the leader // and will be removed @@ -690,9 +671,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Prepare the entries to be sent with AppendEntries - List entries = new ArrayList<>(); - 
entries.add(newReplicatedLogEntry(2, 2, "two-1")); - entries.add(newReplicatedLogEntry(2, 3, "three")); + List entries = List.of( + newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three")); // Send appendEntries with the same term as was set on the receiver // before the new behavior was created (1 in this case) @@ -705,7 +685,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); expectAndVerifyAppendEntriesReply(2, false, context.getId(), 1, 2, true); } @@ -725,8 +705,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Prepare the entries to be sent with AppendEntries - List entries = new ArrayList<>(); - entries.add(newReplicatedLogEntry(1, 4, "four")); + List entries = List.of(newReplicatedLogEntry(1, 4, "four")); AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, -1, (short)0); @@ -734,7 +713,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); expectAndVerifyAppendEntriesReply(1, false, context.getId(), 1, 2); } @@ -755,7 +734,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Send the last entry again. - List entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one")); + List entries = List.of(newReplicatedLogEntry(1, 1, "one")); follower = createBehavior(context); @@ -768,7 +747,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { // Send the last entry again and also a new one. 
- entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two")); + entries = List.of(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two")); MessageCollectorActor.clearMessages(leaderActor); follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 2, -1, (short)0)); @@ -796,8 +775,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { context.setReplicatedLog(log); // Prepare the entries to be sent with AppendEntries - List entries = new ArrayList<>(); - entries.add(newReplicatedLogEntry(1, 4, "four")); + List entries = List.of(newReplicatedLogEntry(1, 4, "four")); AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, 3, (short)0); @@ -805,7 +783,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries); - Assert.assertSame(follower, newBehavior); + assertSame(follower, newBehavior); expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4); } @@ -853,7 +831,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { snapshot.getLastAppliedIndex()); assertEquals("getLastTerm", lastInstallSnapshot.getLastIncludedTerm(), snapshot.getLastTerm()); assertEquals("getState type", ByteState.class, snapshot.getState().getClass()); - Assert.assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes()); + assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes()); assertEquals("getElectionTerm", 1, snapshot.getElectionTerm()); assertEquals("getElectionVotedFor", "leader", snapshot.getElectionVotedFor()); applySnapshot.getCallback().onSuccess(); @@ -907,7 +885,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { // Send an append entry AppendEntries appendEntries = new AppendEntries(1, "leader", 1, 1, - Arrays.asList(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1); + List.of(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1); follower.handleMessage(leaderActor, appendEntries); @@ -950,7 +928,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { // Send appendEntries with a new term and leader. 
AppendEntries appendEntries = new AppendEntries(2, "new-leader", 1, 1, - Arrays.asList(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1); + List.of(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1); follower.handleMessage(leaderActor, appendEntries); @@ -1004,8 +982,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload("")); - List entries = Arrays.asList( - newReplicatedLogEntry(2, 101, "foo")); + List entries = List.of(newReplicatedLogEntry(2, 101, "foo")); // The new commitIndex is 101 AppendEntries appendEntries = new AppendEntries(2, "leader", 101, 1, entries, 102, 101, (short)0); @@ -1081,7 +1058,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { @Test public void testFollowerSchedulesElectionIfNonVoting() { MockRaftActorContext context = createActorContext(); - context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo(context.getId(), false)))); + context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo(context.getId(), false)))); ((DefaultConfigParamsImpl)context.getConfigParams()).setHeartBeatInterval( FiniteDuration.apply(100, TimeUnit.MILLISECONDS)); ((DefaultConfigParamsImpl)context.getConfigParams()).setElectionTimeoutFactor(1); @@ -1132,7 +1109,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { final AtomicReference followerRaftActor = new AtomicReference<>(); RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor); Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id) - .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort); + .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort); TestActorRef followerActorRef = actorFactory.createTestActor(builder.props() .withDispatcher(Dispatchers.DefaultDispatcherId()), id); followerRaftActor.set(followerActorRef.underlyingActor()); @@ -1142,7 +1119,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { InMemoryJournal.addDeleteMessagesCompleteLatch(id); InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class); - List entries = Arrays.asList( + List entries = List.of( newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two")); AppendEntries appendEntries = new AppendEntries(1, "leader", -1, -1, entries, 1, -1, (short)0); @@ -1169,7 +1146,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { assertEquals("Snapshot getLastAppliedIndex", 1, snapshot.getLastAppliedIndex()); assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm()); assertEquals("Snapshot getLastIndex", 1, snapshot.getLastIndex()); - assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData()), + assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData()), MockRaftActor.fromState(snapshot.getState())); } @@ -1187,7 +1164,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { final AtomicReference followerRaftActor = new AtomicReference<>(); RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor); Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id) - .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort); + .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort); TestActorRef followerActorRef = 
actorFactory.createTestActor(builder.props() .withDispatcher(Dispatchers.DefaultDispatcherId()), id); followerRaftActor.set(followerActorRef.underlyingActor()); @@ -1197,7 +1174,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { InMemoryJournal.addDeleteMessagesCompleteLatch(id); InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class); - List entries = Arrays.asList( + List entries = List.of( newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"), newReplicatedLogEntry(1, 2, "three")); @@ -1225,7 +1202,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { assertEquals("Snapshot getLastAppliedIndex", 2, snapshot.getLastAppliedIndex()); assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm()); assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex()); - assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(), + assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData(), entries.get(2).getData()), MockRaftActor.fromState(snapshot.getState())); assertEquals("Journal size", 0, followerRaftActor.get().getReplicatedLog().size()); @@ -1244,7 +1221,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { assertEquals("Last index", 2, followerRaftActor.get().getReplicatedLog().lastIndex()); assertEquals("Last applied index", 2, followerRaftActor.get().getRaftActorContext().getLastApplied()); assertEquals("Commit index", 2, followerRaftActor.get().getRaftActorContext().getCommitIndex()); - assertEquals("State", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(), + assertEquals("State", List.of(entries.get(0).getData(), entries.get(1).getData(), entries.get(2).getData()), followerRaftActor.get().getState()); } @@ -1262,7 +1239,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { final AtomicReference followerRaftActor = new AtomicReference<>(); RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor); Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id) - .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort); + .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort); TestActorRef followerActorRef = actorFactory.createTestActor(builder.props() .withDispatcher(Dispatchers.DefaultDispatcherId()), id); followerRaftActor.set(followerActorRef.underlyingActor()); @@ -1272,7 +1249,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { InMemoryJournal.addDeleteMessagesCompleteLatch(id); InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class); - List entries = Arrays.asList( + List entries = List.of( newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"), newReplicatedLogEntry(1, 2, "three")); @@ -1302,7 +1279,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { assertEquals("Snapshot getLastAppliedIndex", 0, snapshot.getLastAppliedIndex()); assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm()); assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex()); - assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData()), + assertEquals("Snapshot state", List.of(entries.get(0).getData()), MockRaftActor.fromState(snapshot.getState())); } @@ -1318,7 +1295,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { follower = createBehavior(context); 
follower.handleMessage(leaderActor, - new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1, (short)0)); + new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short)0)); AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class); assertTrue(reply.isNeedsLeaderAddress()); @@ -1327,7 +1304,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { PeerAddressResolver mockResolver = mock(PeerAddressResolver.class); ((DefaultConfigParamsImpl)context.getConfigParams()).setPeerAddressResolver(mockResolver); - follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1, + follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short)0, RaftVersions.CURRENT_VERSION, leaderActor.path().toString())); reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class); @@ -1370,10 +1347,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { int size = chunkSize; if (chunkSize > snapshotLength) { size = snapshotLength; - } else { - if (start + chunkSize > snapshotLength) { - size = snapshotLength - start; - } + } else if (start + chunkSize > snapshotLength) { + size = snapshotLength - start; } byte[] nextChunk = new byte[size]; @@ -1410,12 +1385,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest { } private ByteString createSnapshot() { - HashMap followerSnapshot = new HashMap<>(); - followerSnapshot.put("1", "A"); - followerSnapshot.put("2", "B"); - followerSnapshot.put("3", "C"); - - return toByteString(followerSnapshot); + return toByteString(Map.of("1", "A", "2", "B", "3", "C")); } @Override diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java new file mode 100644 index 0000000000..aa07181711 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.behaviors; + +import static org.junit.Assert.assertEquals; + +import com.google.common.io.ByteSource; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Objects; +import org.junit.Test; + +public class LeaderInstallSnapshotStateTest { + // Prime number on purpose + private static final int CHUNK_SIZE = 9_999_991; + // More than Integer.MAX_VALUE + private static final long SIZE = 4_294_967_294L; + + @Test + public void testSnapshotLongerThanInteger() throws IOException { + try (var fts = new LeaderInstallSnapshotState(CHUNK_SIZE, "test")) { + fts.setSnapshotBytes(new MockByteSource(SIZE)); + + int chunkIndex = 0; + long offset = 0; + long expectedChunkSize = CHUNK_SIZE; + while (offset < SIZE) { + offset = offset + CHUNK_SIZE; + if (offset > SIZE) { + // We reached last chunk + expectedChunkSize = CHUNK_SIZE - (offset - SIZE); + offset = SIZE; + } + chunkIndex ++; + final byte[] chunk = fts.getNextChunk(); + assertEquals("byte size not matching for chunk:", expectedChunkSize, chunk.length); + assertEquals("chunk index not matching", chunkIndex, fts.getChunkIndex()); + fts.markSendStatus(true); + if (!fts.isLastChunk(chunkIndex)) { + fts.incrementChunkIndex(); + } + } + + assertEquals("totalChunks not matching", chunkIndex, fts.getTotalChunks()); + } + } + + private static final class MockByteSource extends ByteSource { + private final long size; + + private MockByteSource(final long size) { + this.size = size; + } + + @Override + public long size() { + return size; + } + + @Override + public InputStream openStream() { + return new MockInputStream(size); + } + } + + private static final class MockInputStream extends InputStream { + private long remaining; + + MockInputStream(final long size) { + remaining = size; + } + + @Override + public int read() { + if (remaining > 0) { + remaining--; + return 0; + } + return -1; + } + + @Override + public int read(final byte[] bytes, final int off, final int len) { + Objects.checkFromIndexSize(off, len, bytes.length); + if (remaining <= 0) { + return -1; + } + final int count = len <= remaining ? 
len : (int) remaining; + Arrays.fill(bytes, off, off + count, (byte) 0); + remaining -= count; + return count; + } + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java index d9a5487e55..0f16f92c49 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java @@ -25,18 +25,16 @@ import akka.actor.Terminated; import akka.protobuf.ByteString; import akka.testkit.TestActorRef; import akka.testkit.javadsl.TestKit; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteSource; import com.google.common.util.concurrent.Uninterruptibles; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.lang3.SerializationUtils; @@ -65,6 +63,7 @@ import org.opendaylight.controller.cluster.raft.messages.AppendEntries; import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.messages.RaftRPC; import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply; import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries; @@ -73,7 +72,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt import org.opendaylight.controller.cluster.raft.persisted.Snapshot; import org.opendaylight.controller.cluster.raft.policy.DefaultRaftPolicy; import org.opendaylight.controller.cluster.raft.policy.RaftPolicy; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor; import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor; import org.opendaylight.yangtools.concepts.Identifier; @@ -170,9 +168,8 @@ public class LeaderTest extends AbstractLeaderTest { private RaftActorBehavior sendReplicate(final MockRaftActorContext actorContext, final long term, final long index, final Payload payload) { - SimpleReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(index, term, payload); - actorContext.getReplicatedLog().append(newEntry); - return leader.handleMessage(leaderActor, new Replicate(null, null, newEntry, true)); + actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(index, term, payload)); + return leader.handleMessage(leaderActor, new Replicate(index, true, null, null)); } @Test @@ -403,7 +400,7 @@ public class LeaderTest extends AbstractLeaderTest { final int messageNr) { final AppendEntries commitReq = allMessages.get(2 * messageNr + 1); assertEquals(lastIndex + messageNr + 1, commitReq.getLeaderCommit()); - assertEquals(ImmutableList.of(), commitReq.getEntries()); + assertEquals(List.of(), 
commitReq.getEntries()); } private static void assertRequestEntry(final long lastIndex, final List allMessages, @@ -548,16 +545,14 @@ public class LeaderTest extends AbstractLeaderTest { actorContext.setLastApplied(0); - long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1; - long term = actorContext.getTermInformation().getCurrentTerm(); - ReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry( - newLogIndex, term, new MockRaftActorContext.MockPayload("foo")); + final long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1; + final long term = actorContext.getTermInformation().getCurrentTerm(); + final var data = new MockRaftActorContext.MockPayload("foo"); - actorContext.getReplicatedLog().append(newEntry); + actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(newLogIndex, term, data)); final Identifier id = new MockIdentifier("state-id"); - RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor, - new Replicate(leaderActor, id, newEntry, true)); + final var raftBehavior = leader.handleMessage(leaderActor, new Replicate(newLogIndex, true, leaderActor, id)); // State should not change assertTrue(raftBehavior instanceof Leader); @@ -566,8 +561,7 @@ public class LeaderTest extends AbstractLeaderTest { // We should get 2 ApplyState messages - 1 for new log entry and 1 for the previous // one since lastApplied state is 0. - List applyStateList = MessageCollectorActor.getAllMatching( - leaderActor, ApplyState.class); + final var applyStateList = MessageCollectorActor.getAllMatching(leaderActor, ApplyState.class); assertEquals("ApplyState count", newLogIndex, applyStateList.size()); for (int i = 0; i <= newLogIndex - 1; i++) { @@ -577,7 +571,7 @@ public class LeaderTest extends AbstractLeaderTest { } ApplyState last = applyStateList.get((int) newLogIndex - 1); - assertEquals("getData", newEntry.getData(), last.getReplicatedLogEntry().getData()); + assertEquals("getData", data, last.getReplicatedLogEntry().getData()); assertEquals("getIdentifier", id, last.getIdentifier()); } @@ -587,11 +581,6 @@ public class LeaderTest extends AbstractLeaderTest { final MockRaftActorContext actorContext = createActorContextWithFollower(); - Map leadersSnapshot = new HashMap<>(); - leadersSnapshot.put("1", "A"); - leadersSnapshot.put("2", "B"); - leadersSnapshot.put("3", "C"); - //clears leaders log actorContext.getReplicatedLog().removeFrom(0); @@ -614,12 +603,12 @@ public class LeaderTest extends AbstractLeaderTest { //update follower timestamp leader.markFollowerActive(FOLLOWER_ID); - ByteString bs = toByteString(leadersSnapshot); + ByteString bs = toByteString(Map.of("1", "A", "2", "B", "3", "C")); leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()), - Collections.emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, + List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null), ByteSource.wrap(bs.toByteArray()))); LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState( - actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName()); + actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName()); fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray())); leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts); @@ -677,18 +666,15 @@ public class LeaderTest extends AbstractLeaderTest { MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class); // new entry - SimpleReplicatedLogEntry entry = - new 
SimpleReplicatedLogEntry(newEntryIndex, currentTerm, - new MockRaftActorContext.MockPayload("D")); - - actorContext.getReplicatedLog().append(entry); + actorContext.getReplicatedLog().append( + new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D"))); //update follower timestamp leader.markFollowerActive(FOLLOWER_ID); // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex RaftActorBehavior raftBehavior = leader.handleMessage( - leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true)); + leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id"))); assertTrue(raftBehavior instanceof Leader); @@ -725,15 +711,13 @@ public class LeaderTest extends AbstractLeaderTest { leader.setSnapshotHolder(null); // new entry - SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, - new MockRaftActorContext.MockPayload("D")); - - actorContext.getReplicatedLog().append(entry); + actorContext.getReplicatedLog().append( + new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D"))); //update follower timestamp leader.markFollowerActive(FOLLOWER_ID); - leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true)); + leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id"))); assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing()); @@ -745,7 +729,7 @@ public class LeaderTest extends AbstractLeaderTest { assertEquals(2, cs.getLastTerm()); // if an initiate is started again when first is in progress, it shouldnt initiate Capture - leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true)); + leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id"))); assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot()); } @@ -788,10 +772,8 @@ public class LeaderTest extends AbstractLeaderTest { } // new entry - SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, - new MockRaftActorContext.MockPayload("D")); - - actorContext.getReplicatedLog().append(entry); + actorContext.getReplicatedLog().append( + new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D"))); //update follower timestamp leader.markFollowerActive(FOLLOWER_ID); @@ -815,7 +797,7 @@ public class LeaderTest extends AbstractLeaderTest { MessageCollectorActor.clearMessages(followerActor); // Sending Replicate message should not initiate another capture since the first is in progress. - leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true)); + leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id"))); assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot()); // Similarly sending another AppendEntriesReply to force a snapshot should not initiate another capture. @@ -825,7 +807,7 @@ public class LeaderTest extends AbstractLeaderTest { // Now simulate the CaptureSnapshotReply to initiate snapshot install - the first chunk should be sent. 
final byte[] bytes = new byte[]{1, 2, 3}; - installSnapshotStream.get().get().write(bytes); + installSnapshotStream.get().orElseThrow().write(bytes); actorContext.getSnapshotManager().persist(ByteState.of(bytes), installSnapshotStream.get(), Runtime.getRuntime().totalMemory()); MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class); @@ -873,7 +855,7 @@ public class LeaderTest extends AbstractLeaderTest { leader.getFollower(FOLLOWER_ID).setNextIndex(0); byte[] bytes = toByteString(leadersSnapshot).toByteArray(); - Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.emptyList(), + Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(), lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null); RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor, @@ -925,7 +907,7 @@ public class LeaderTest extends AbstractLeaderTest { leader.getFollower(FOLLOWER_ID).setNextIndex(-1); byte[] bytes = toByteString(leadersSnapshot).toByteArray(); - Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.emptyList(), + Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(), lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null); RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor, @@ -980,10 +962,10 @@ public class LeaderTest extends AbstractLeaderTest { ByteString bs = toByteString(leadersSnapshot); leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()), - Collections.emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, + List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null), ByteSource.wrap(bs.toByteArray()))); LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState( - actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName()); + actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName()); fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray())); leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts); while (!fts.isLastChunk(fts.getChunkIndex())) { @@ -1021,7 +1003,7 @@ public class LeaderTest extends AbstractLeaderTest { DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl() { @Override - public int getSnapshotChunkSize() { + public int getMaximumMessageSliceSize() { return 50; } }; @@ -1049,8 +1031,7 @@ public class LeaderTest extends AbstractLeaderTest { ByteString bs = toByteString(leadersSnapshot); Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()), - Collections.emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, - -1, null, null); + List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null); leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray()))); @@ -1099,7 +1080,7 @@ public class LeaderTest extends AbstractLeaderTest { actorContext.setConfigParams(new DefaultConfigParamsImpl() { @Override - public int getSnapshotChunkSize() { + public int getMaximumMessageSliceSize() { return 50; } }); @@ -1123,8 +1104,7 @@ public class LeaderTest extends AbstractLeaderTest { ByteString bs = toByteString(leadersSnapshot); Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()), - Collections.emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, - -1, null, null); + List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null); Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); 
leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray()))); @@ -1164,7 +1144,7 @@ public class LeaderTest extends AbstractLeaderTest { actorContext.setConfigParams(new DefaultConfigParamsImpl() { @Override - public int getSnapshotChunkSize() { + public int getMaximumMessageSliceSize() { return 50; } }); @@ -1188,8 +1168,7 @@ public class LeaderTest extends AbstractLeaderTest { ByteString bs = toByteString(leadersSnapshot); Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()), - Collections.emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, - -1, null, null); + List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null); leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray()))); @@ -1198,8 +1177,8 @@ public class LeaderTest extends AbstractLeaderTest { assertEquals(1, installSnapshot.getChunkIndex()); assertEquals(3, installSnapshot.getTotalChunks()); - assertEquals(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE, - installSnapshot.getLastChunkHashCode().getAsInt()); + assertEquals(OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE), + installSnapshot.getLastChunkHashCode()); final int hashCode = Arrays.hashCode(installSnapshot.getData()); @@ -1212,7 +1191,7 @@ public class LeaderTest extends AbstractLeaderTest { assertEquals(2, installSnapshot.getChunkIndex()); assertEquals(3, installSnapshot.getTotalChunks()); - assertEquals(hashCode, installSnapshot.getLastChunkHashCode().getAsInt()); + assertEquals(OptionalInt.of(hashCode), installSnapshot.getLastChunkHashCode()); } @Test @@ -1282,8 +1261,7 @@ public class LeaderTest extends AbstractLeaderTest { private MockRaftActorContext createActorContextWithFollower() { MockRaftActorContext actorContext = createActorContext(); - actorContext.setPeerAddresses(ImmutableMap.builder().put(FOLLOWER_ID, - followerActor.path().toString()).build()); + actorContext.setPeerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString())); return actorContext; } @@ -1292,7 +1270,7 @@ public class LeaderTest extends AbstractLeaderTest { DefaultConfigParamsImpl followerConfig = new DefaultConfigParamsImpl(); followerConfig.setElectionTimeoutFactor(10000); followerActorContext.setConfigParams(followerConfig); - followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString())); + followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString())); return followerActorContext; } @@ -1358,7 +1336,7 @@ public class LeaderTest extends AbstractLeaderTest { final MockRaftActorContext leaderActorContext = createActorContext(); MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor); - followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString())); + followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString())); Follower follower = new Follower(followerActorContext); followerActor.underlyingActor().setBehavior(follower); @@ -1739,7 +1717,7 @@ public class LeaderTest extends AbstractLeaderTest { FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID); assertEquals(payloadVersion, leader.getLeaderPayloadVersion()); - assertEquals(RaftVersions.HELIUM_VERSION, followerInfo.getRaftVersion()); + assertEquals(RaftVersions.FLUORINE_VERSION, followerInfo.getRaftVersion()); AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, true, 2, 1, 
payloadVersion); @@ -1793,7 +1771,8 @@ public class LeaderTest extends AbstractLeaderTest { MockRaftActorContext leaderActorContext = createActorContextWithFollower(); ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval( new FiniteDuration(1000, TimeUnit.SECONDS)); - ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(2); + // Note: the size here depends on estimate + ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(246); leaderActorContext.setReplicatedLog( new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 4, 1).build()); @@ -2268,7 +2247,7 @@ public class LeaderTest extends AbstractLeaderTest { logStart("testReplicationWithPayloadSizeThatExceedsThreshold"); final int serializedSize = SerializationUtils.serialize(new AppendEntries(1, LEADER_ID, -1, -1, - Arrays.asList(new SimpleReplicatedLogEntry(0, 1, + List.of(new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("large"))), 0, -1, (short)0)).length; final MockRaftActorContext.MockPayload largePayload = new MockRaftActorContext.MockPayload("large", serializedSize); @@ -2276,7 +2255,7 @@ public class LeaderTest extends AbstractLeaderTest { MockRaftActorContext leaderActorContext = createActorContextWithFollower(); ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval( new FiniteDuration(300, TimeUnit.MILLISECONDS)); - ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(serializedSize - 50); + ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(serializedSize - 50); leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build()); leaderActorContext.setCommitIndex(-1); leaderActorContext.setLastApplied(-1); @@ -2360,7 +2339,7 @@ public class LeaderTest extends AbstractLeaderTest { ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval( new FiniteDuration(100, TimeUnit.MILLISECONDS)); ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setElectionTimeoutFactor(1); - ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(10); + ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(10); leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build()); leaderActorContext.setCommitIndex(-1); leaderActorContext.setLastApplied(-1); @@ -2375,7 +2354,7 @@ public class LeaderTest extends AbstractLeaderTest { MessageCollectorActor.clearMessages(followerActor); sendReplicate(leaderActorContext, term, 0, new MockRaftActorContext.MockPayload("large", - leaderActorContext.getConfigParams().getSnapshotChunkSize() + 1)); + leaderActorContext.getConfigParams().getMaximumMessageSliceSize() + 1)); MessageCollectorActor.expectFirstMatching(followerActor, MessageSlice.class); // Sleep for at least 3 * election timeout so the slicing state expires. 
@@ -2422,7 +2401,7 @@ public class LeaderTest extends AbstractLeaderTest { // Initial heartbeat shouldn't have the leader address AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class); - assertFalse(appendEntries.getLeaderAddress().isPresent()); + assertNull(appendEntries.leaderAddress()); MessageCollectorActor.clearMessages(followerActor); // Send AppendEntriesReply indicating the follower needs the leader address @@ -2437,8 +2416,7 @@ public class LeaderTest extends AbstractLeaderTest { leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE); appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class); - assertTrue(appendEntries.getLeaderAddress().isPresent()); - assertEquals(leaderActor.path().toString(), appendEntries.getLeaderAddress().get()); + assertEquals(leaderActor.path().toString(), appendEntries.leaderAddress()); MessageCollectorActor.clearMessages(followerActor); // Send AppendEntriesReply indicating the follower does not need the leader address @@ -2452,7 +2430,7 @@ public class LeaderTest extends AbstractLeaderTest { leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE); appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class); - assertFalse(appendEntries.getLeaderAddress().isPresent()); + assertNull(appendEntries.leaderAddress()); } @Override @@ -2462,14 +2440,14 @@ public class LeaderTest extends AbstractLeaderTest { assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor()); } - private class MockConfigParamsImpl extends DefaultConfigParamsImpl { + private static class MockConfigParamsImpl extends DefaultConfigParamsImpl { private final long electionTimeOutIntervalMillis; - private final int snapshotChunkSize; + private final int maximumMessageSliceSize; - MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int snapshotChunkSize) { + MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int maximumMessageSliceSize) { this.electionTimeOutIntervalMillis = electionTimeOutIntervalMillis; - this.snapshotChunkSize = snapshotChunkSize; + this.maximumMessageSliceSize = maximumMessageSliceSize; } @Override @@ -2478,8 +2456,8 @@ public class LeaderTest extends AbstractLeaderTest { } @Override - public int getSnapshotChunkSize() { - return snapshotChunkSize; + public int getMaximumMessageSliceSize() { + return maximumMessageSliceSize; } } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java index d8d2b4045b..2c83f67582 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java @@ -16,10 +16,8 @@ import static org.mockito.Mockito.verify; import akka.protobuf.ByteString; import com.google.common.io.ByteSource; import java.io.IOException; -import java.io.Serializable; import java.util.Arrays; import java.util.HashMap; -import java.util.Map; import java.util.OptionalInt; import org.apache.commons.lang3.SerializationUtils; import org.junit.Before; @@ -37,10 +35,11 @@ import org.slf4j.LoggerFactory; public class SnapshotTrackerTest { private static final Logger LOG = 
LoggerFactory.getLogger(SnapshotTrackerTest.class); + private final HashMap data = new HashMap<>(); + @Mock private RaftActorContext mockContext; private FileBackedOutputStream fbos; - private Map data; private ByteString byteString; private byte[] chunk1; private byte[] chunk2; @@ -48,12 +47,11 @@ public class SnapshotTrackerTest { @Before public void setup() { - data = new HashMap<>(); data.put("key1", "value1"); data.put("key2", "value2"); data.put("key3", "value3"); - byteString = ByteString.copyFrom(SerializationUtils.serialize((Serializable) data)); + byteString = ByteString.copyFrom(SerializationUtils.serialize(data)); chunk1 = getNextChunk(byteString, 0, 10); chunk2 = getNextChunk(byteString, 10, 10); chunk3 = getNextChunk(byteString, 20, byteString.size()); @@ -123,10 +121,8 @@ public class SnapshotTrackerTest { int start = offset; if (size > snapshotLength) { size = snapshotLength; - } else { - if (start + size > snapshotLength) { - size = snapshotLength - start; - } + } else if (start + size > snapshotLength) { + size = snapshotLength - start; } byte[] nextChunk = new byte[size]; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java index 81b9fbbb86..4db399666f 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java @@ -7,9 +7,10 @@ */ package org.opendaylight.controller.cluster.raft.client.messages; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,10 +19,11 @@ import org.junit.Test; * @author Thomas Pantelis */ public class ShutdownTest { - @Test public void test() { - Shutdown cloned = (Shutdown) SerializationUtils.clone(Shutdown.INSTANCE); + final var bytes = SerializationUtils.serialize(Shutdown.INSTANCE); + assertEquals(86, bytes.length); + final var cloned = SerializationUtils.deserialize(bytes); assertSame("Cloned instance", Shutdown.INSTANCE, cloned); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java index 8452a71c24..79c7477ba2 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.raft.RaftVersions; @@ -19,29 +19,14 @@ import org.opendaylight.controller.cluster.raft.RaftVersions; * @author Thomas Pantelis */ public class AppendEntriesReplyTest { - @Test public void testSerialization() { - AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, 
(short)6, true, true, - RaftVersions.CURRENT_VERSION); - AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected); + final var expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true, + RaftVersions.CURRENT_VERSION); - assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); - assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId()); - assertEquals("getLogLastTerm", expected.getLogLastTerm(), cloned.getLogLastTerm()); - assertEquals("getLogLastIndex", expected.getLogLastIndex(), cloned.getLogLastIndex()); - assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion()); - assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion()); - assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot()); - assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress()); - } - - @Test - @Deprecated - public void testPreFluorineSerialization() { - AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true, - RaftVersions.BORON_VERSION); - AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(98, bytes.length); + final var cloned = (AppendEntriesReply) SerializationUtils.deserialize(bytes); assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId()); @@ -50,6 +35,6 @@ public class AppendEntriesReplyTest { assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion()); assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion()); assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot()); - assertEquals("isNeedsLeaderAddress", false, cloned.isNeedsLeaderAddress()); + assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress()); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java index a7c3c8b9d5..38f1defb9f 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java @@ -8,11 +8,10 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import java.util.Arrays; import java.util.Iterator; -import org.apache.commons.lang.SerializationUtils; +import java.util.List; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; import org.opendaylight.controller.cluster.raft.RaftVersions; @@ -25,7 +24,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt * @author Thomas Pantelis */ public class AppendEntriesTest { - @Test public void testSerialization() { ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1")); @@ -36,41 +34,29 @@ public class AppendEntriesTest { // 
Without leader address - AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L, - -1, payloadVersion, RaftVersions.CURRENT_VERSION, null); + var expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion, + RaftVersions.CURRENT_VERSION, null); - AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected); + var bytes = SerializationUtils.serialize(expected); + assertEquals(285, bytes.length); + var cloned = (AppendEntries) SerializationUtils.deserialize(bytes); verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION); // With leader address - expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L, - -1, payloadVersion, RaftVersions.CURRENT_VERSION, "leader address"); + expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion, + RaftVersions.CURRENT_VERSION, "leader address"); - cloned = (AppendEntries) SerializationUtils.clone(expected); + bytes = SerializationUtils.serialize(expected); + assertEquals(301, bytes.length); + cloned = (AppendEntries) SerializationUtils.deserialize(bytes); verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION); } - @Test - @Deprecated - public void testPreFluorineSerialization() { - ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1")); - - ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(3, 4, new MockPayload("payload2")); - - short payloadVersion = 5; - - AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L, - -1, payloadVersion, RaftVersions.BORON_VERSION, "leader address"); - - AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected); - - verifyAppendEntries(expected, cloned, RaftVersions.BORON_VERSION); - } - - private static void verifyAppendEntries(AppendEntries expected, AppendEntries actual, short recipientRaftVersion) { + private static void verifyAppendEntries(final AppendEntries expected, final AppendEntries actual, + final short recipientRaftVersion) { assertEquals("getLeaderId", expected.getLeaderId(), actual.getLeaderId()); assertEquals("getTerm", expected.getTerm(), actual.getTerm()); assertEquals("getLeaderCommit", expected.getLeaderCommit(), actual.getLeaderCommit()); @@ -85,16 +71,11 @@ public class AppendEntriesTest { verifyReplicatedLogEntry(iter.next(), e); } - if (recipientRaftVersion >= RaftVersions.FLUORINE_VERSION) { - assertEquals("getLeaderAddress", expected.getLeaderAddress(), actual.getLeaderAddress()); - assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion()); - } else { - assertFalse(actual.getLeaderAddress().isPresent()); - assertEquals("getLeaderRaftVersion", RaftVersions.BORON_VERSION, actual.getLeaderRaftVersion()); - } + assertEquals("getLeaderAddress", expected.leaderAddress(), actual.leaderAddress()); + assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion()); } - private static void verifyReplicatedLogEntry(ReplicatedLogEntry expected, ReplicatedLogEntry actual) { + private static void verifyReplicatedLogEntry(final ReplicatedLogEntry expected, final ReplicatedLogEntry actual) { assertEquals("getIndex", expected.getIndex(), actual.getIndex()); assertEquals("getTerm", expected.getTerm(), actual.getTerm()); assertEquals("getData", expected.getData().toString(), actual.getData().toString()); diff --git 
a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java index 2841d989cf..9db4cf4d03 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class InstallSnapshotReplyTest { - @Test public void testSerialization() { - InstallSnapshotReply expected = new InstallSnapshotReply(5L, "follower", 1, true); - InstallSnapshotReply cloned = (InstallSnapshotReply) SerializationUtils.clone(expected); + final var expected = new InstallSnapshotReply(5L, "follower", 1, true); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(95, bytes.length); + final var cloned = (InstallSnapshotReply) SerializationUtils.deserialize(bytes); assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java index c7fad2a191..090ab77dad 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java @@ -10,11 +10,10 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import java.io.Serializable; -import java.util.Arrays; +import java.util.List; import java.util.Optional; import java.util.OptionalInt; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.raft.RaftVersions; import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload; @@ -26,9 +25,17 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo; * @author Thomas Pantelis */ public class InstallSnapshotTest { + @Test + public void testCurrentSerialization() { + testSerialization(RaftVersions.CURRENT_VERSION, 1262, 1125); + } @Test - public void testSerialization() { + public void testFluorineSerialization() { + testSerialization(RaftVersions.FLUORINE_VERSION, 1302, 1165); + } + + private static void testSerialization(final short raftVersion, final int fullSize, final int emptySize) { byte[] data = new byte[1000]; for (int i = 0, j = 0; i < data.length; i++) { data[i] = (byte)j; @@ -37,21 +44,19 @@ public class InstallSnapshotTest { } } - ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList( + var serverConfig = new ServerConfigurationPayload(List.of( new ServerInfo("leader", 
true), new ServerInfo("follower", false))); - InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321), - Optional.of(serverConfig)); - - Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION); - assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass()); + assertInstallSnapshot(fullSize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321), + Optional.of(serverConfig), raftVersion)); - InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized); - verifyInstallSnapshot(expected, actual); + assertInstallSnapshot(emptySize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.empty(), + Optional.empty(), raftVersion)); + } - expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6); - actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable( - RaftVersions.CURRENT_VERSION)); - verifyInstallSnapshot(expected, actual); + private static void assertInstallSnapshot(final int expectedSize, final InstallSnapshot expected) { + final var bytes = SerializationUtils.serialize(expected); + assertEquals(expectedSize, bytes.length); + verifyInstallSnapshot(expected, (InstallSnapshot) SerializationUtils.deserialize(bytes)); } private static void verifyInstallSnapshot(final InstallSnapshot expected, final InstallSnapshot actual) { @@ -74,8 +79,8 @@ public class InstallSnapshotTest { assertEquals("getServerConfig present", expected.getServerConfig().isPresent(), actual.getServerConfig().isPresent()); if (expected.getServerConfig().isPresent()) { - assertEquals("getServerConfig", expected.getServerConfig().get().getServerConfig(), - actual.getServerConfig().get().getServerConfig()); + assertEquals("getServerConfig", expected.getServerConfig().orElseThrow().getServerConfig(), + actual.getServerConfig().orElseThrow().getServerConfig()); } } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java index fa1bb5f152..51488a362c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class RequestVoteReplyTest { - @Test public void testSerialization() { - RequestVoteReply expected = new RequestVoteReply(5, true); - RequestVoteReply cloned = (RequestVoteReply) SerializationUtils.clone(expected); + final var expected = new RequestVoteReply(5, true); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(78, bytes.length); + final var cloned = (RequestVoteReply) SerializationUtils.deserialize(bytes); assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); assertEquals("isVoteGranted", expected.isVoteGranted(), cloned.isVoteGranted()); diff --git 
a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java index 6cb9179ded..c3227be60c 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class RequestVoteTest { - @Test public void testSerialization() { - RequestVote expected = new RequestVote(4, "candidateId", 3, 2); - RequestVote cloned = (RequestVote) SerializationUtils.clone(expected); + final var expected = new RequestVote(4, "candidateId", 3, 2); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(97, bytes.length); + final var cloned = (RequestVote) SerializationUtils.deserialize(bytes); assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); assertEquals("getCandidateId", expected.getCandidateId(), cloned.getCandidateId()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java index b7f152574b..c762c1e2d0 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class ApplyJournalEntriesTest { - @Test public void testSerialization() { - ApplyJournalEntries expected = new ApplyJournalEntries(5); - ApplyJournalEntries cloned = (ApplyJournalEntries) SerializationUtils.clone(expected); + final var expected = new ApplyJournalEntries(5); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(80, bytes.length); + final var cloned = (ApplyJournalEntries) SerializationUtils.deserialize(bytes); assertEquals("getFromIndex", expected.getToIndex(), cloned.getToIndex()); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java index 8334296ead..73fb02f2bc 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; 
-import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class DeleteEntriesTest { - @Test public void testSerialization() { - DeleteEntries expected = new DeleteEntries(5); - DeleteEntries cloned = (DeleteEntries) SerializationUtils.clone(expected); + final var expected = new DeleteEntries(5); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(79, bytes.length); + final var cloned = (DeleteEntries) SerializationUtils.deserialize(bytes); assertEquals("getFromIndex", expected.getFromIndex(), cloned.getFromIndex()); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java index 963580cde4..18fa2d7719 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java @@ -7,9 +7,10 @@ */ package org.opendaylight.controller.cluster.raft.persisted; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -19,10 +20,11 @@ import org.junit.Test; * */ public class EmptyStateTest { - @Test public void testSerialization() { - EmptyState cloned = (EmptyState) SerializationUtils.clone(EmptyState.INSTANCE); + final var bytes = SerializationUtils.serialize(EmptyState.INSTANCE); + assertEquals(82, bytes.length); + final var cloned = SerializationUtils.deserialize(bytes); assertSame("cloned", EmptyState.INSTANCE, cloned); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java new file mode 100644 index 0000000000..bf2e8fa108 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.raft.persisted; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +import org.apache.commons.lang3.SerializationUtils; +import org.junit.Test; + +public class NoopPayloadTest { + @Test + public void testSerialization() { + final var bytes = SerializationUtils.serialize(NoopPayload.INSTANCE); + assertEquals(74, bytes.length); + assertSame(NoopPayload.INSTANCE, SerializationUtils.deserialize(bytes)); + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java index aa2fe90884..d686e946e7 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java @@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.util.Arrays; -import org.apache.commons.lang.SerializationUtils; +import java.util.List; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -20,19 +20,21 @@ import org.junit.Test; * @author Thomas Pantelis */ public class ServerConfigurationPayloadTest { - @Test public void testSerialization() { - ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true), - new ServerInfo("2", false))); - ServerConfigurationPayload cloned = (ServerConfigurationPayload) SerializationUtils.clone(expected); + final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true), + new ServerInfo("2", false))); + + final var bytes = SerializationUtils.serialize(expected); + assertEquals(125, bytes.length); + final var cloned = (ServerConfigurationPayload) SerializationUtils.deserialize(bytes); assertEquals("getServerConfig", expected.getServerConfig(), cloned.getServerConfig()); } @Test public void testSize() { - ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true))); + final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true))); assertTrue(expected.size() > 0); } } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java index ec4a3689b2..919aaba4cd 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import 
org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext; @@ -19,12 +19,12 @@ import org.opendaylight.controller.cluster.raft.MockRaftActorContext; * @author Thomas Pantelis */ public class SimpleReplicatedLogEntryTest { - @Test public void testSerialization() { - SimpleReplicatedLogEntry expected = new SimpleReplicatedLogEntry(0, 1, - new MockRaftActorContext.MockPayload("A")); - SimpleReplicatedLogEntry cloned = (SimpleReplicatedLogEntry) SerializationUtils.clone(expected); + final var expected = new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("A")); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(218, bytes.length); + final var cloned = (SimpleReplicatedLogEntry) SerializationUtils.deserialize(bytes); assertEquals("getTerm", expected.getTerm(), cloned.getTerm()); assertEquals("getIndex", expected.getIndex(), cloned.getIndex()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java index 9f1f924252..3223e482d6 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java @@ -9,10 +9,8 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; -import java.util.Arrays; -import java.util.Collections; import java.util.List; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; @@ -23,27 +21,29 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; * @author Thomas Pantelis */ public class SnapshotTest { - @Test public void testSerialization() { - testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, Arrays.asList( - new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload")))); - testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, Collections.emptyList()); + testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, List.of( + new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))), 491); + testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, List.of(), 345); } - private static void testSerialization(final byte[] state, final List unapplied) { + private static void testSerialization(final byte[] state, final List unapplied, + final int expectedSize) { long lastIndex = 6; long lastTerm = 2; long lastAppliedIndex = 5; long lastAppliedTerm = 1; long electionTerm = 3; String electionVotedFor = "member-1"; - ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList( + ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of( new ServerInfo("1", true), new ServerInfo("2", false))); - Snapshot expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex, + final var expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, electionTerm, electionVotedFor, serverConfig); - Snapshot cloned = (Snapshot) SerializationUtils.clone(expected); + final var bytes = 
SerializationUtils.serialize(expected); + assertEquals(expectedSize, bytes.length); + final var cloned = (Snapshot) SerializationUtils.deserialize(bytes); assertEquals("lastIndex", expected.getLastIndex(), cloned.getLastIndex()); assertEquals("lastTerm", expected.getLastTerm(), cloned.getLastTerm()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java index de95125966..75e32783b9 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -18,11 +18,12 @@ import org.junit.Test; * @author Thomas Pantelis */ public class UpdateElectionTermTest { - @Test public void testSerialization() { - UpdateElectionTerm expected = new UpdateElectionTerm(5, "leader"); - UpdateElectionTerm cloned = (UpdateElectionTerm) SerializationUtils.clone(expected); + final var expected = new UpdateElectionTerm(5, "leader"); + final var bytes = SerializationUtils.serialize(expected); + assertEquals(88, bytes.length); + final var cloned = (UpdateElectionTerm) SerializationUtils.deserialize(bytes); assertEquals("getCurrentTerm", expected.getCurrentTerm(), cloned.getCurrentTerm()); assertEquals("getVotedFor", expected.getVotedFor(), cloned.getVotedFor()); diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java index a9eea07387..21bf4bfa57 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java @@ -24,7 +24,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.Option; @@ -43,7 +43,7 @@ public class InMemoryJournal extends AsyncWriteJournal { final Class ofType; WriteMessagesComplete(final int count, final Class ofType) { - this.latch = new CountDownLatch(count); + latch = new CountDownLatch(count); this.ofType = ofType; } } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/pom.xml b/opendaylight/md-sal/sal-akka-segmented-journal/pom.xml index 3b80172aba..243f3b0bfc 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/pom.xml +++ b/opendaylight/md-sal/sal-akka-segmented-journal/pom.xml @@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -20,82 +20,47 @@ and is available at http://www.eclipse.org/legal/epl-v10.html bundle - - org.opendaylight.controller - 
repackaged-akka - - - com.typesafe.akka - akka-testkit_2.13 + com.google.guava + guava - com.typesafe.akka - akka-persistence-tck_2.13 + com.typesafe + config - - io.dropwizard.metrics metrics-core - - - org.scala-lang - scala-library + org.eclipse.jdt + org.eclipse.jdt.annotation - - org.opendaylight.controller - sal-clustering-commons - - - - - io.atomix atomix-storage - 3.1.5 - provided - - - io.atomix - atomix-utils - 3.1.5 - provided - com.esotericsoftware - kryo - 4.0.2 - provided + org.opendaylight.controller + repackaged-akka - com.esotericsoftware - minlog - 1.3.1 - provided + org.opendaylight.controller + sal-clustering-commons - com.esotericsoftware - reflectasm - 1.11.8 - provided + org.scala-lang + scala-library + - org.ow2.asm - asm - 5.2 - provided + com.typesafe.akka + akka-testkit_2.13 - org.objenesis - objenesis - 2.6 - provided + com.typesafe.akka + akka-persistence-tck_2.13 - commons-io commons-io @@ -108,34 +73,6 @@ and is available at http://www.eclipse.org/legal/epl-v10.html - - - - org.apache.felix - maven-bundle-plugin - true - - - ${project.groupId}.${project.artifactId} - - !COM.newmonics.*, - !android.os, - * - - - - *;inline=true;groupId=io.atomix, - *;inline=true;groupId=com.esotericsoftware, - *;inline=true;groupId=org.objenesis, - *;inline=true;groupId=org.ow2.asm, - - - - - - - scm:git:http://git.opendaylight.org/gerrit/controller.git scm:git:ssh://git.opendaylight.org:29418/controller.git diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java index 678749b1c1..b89ebf4eb1 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java @@ -13,6 +13,7 @@ import com.codahale.metrics.Histogram; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages; +import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages; /** * Abstraction of a data journal. This provides a unified interface towards {@link SegmentedJournalActor}, allowing @@ -79,7 +80,13 @@ abstract class DataJournal { /** * Handle a request to store some messages. * - * @param message Request message + * @param message {@link WriteMessages} message + * @return a {@link WrittenMessages} object + */ + abstract @NonNull WrittenMessages handleWriteMessages(@NonNull WriteMessages message); + + /** + * Flush all messages to durable storage. 
*/ - abstract void handleWriteMessages(@NonNull WriteMessages message); + abstract void flush(); } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java index 6899c6e1d6..fdd0b80d03 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java @@ -10,16 +10,13 @@ package org.opendaylight.controller.akka.segjournal; import static java.util.Objects.requireNonNull; import akka.persistence.PersistentRepr; -import io.atomix.storage.journal.JournalSegment; /** * A single entry in the data journal. We do not store {@code persistenceId} for each entry, as that is a - * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by {@link JournalSegment}'s - * index. - * - * @author Robert Varga + * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by a particular journal + * segment's index. */ -abstract class DataJournalEntry { +abstract sealed class DataJournalEntry { /** * A single data journal entry on its way to the backing file. */ diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java new file mode 100644 index 0000000000..e0d7be18e1 --- /dev/null +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.akka.segjournal; + +import static java.util.Objects.requireNonNull; + +import akka.actor.ActorSystem; +import akka.actor.ExtendedActorSystem; +import akka.persistence.PersistentRepr; +import akka.serialization.JavaSerializer; +import com.google.common.base.VerifyException; +import io.atomix.storage.journal.JournalSerdes.EntryInput; +import io.atomix.storage.journal.JournalSerdes.EntryOutput; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.io.IOException; +import java.util.concurrent.Callable; +import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence; +import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence; + +/** + * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as + * a nested JavaSerializer to handle the payload. + * + *
    + * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design: + * {@link #write(EntryOutput, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper + * around a {@link PersistentRepr}, while {@link #read(EntryInput)} produces an {@link FromPersistence}, which + * needs further processing to reconstruct a {@link PersistentRepr}. + */ +final class DataJournalEntrySerdes implements EntrySerdes { + private final ExtendedActorSystem actorSystem; + + DataJournalEntrySerdes(final ActorSystem actorSystem) { + this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem); + } + + @Override + public void write(final EntryOutput output, final DataJournalEntry entry) throws IOException { + if (entry instanceof ToPersistence toPersistence) { + final var repr = toPersistence.repr(); + output.writeString(repr.manifest()); + output.writeString(repr.writerUuid()); + output.writeObject(repr.payload()); + } else { + throw new VerifyException("Unexpected entry " + entry); + } + } + + @Override + public DataJournalEntry read(final EntryInput input) throws IOException { + return new FromPersistence(input.readString(), input.readString(), + JavaSerializer.currentSystem().withValue(actorSystem, (Callable) input::readObject)); + } +} diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java deleted file mode 100644 index e248262b15..0000000000 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.akka.segjournal; - -import static com.google.common.base.Verify.verify; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSystem; -import akka.actor.ExtendedActorSystem; -import akka.persistence.PersistentRepr; -import com.esotericsoftware.kryo.Kryo; -import com.esotericsoftware.kryo.Serializer; -import com.esotericsoftware.kryo.io.Input; -import com.esotericsoftware.kryo.io.Output; -import com.esotericsoftware.kryo.serializers.JavaSerializer; -import java.util.concurrent.Callable; -import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence; -import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence; - -/** - * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as - * a nested JavaSerializer to handle the payload. - * - *
    - * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design: - * {@link #write(Kryo, Output, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper - * around a {@link PersistentRepr}, while {@link #read(Kryo, Input, Class)} produces an {@link FromPersistence}, which - * needs further processing to reconstruct a {@link PersistentRepr}. - * - * @author Robert Varga - */ -final class DataJournalEntrySerializer extends Serializer { - private final JavaSerializer serializer = new JavaSerializer(); - private final ExtendedActorSystem actorSystem; - - DataJournalEntrySerializer(final ActorSystem actorSystem) { - this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem); - } - - @Override - public void write(final Kryo kryo, final Output output, final DataJournalEntry object) { - verify(object instanceof ToPersistence); - final PersistentRepr repr = ((ToPersistence) object).repr(); - output.writeString(repr.manifest()); - output.writeString(repr.writerUuid()); - serializer.write(kryo, output, repr.payload()); - } - - @Override - public DataJournalEntry read(final Kryo kryo, final Input input, final Class type) { - final String manifest = input.readString(); - final String uuid = input.readString(); - final Object payload = akka.serialization.JavaSerializer.currentSystem().withValue(actorSystem, - (Callable)() -> serializer.read(kryo, input, type)); - return new FromPersistence(manifest, uuid, payload); - } -} diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java index bc5eead800..ad4c110bc8 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java @@ -7,32 +7,30 @@ */ package org.opendaylight.controller.akka.segjournal; -import static com.google.common.base.Verify.verify; - import akka.actor.ActorSystem; -import akka.persistence.AtomicWrite; import akka.persistence.PersistentRepr; import com.codahale.metrics.Histogram; -import io.atomix.storage.StorageLevel; -import io.atomix.storage.journal.Indexed; +import com.google.common.base.VerifyException; +import io.atomix.storage.journal.JournalReader; +import io.atomix.storage.journal.JournalSerdes; +import io.atomix.storage.journal.JournalWriter; import io.atomix.storage.journal.SegmentedJournal; -import io.atomix.storage.journal.SegmentedJournalReader; -import io.atomix.storage.journal.SegmentedJournalWriter; -import io.atomix.utils.serializer.Namespace; +import io.atomix.storage.journal.StorageLevel; import java.io.File; import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence; import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages; +import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.jdk.javaapi.CollectionConverters; /** * Version 0 data journal, 
where every journal entry maps to exactly one segmented file entry. - * - * @author Robert Varga */ final class DataJournalV0 extends DataJournal { private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class); @@ -44,8 +42,8 @@ final class DataJournalV0 extends DataJournal { super(persistenceId, messageSize); entries = SegmentedJournal.builder() .withStorageLevel(storage).withDirectory(directory).withName("data") - .withNamespace(Namespace.builder() - .register(new DataJournalEntrySerializer(system), FromPersistence.class, ToPersistence.class) + .withNamespace(JournalSerdes.builder() + .register(new DataJournalEntrySerdes(system), FromPersistence.class, ToPersistence.class) .build()) .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize) .build(); @@ -68,31 +66,20 @@ final class DataJournalV0 extends DataJournal { @Override void close() { + flush(); entries.close(); } + @Override + void flush() { + entries.writer().flush(); + } + @Override @SuppressWarnings("checkstyle:illegalCatch") void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) { - try (SegmentedJournalReader reader = entries.openReader(fromSequenceNr)) { - int count = 0; - while (reader.hasNext() && count < message.max) { - final Indexed next = reader.next(); - if (next.index() > message.toSequenceNr) { - break; - } - - LOG.trace("{}: replay {}", persistenceId, next); - updateLargestSize(next.size()); - final DataJournalEntry entry = next.entry(); - verify(entry instanceof FromPersistence, "Unexpected entry %s", entry); - - final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index()); - LOG.debug("{}: replaying {}", persistenceId, repr); - message.replayCallback.accept(repr); - count++; - } - LOG.debug("{}: successfully replayed {} entries", persistenceId, count); + try (var reader = entries.openReader(fromSequenceNr)) { + handleReplayMessages(reader, message); } catch (Exception e) { LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e); message.promise.failure(e); @@ -101,34 +88,73 @@ final class DataJournalV0 extends DataJournal { } } + private void handleReplayMessages(final JournalReader reader, final ReplayMessages message) { + int count = 0; + while (count < message.max && reader.getNextIndex() <= message.toSequenceNr) { + final var repr = reader.tryNext((index, entry, size) -> { + LOG.trace("{}: replay index={} entry={}", persistenceId, index, entry); + updateLargestSize(size); + if (entry instanceof FromPersistence fromPersistence) { + return fromPersistence.toRepr(persistenceId, index); + } + throw new VerifyException("Unexpected entry " + entry); + }); + + if (repr == null) { + break; + } + + LOG.debug("{}: replaying {}", persistenceId, repr); + message.replayCallback.accept(repr); + count++; + } + LOG.debug("{}: successfully replayed {} entries", persistenceId, count); + } + @Override @SuppressWarnings("checkstyle:illegalCatch") - void handleWriteMessages(final WriteMessages message) { + WrittenMessages handleWriteMessages(final WriteMessages message) { final int count = message.size(); - final SegmentedJournalWriter writer = entries.writer(); + final var responses = new ArrayList<>(); + final var writer = entries.writer(); + long writtenBytes = 0; for (int i = 0; i < count; ++i) { final long mark = writer.getLastIndex(); - final AtomicWrite request = message.getRequest(i); + final var request = message.getRequest(i); + + final var reprs = CollectionConverters.asJava(request.payload()); + LOG.trace("{}: append 
{}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark); try { - for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) { - final Object payload = repr.payload(); - if (!(payload instanceof Serializable)) { - throw new UnsupportedOperationException("Non-serializable payload encountered " - + payload.getClass()); - } - - recordMessageSize(writer.append(new ToPersistence(repr)).size()); - } + writtenBytes += writePayload(writer, reprs); } catch (Exception e) { - LOG.warn("{}: failed to write out request", persistenceId, e); - message.setFailure(i, e); + LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e); + responses.add(e); writer.truncate(mark); continue; } + responses.add(null); + } + + return new WrittenMessages(message, responses, writtenBytes); + } + + private long writePayload(final JournalWriter writer, final List reprs) { + long bytes = 0; + for (var repr : reprs) { + final Object payload = repr.payload(); + if (!(payload instanceof Serializable)) { + throw new UnsupportedOperationException("Non-serializable payload encountered " + + payload.getClass()); + } - message.setSuccess(i); + LOG.trace("{}: starting append of {}", persistenceId, payload); + final var entry = writer.append(new ToPersistence(repr)); + final int size = entry.size(); + LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index()); + recordMessageSize(size); + bytes += size; } - writer.flush(); + return bytes; } } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java new file mode 100644 index 0000000000..eebf95ff0c --- /dev/null +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.akka.segjournal; + +import io.atomix.storage.journal.JournalSerdes.EntryInput; +import io.atomix.storage.journal.JournalSerdes.EntryOutput; +import io.atomix.storage.journal.JournalSerdes.EntrySerdes; +import java.io.IOException; + +enum LongEntrySerdes implements EntrySerdes { + LONG_ENTRY_SERDES { + @Override + public Long read(final EntryInput input) throws IOException { + return input.readLong(); + } + + @Override + public void write(final EntryOutput output, final Long entry) throws IOException { + output.writeLong(entry); + } + } +} diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java index 8efb2db3ab..b9320998c9 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java @@ -17,15 +17,13 @@ import akka.persistence.AtomicWrite; import akka.persistence.PersistentRepr; import akka.persistence.journal.japi.AsyncWriteJournal; import com.typesafe.config.Config; -import com.typesafe.config.ConfigMemorySize; -import io.atomix.storage.StorageLevel; import io.atomix.storage.journal.SegmentedJournal; +import io.atomix.storage.journal.StorageLevel; import java.io.File; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.Consumer; @@ -39,8 +37,6 @@ import scala.concurrent.Future; * An Akka persistence journal implementation on top of {@link SegmentedJournal}. This actor represents aggregation * of multiple journals and performs a receptionist job between Akka and invidual per-persistenceId actors. See * {@link SegmentedJournalActor} for details on how the persistence works. 
- * - * @author Robert Varga */ public class SegmentedFileJournal extends AsyncWriteJournal { public static final String STORAGE_ROOT_DIRECTORY = "root-directory"; @@ -48,6 +44,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal { public static final int STORAGE_MAX_ENTRY_SIZE_DEFAULT = 16 * 1024 * 1024; public static final String STORAGE_MAX_SEGMENT_SIZE = "max-segment-size"; public static final int STORAGE_MAX_SEGMENT_SIZE_DEFAULT = STORAGE_MAX_ENTRY_SIZE_DEFAULT * 8; + public static final String STORAGE_MAX_UNFLUSHED_BYTES = "max-unflushed-bytes"; public static final String STORAGE_MEMORY_MAPPED = "memory-mapped"; private static final Logger LOG = LoggerFactory.getLogger(SegmentedFileJournal.class); @@ -57,6 +54,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal { private final StorageLevel storage; private final int maxEntrySize; private final int maxSegmentSize; + private final int maxUnflushedBytes; public SegmentedFileJournal(final Config config) { rootDir = new File(config.getString(STORAGE_ROOT_DIRECTORY)); @@ -68,6 +66,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal { maxEntrySize = getBytes(config, STORAGE_MAX_ENTRY_SIZE, STORAGE_MAX_ENTRY_SIZE_DEFAULT); maxSegmentSize = getBytes(config, STORAGE_MAX_SEGMENT_SIZE, STORAGE_MAX_SEGMENT_SIZE_DEFAULT); + maxUnflushedBytes = getBytes(config, STORAGE_MAX_UNFLUSHED_BYTES, maxEntrySize); if (config.hasPath(STORAGE_MEMORY_MAPPED)) { storage = config.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK; @@ -80,12 +79,12 @@ public class SegmentedFileJournal extends AsyncWriteJournal { @Override public Future>> doAsyncWriteMessages(final Iterable messages) { - final Map map = new HashMap<>(); - final List>> result = new ArrayList<>(); + final var map = new HashMap(); + final var result = new ArrayList>>(); - for (AtomicWrite message : messages) { - final String persistenceId = message.persistenceId(); - final ActorRef handler = handlers.computeIfAbsent(persistenceId, this::createHandler); + for (var message : messages) { + final var persistenceId = message.persistenceId(); + final var handler = handlers.computeIfAbsent(persistenceId, this::createHandler); result.add(map.computeIfAbsent(handler, key -> new WriteMessages()).add(message)); } @@ -116,18 +115,18 @@ public class SegmentedFileJournal extends AsyncWriteJournal { } private ActorRef createHandler(final String persistenceId) { - final String directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8); - final File directory = new File(rootDir, directoryName); + final var directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8); + final var directory = new File(rootDir, directoryName); LOG.debug("Creating handler for {} in directory {}", persistenceId, directory); - final ActorRef handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage, - maxEntrySize, maxSegmentSize)); + final var handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage, + maxEntrySize, maxSegmentSize, maxUnflushedBytes)); LOG.debug("Directory {} handled by {}", directory, handler); return handler; } private Future delegateMessage(final String persistenceId, final AsyncMessage message) { - final ActorRef handler = handlers.get(persistenceId); + final var handler = handlers.get(persistenceId); if (handler == null) { return Futures.failed(new IllegalStateException("Cannot find handler for " + persistenceId)); } @@ -145,9 +144,8 @@ public class 
SegmentedFileJournal extends AsyncWriteJournal { if (!config.hasPath(path)) { return defaultValue; } - final ConfigMemorySize value = config.getMemorySize(path); - final long result = value.toBytes(); - checkArgument(result <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE); - return (int) result; + final long value = config.getBytes(path); + checkArgument(value <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE); + return (int) value; } } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java index e5c5b7807b..73ffab6a05 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java @@ -7,11 +7,14 @@ */ package org.opendaylight.controller.akka.segjournal; +import static com.google.common.base.Verify.verify; import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; import akka.actor.AbstractActor; +import akka.actor.ActorRef; import akka.actor.Props; +import akka.japi.pf.ReceiveBuilder; import akka.persistence.AtomicWrite; import akka.persistence.PersistentRepr; import com.codahale.metrics.Histogram; @@ -19,12 +22,13 @@ import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import com.google.common.base.MoreObjects; -import io.atomix.storage.StorageLevel; +import com.google.common.base.Stopwatch; import io.atomix.storage.journal.Indexed; +import io.atomix.storage.journal.JournalSerdes; import io.atomix.storage.journal.SegmentedJournal; -import io.atomix.storage.journal.SegmentedJournalWriter; -import io.atomix.utils.serializer.Namespace; +import io.atomix.storage.journal.StorageLevel; import java.io.File; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -56,11 +60,9 @@ import scala.concurrent.Promise; *
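The getBytes() helper reworked above now relies on Typesafe Config's own size parsing rather than ConfigMemorySize. A minimal, self-contained sketch of what Config.getBytes() accepts follows; the keys mirror the constants above, while the values and the main() wrapper are invented for illustration.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

// Illustrative only: demonstrates the size-string parsing behind getBytes() above.
public final class ConfigSizeExample {
    public static void main(final String[] args) {
        final Config config = ConfigFactory.parseString("""
            max-entry-size = 16M
            max-unflushed-bytes = 256K
            """);

        // Config.getBytes() understands suffixed sizes and returns plain bytes.
        System.out.println(config.getBytes("max-entry-size"));      // 16777216
        System.out.println(config.getBytes("max-unflushed-bytes")); // 262144
    }
}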
    * Split-file approach allows us to treat sequence numbers and indices as equivalent, without maintaining any explicit * mapping information. The only additional information we need to maintain is the last deleted sequence number. - * - * @author Robert Varga */ -final class SegmentedJournalActor extends AbstractActor { - abstract static class AsyncMessage { +abstract sealed class SegmentedJournalActor extends AbstractActor { + abstract static sealed class AsyncMessage { final Promise promise = Promise.apply(); } @@ -103,7 +105,7 @@ final class SegmentedJournalActor extends AbstractActor { private final List>> results = new ArrayList<>(); Future> add(final AtomicWrite write) { - final Promise> promise = Promise.apply(); + final var promise = Promise.>apply(); requests.add(write); results.add(promise); return promise.future(); @@ -119,7 +121,6 @@ final class SegmentedJournalActor extends AbstractActor { void setFailure(final int index, final Exception cause) { results.get(index).success(Optional.of(cause)); - } void setSuccess(final int index) { @@ -145,8 +146,145 @@ final class SegmentedJournalActor extends AbstractActor { } } + // responses == null on success, Exception on failure + record WrittenMessages(WriteMessages message, List responses, long writtenBytes) { + WrittenMessages { + verify(responses.size() == message.size(), "Mismatched %s and %s", message, responses); + verify(writtenBytes >= 0, "Unexpected length %s", writtenBytes); + } + + private void complete() { + for (int i = 0, size = responses.size(); i < size; ++i) { + if (responses.get(i) instanceof Exception ex) { + message.setFailure(i, ex); + } else { + message.setSuccess(i); + } + } + } + } + + /** + * A {@link SegmentedJournalActor} which delays issuing a flush operation until a watermark is reached or when the + * queue is empty. + * + *
    + * The problem we are addressing is that there is a queue sitting in from of the actor, which we have no direct + * access to. Since a flush involves committing data to durable storage, that operation can easily end up dominating + * workloads. + * + *
    + * We solve this by having an additional queue in which we track which messages were written and trigger a flush + * only when the number of bytes we have written exceeds specified limit. The other part is that each time this + * queue becomes non-empty, we send a dedicated message to self. This acts as a actor queue probe -- when we receive + * it, we know we have processed all messages that were in the queue when we first delayed the write. + * + *
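Condensing the two paragraphs above into code (the description continues below), this is a plain-Java sketch of the same watermark-plus-probe rule; it is not the actor implementation that follows, and the LongConsumer stands in for sending the self-addressed Flush message.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.LongConsumer;

// Simplified restatement of the Delayed logic implemented below; not the actual actor code.
final class DelayedFlushSketch {
    private final Queue<Long> unflushedWriteSizes = new ArrayDeque<>();
    private final long maxUnflushedBytes;
    private long unflushedBytes;
    private long batch;

    DelayedFlushSketch(final long maxUnflushedBytes) {
        this.maxUnflushedBytes = maxUnflushedBytes;
    }

    // Called after each write; scheduleProbe stands in for self().tell(new Flush(++batch), noSender()).
    void onWritten(final long writtenBytes, final LongConsumer scheduleProbe) {
        final boolean first = unflushedWriteSizes.isEmpty();
        unflushedWriteSizes.add(writtenBytes);
        unflushedBytes += writtenBytes;
        if (unflushedBytes >= maxUnflushedBytes) {
            // Watermark reached: flush immediately.
            flush();
        } else if (first) {
            // Queue just became non-empty: schedule a probe that drains once the mailbox empties.
            scheduleProbe.accept(++batch);
        }
    }

    // Called when the probe arrives back; only the most recently scheduled probe triggers a flush.
    void onProbe(final long probeBatch) {
        if (probeBatch == batch) {
            flush();
        }
    }

    private void flush() {
        if (!unflushedWriteSizes.isEmpty()) {
            // The real code syncs the journal here and completes the pending WriteMessages.
            unflushedWriteSizes.clear();
            unflushedBytes = 0;
        }
    }
}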
    + * The combination of these mechanisms ensure we use a minimal delay while also ensuring we take advantage of + * batching opportunities. + */ + private static final class Delayed extends SegmentedJournalActor { + private static final class Flush extends AsyncMessage { + final long batch; + + Flush(final long batch) { + this.batch = batch; + } + } + + private record UnflushedWrite(WrittenMessages message, Stopwatch start, long count) { + UnflushedWrite { + requireNonNull(message); + requireNonNull(start); + } + } + + private final ArrayDeque unflushedWrites = new ArrayDeque<>(); + private final Stopwatch unflushedDuration = Stopwatch.createUnstarted(); + private final long maxUnflushedBytes; + + private long batch = 0; + private long unflushedBytes = 0; + + Delayed(final String persistenceId, final File directory, final StorageLevel storage, + final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) { + super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize); + this.maxUnflushedBytes = maxUnflushedBytes; + } + + @Override + ReceiveBuilder addMessages(final ReceiveBuilder builder) { + return super.addMessages(builder).match(Flush.class, this::handleFlush); + } + + private void handleFlush(final Flush message) { + if (message.batch == batch) { + flushWrites(); + } else { + LOG.debug("{}: batch {} not flushed by {}", persistenceId(), batch, message.batch); + } + } + + @Override + void onWrittenMessages(final WrittenMessages message, final Stopwatch started, final long count) { + boolean first = unflushedWrites.isEmpty(); + if (first) { + unflushedDuration.start(); + } + unflushedWrites.addLast(new UnflushedWrite(message, started, count)); + unflushedBytes = unflushedBytes + message.writtenBytes; + if (unflushedBytes >= maxUnflushedBytes) { + LOG.debug("{}: reached {} unflushed journal bytes", persistenceId(), unflushedBytes); + flushWrites(); + } else if (first) { + LOG.debug("{}: deferring journal flush", persistenceId()); + self().tell(new Flush(++batch), ActorRef.noSender()); + } + } + + @Override + void flushWrites() { + final var unsyncedSize = unflushedWrites.size(); + if (unsyncedSize == 0) { + // Nothing to flush + return; + } + + LOG.debug("{}: flushing {} journal writes after {}", persistenceId(), unsyncedSize, + unflushedDuration.stop()); + flushJournal(unflushedBytes, unsyncedSize); + + final var sw = Stopwatch.createStarted(); + unflushedWrites.forEach(write -> completeWriteMessages(write.message, write.start, write.count)); + unflushedWrites.clear(); + unflushedBytes = 0; + unflushedDuration.reset(); + LOG.debug("{}: completed {} flushed journal writes in {}", persistenceId(), unsyncedSize, sw); + } + } + + private static final class Immediate extends SegmentedJournalActor { + Immediate(final String persistenceId, final File directory, final StorageLevel storage, + final int maxEntrySize, final int maxSegmentSize) { + super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize); + } + + @Override + void onWrittenMessages(final WrittenMessages message, final Stopwatch started, final long count) { + flushJournal(message.writtenBytes, 1); + completeWriteMessages(message, started, count); + } + + @Override + void flushWrites() { + // No-op + } + } + private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournalActor.class); - private static final Namespace DELETE_NAMESPACE = Namespace.builder().register(Long.class).build(); + private static final JournalSerdes DELETE_NAMESPACE = JournalSerdes.builder() + 
.register(LongEntrySerdes.LONG_ENTRY_SERDES, Long.class) + .build(); private static final int DELETE_SEGMENT_SIZE = 64 * 1024; private final String persistenceId; @@ -161,12 +299,18 @@ final class SegmentedJournalActor extends AbstractActor { private Meter messageWriteCount; // Tracks the size distribution of messages private Histogram messageSize; + // Tracks the number of messages completed for each flush + private Histogram flushMessages; + // Tracks the number of bytes completed for each flush + private Histogram flushBytes; + // Tracks the duration of flush operations + private Timer flushTime; private DataJournal dataJournal; private SegmentedJournal deleteJournal; private long lastDelete; - SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage, + private SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage, final int maxEntrySize, final int maxSegmentSize) { this.persistenceId = requireNonNull(persistenceId); this.directory = requireNonNull(directory); @@ -176,20 +320,39 @@ final class SegmentedJournalActor extends AbstractActor { } static Props props(final String persistenceId, final File directory, final StorageLevel storage, - final int maxEntrySize, final int maxSegmentSize) { - return Props.create(SegmentedJournalActor.class, requireNonNull(persistenceId), directory, storage, - maxEntrySize, maxSegmentSize); + final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) { + final var pid = requireNonNull(persistenceId); + return maxUnflushedBytes > 0 + ? Props.create(Delayed.class, pid, directory, storage, maxEntrySize, maxSegmentSize, maxUnflushedBytes) + : Props.create(Immediate.class, pid, directory, storage, maxEntrySize, maxSegmentSize); + } + + final String persistenceId() { + return persistenceId; + } + + final void flushJournal(final long bytes, final int messages) { + final var sw = Stopwatch.createStarted(); + dataJournal.flush(); + LOG.debug("{}: journal flush completed in {}", persistenceId, sw.stop()); + flushBytes.update(bytes); + flushMessages.update(messages); + flushTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS); } @Override public Receive createReceive() { - return receiveBuilder() - .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo) - .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr) - .match(ReplayMessages.class, this::handleReplayMessages) - .match(WriteMessages.class, this::handleWriteMessages) - .matchAny(this::handleUnknown) - .build(); + return addMessages(receiveBuilder()) + .matchAny(this::handleUnknown) + .build(); + } + + ReceiveBuilder addMessages(final ReceiveBuilder builder) { + return builder + .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo) + .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr) + .match(ReplayMessages.class, this::handleReplayMessages) + .match(WriteMessages.class, this::handleWriteMessages); } @Override @@ -197,12 +360,15 @@ final class SegmentedJournalActor extends AbstractActor { LOG.debug("{}: actor starting", persistenceId); super.preStart(); - final MetricRegistry registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry(); - final String actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName(); + final var registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry(); + final var actorName = self().path().parent().toStringWithoutAddress() + '/' + 
directory.getName(); batchWriteTime = registry.timer(MetricRegistry.name(actorName, "batchWriteTime")); messageWriteCount = registry.meter(MetricRegistry.name(actorName, "messageWriteCount")); messageSize = registry.histogram(MetricRegistry.name(actorName, "messageSize")); + flushBytes = registry.histogram(MetricRegistry.name(actorName, "flushBytes")); + flushMessages = registry.histogram(MetricRegistry.name(actorName, "flushMessages")); + flushTime = registry.timer(MetricRegistry.name(actorName, "flushTime")); } @Override @@ -239,6 +405,8 @@ final class SegmentedJournalActor extends AbstractActor { ensureOpen(); LOG.debug("{}: delete messages {}", persistenceId, message); + flushWrites(); + final long to = Long.min(dataJournal.lastWrittenSequenceNr(), message.toSequenceNr); LOG.debug("{}: adjusted delete to {}", persistenceId, to); @@ -246,8 +414,8 @@ final class SegmentedJournalActor extends AbstractActor { LOG.debug("{}: deleting entries up to {}", persistenceId, to); lastDelete = to; - final SegmentedJournalWriter deleteWriter = deleteJournal.writer(); - final Indexed entry = deleteWriter.append(lastDelete); + final var deleteWriter = deleteJournal.writer(); + final var entry = deleteWriter.append(lastDelete); deleteWriter.commit(entry.index()); dataJournal.deleteTo(lastDelete); @@ -267,6 +435,7 @@ final class SegmentedJournalActor extends AbstractActor { final Long sequence; if (directory.isDirectory()) { ensureOpen(); + flushWrites(); sequence = dataJournal.lastWrittenSequenceNr(); } else { sequence = 0L; @@ -279,6 +448,7 @@ final class SegmentedJournalActor extends AbstractActor { private void handleReplayMessages(final ReplayMessages message) { LOG.debug("{}: replaying messages {}", persistenceId, message); ensureOpen(); + flushWrites(); final long from = Long.max(lastDelete + 1, message.fromSequenceNr); LOG.debug("{}: adjusted fromSequenceNr to {}", persistenceId, from); @@ -289,15 +459,30 @@ final class SegmentedJournalActor extends AbstractActor { private void handleWriteMessages(final WriteMessages message) { ensureOpen(); - final long startTicks = System.nanoTime(); + final var started = Stopwatch.createStarted(); final long start = dataJournal.lastWrittenSequenceNr(); + final var writtenMessages = dataJournal.handleWriteMessages(message); - dataJournal.handleWriteMessages(message); + onWrittenMessages(writtenMessages, started, dataJournal.lastWrittenSequenceNr() - start); + } - batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS); - messageWriteCount.mark(dataJournal.lastWrittenSequenceNr() - start); + final void completeWriteMessages(final WrittenMessages message, final Stopwatch started, final long count) { + batchWriteTime.update(started.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS); + messageWriteCount.mark(count); + // log message after statistics are updated + LOG.debug("{}: write of {} bytes completed in {}", persistenceId, message.writtenBytes, started); + message.complete(); } + /** + * Handle a check of written messages. 
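The metrics registered in preStart() above now include flushBytes, flushMessages and flushTime alongside batchWriteTime, messageWriteCount and messageSize. Purely as an illustration of reading them back from the same Dropwizard registry, a small sketch follows; only the metric names and the MetricRegistry.name(actorName, ...) scheme come from this patch, the helper class itself is invented.

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Snapshot;

// Illustrative helper only: reads the flush metrics registered in preStart() above.
final class FlushMetricsProbe {
    static void printFlushStats(final MetricRegistry registry, final String actorName) {
        // Same naming scheme as preStart(): MetricRegistry.name(actorName, "<metric>").
        final Snapshot flushTimes = registry.timer(MetricRegistry.name(actorName, "flushTime"))
            .getSnapshot();
        final Snapshot flushSizes = registry.histogram(MetricRegistry.name(actorName, "flushBytes"))
            .getSnapshot();

        // Timer snapshots are in nanoseconds.
        System.out.printf("p99 flush time: %.1f ms%n", flushTimes.get99thPercentile() / 1_000_000.0);
        System.out.printf("mean bytes per flush: %.0f%n", flushSizes.getMean());
    }

    private FlushMetricsProbe() {
        // static helper only
    }
}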
+ * + * @param message Messages which were written + * @param started Stopwatch started when the write started + * @param count number of writes + */ + abstract void onWrittenMessages(WrittenMessages message, Stopwatch started, long count); + private void handleUnknown(final Object message) { LOG.error("{}: Received unknown message {}", persistenceId, message); } @@ -308,15 +493,20 @@ final class SegmentedJournalActor extends AbstractActor { return; } + final var sw = Stopwatch.createStarted(); deleteJournal = SegmentedJournal.builder().withDirectory(directory).withName("delete") .withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build(); - final Indexed lastEntry = deleteJournal.writer().getLastEntry(); - lastDelete = lastEntry == null ? 0 : lastEntry.entry(); + final var lastDeleteRecovered = deleteJournal.openReader(deleteJournal.writer().getLastIndex()) + .tryNext((index, value, length) -> value); + lastDelete = lastDeleteRecovered == null ? 0 : lastDeleteRecovered.longValue(); dataJournal = new DataJournalV0(persistenceId, messageSize, context().system(), storage, directory, maxEntrySize, maxSegmentSize); dataJournal.deleteTo(lastDelete); - LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId, + LOG.debug("{}: journal open in {} with last index {}, deleted to {}", persistenceId, sw, dataJournal.lastWrittenSequenceNr(), lastDelete); } + + abstract void flushWrites(); + } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java index 87c7f99700..d488dc6cf2 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java @@ -29,4 +29,10 @@ public class SegmentedFileJournalSpecTest extends JavaJournalSpec { FileUtils.deleteQuietly(JOURNAL_DIR); super.beforeAll(); } + + @Override + public void afterAll() { + super.afterAll(); + FileUtils.deleteQuietly(JOURNAL_DIR); + } } diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java index 7db0d4b87e..4d3db7980e 100644 --- a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java +++ b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java @@ -7,13 +7,12 @@ */ package org.opendaylight.controller.akka.segjournal; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; 
+import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -24,7 +23,7 @@ import akka.persistence.AtomicWrite; import akka.persistence.PersistentRepr; import akka.testkit.CallingThreadDispatcher; import akka.testkit.javadsl.TestKit; -import io.atomix.storage.StorageLevel; +import io.atomix.storage.journal.StorageLevel; import java.io.File; import java.io.IOException; import java.io.Serializable; @@ -36,50 +35,59 @@ import java.util.Optional; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.AsyncMessage; import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages; import scala.concurrent.Future; -public class SegmentedFileJournalTest { +@ExtendWith(MockitoExtension.class) +class SegmentedFileJournalTest { private static final File DIRECTORY = new File("target/sfj-test"); private static final int SEGMENT_SIZE = 1024 * 1024; private static final int MESSAGE_SIZE = 512 * 1024; + private static final int FLUSH_SIZE = 16 * 1024; private static ActorSystem SYSTEM; + @Mock + private Consumer firstCallback; + private TestKit kit; private ActorRef actor; - @BeforeClass - public static void beforeClass() { + @BeforeAll + static void beforeClass() { SYSTEM = ActorSystem.create("test"); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterClass() { TestKit.shutdownActorSystem(SYSTEM); SYSTEM = null; } - @Before - public void before() { + @BeforeEach + void before() { kit = new TestKit(SYSTEM); FileUtils.deleteQuietly(DIRECTORY); actor = actor(); } - @After - public void after() { + @AfterEach + void after() { actor.tell(PoisonPill.getInstance(), ActorRef.noSender()); + FileUtils.deleteQuietly(DIRECTORY); } @Test - public void testDeleteAfterStop() { + void testDeleteAfterStop() { // Preliminary setup final WriteMessages write = new WriteMessages(); final Future> first = write.add(AtomicWrite.apply(PersistentRepr.apply("first", 1, "foo", @@ -108,7 +116,7 @@ public class SegmentedFileJournalTest { } @Test - public void testSegmentation() throws IOException { + void testSegmentation() throws IOException { // We want to have roughly three segments final LargePayload payload = new LargePayload(); @@ -133,7 +141,7 @@ public class SegmentedFileJournalTest { } @Test - public void testComplexDeletesAndPartialReplays() throws Exception { + void testComplexDeletesAndPartialReplays() throws Exception { for (int i = 0; i <= 4; i++) { writeBigPaylod(); } @@ -203,7 +211,7 @@ public class SegmentedFileJournalTest { private ActorRef actor() { return kit.childActorOf(SegmentedJournalActor.props("foo", DIRECTORY, StorageLevel.DISK, MESSAGE_SIZE, - SEGMENT_SIZE).withDispatcher(CallingThreadDispatcher.Id())); + SEGMENT_SIZE, FLUSH_SIZE).withDispatcher(CallingThreadDispatcher.Id())); } private void deleteEntries(final long deleteTo) { @@ -219,8 +227,8 @@ public class SegmentedFileJournalTest { } private void 
assertReplayCount(final int expected) { - Consumer firstCallback = mock(Consumer.class); - doNothing().when(firstCallback).accept(any(PersistentRepr.class)); + // Cast fixes an Eclipse warning 'generic array created' + reset((Object) firstCallback); AsyncMessage replay = SegmentedJournalActor.replayMessages(0, Long.MAX_VALUE, Long.MAX_VALUE, firstCallback); actor.tell(replay, ActorRef.noSender()); @@ -243,10 +251,10 @@ public class SegmentedFileJournalTest { return future.value().get().get(); } - private static final class LargePayload implements Serializable { + static final class LargePayload implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; final byte[] bytes = new byte[MESSAGE_SIZE / 2]; - } } diff --git a/opendaylight/md-sal/sal-binding-it/pom.xml b/opendaylight/md-sal/sal-binding-it/pom.xml index 3240e8849b..dd32609d30 100644 --- a/opendaylight/md-sal/sal-binding-it/pom.xml +++ b/opendaylight/md-sal/sal-binding-it/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-it-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../mdsal-it-parent sal-binding-it diff --git a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java index 60b4865ee2..0996dae3b9 100644 --- a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java +++ b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java @@ -33,7 +33,6 @@ public abstract class AbstractIT extends AbstractMdsalTestBase { protected Option[] getAdditionalOptions() { return new Option[] { mavenBundle("org.opendaylight.controller", "sal-test-model").versionAsInProject(), - mavenBundle("net.bytebuddy", "byte-buddy").versionAsInProject(), }; } } diff --git a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java index c1d94e65be..163163bf28 100644 --- a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java +++ b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java @@ -10,16 +10,12 @@ package org.opendaylight.controller.test.sal.binding.it; import static org.junit.Assert.assertEquals; import java.util.ArrayList; -import java.util.List; import javax.inject.Inject; import org.junit.Test; import org.opendaylight.mdsal.binding.api.NotificationPublishService; import org.opendaylight.mdsal.binding.api.NotificationService; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OpendaylightTestNotificationListener; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotificationBuilder; -import org.opendaylight.yangtools.concepts.ListenerRegistration; -import org.opendaylight.yangtools.yang.binding.NotificationListener; import org.opendaylight.yangtools.yang.common.Uint16; import org.ops4j.pax.exam.util.Filter; import org.slf4j.Logger; @@ 
-44,71 +40,53 @@ public class NotificationIT extends AbstractIT { */ @Test public void notificationTest() throws Exception { - NotificationTestListener listener1 = new NotificationTestListener(); - ListenerRegistration listener1Reg = - notificationService.registerNotificationListener(listener1); - - LOG.info("The notification of type FlowAdded with cookie ID 0 is created. The " - + "delay 100ms to make sure that the notification was delivered to " - + "listener."); - notificationPublishService.putNotification(noDustNotification("rainy day", 42)); - Thread.sleep(100); - - /** - * Check that one notification was delivered and has correct cookie. - */ - assertEquals(1, listener1.notificationBag.size()); - assertEquals("rainy day", listener1.notificationBag.get(0).getReason()); - assertEquals(42, listener1.notificationBag.get(0).getDaysTillNewDust().intValue()); - - LOG.info("The registration of the Consumer 2. SalFlowListener is registered " + final var bag1 = new ArrayList(); + try (var reg1 = notificationService.registerListener(OutOfPixieDustNotification.class, bag1::add)) { + LOG.info(""" + The notification of type FlowAdded with cookie ID 0 is created. The\s\ + delay 100ms to make sure that the notification was delivered to\s\ + listener."""); + notificationPublishService.putNotification(noDustNotification("rainy day", 42)); + Thread.sleep(100); + + // Check that one notification was delivered and has correct cookie. + assertEquals(1, bag1.size()); + assertEquals("rainy day", bag1.get(0).getReason()); + assertEquals(42, bag1.get(0).getDaysTillNewDust().intValue()); + + LOG.info("The registration of the Consumer 2. SalFlowListener is registered " + "registered as notification listener."); - NotificationTestListener listener2 = new NotificationTestListener(); - final ListenerRegistration listener2Reg = - notificationService.registerNotificationListener(listener2); - - LOG.info("3 notifications are published"); - notificationPublishService.putNotification(noDustNotification("rainy day", 5)); - notificationPublishService.putNotification(noDustNotification("rainy day", 10)); - notificationPublishService.putNotification(noDustNotification("tax collector", 2)); - - /** - * The delay 100ms to make sure that the notifications were delivered to - * listeners. - */ - Thread.sleep(100); - - /** - * Check that 3 notification was delivered to both listeners (first one - * received 4 in total, second 3 in total). - */ - assertEquals(4, listener1.notificationBag.size()); - assertEquals(3, listener2.notificationBag.size()); - - /** - * The second listener is closed (unregistered) - * - */ - listener2Reg.close(); - - LOG.info("The notification 5 is published"); - notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10)); - - /** - * The delay 100ms to make sure that the notification was delivered to - * listener. - */ - Thread.sleep(100); - - /** - * Check that first consumer received 5 notifications in total, second - * consumer received only three. Last notification was never received by - * second consumer because its listener was unregistered. 
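The rewritten test above swaps the generated OpendaylightTestNotificationListener for the typed NotificationService.registerListener() callback, closing the returned registration to stop delivery. A minimal sketch of that pattern follows, reusing the types from this test; the collector class itself is invented for illustration.

import java.util.ArrayList;
import java.util.List;
import org.opendaylight.mdsal.binding.api.NotificationService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;

// Illustrative only: same registerListener() pattern as the try-with-resources blocks above.
final class PixieDustCollector {
    private final List<OutOfPixieDustNotification> received = new ArrayList<>();

    // Registers received::add for one notification type; closing the returned registration
    // stops delivery, which is what reg2.close() exercises in the test above.
    AutoCloseable attach(final NotificationService notificationService) {
        return notificationService.registerListener(OutOfPixieDustNotification.class, received::add);
    }

    List<OutOfPixieDustNotification> received() {
        return received;
    }
}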
- * - */ - assertEquals(5, listener1.notificationBag.size()); - assertEquals(3, listener2.notificationBag.size()); + final var bag2 = new ArrayList(); + try (var reg2 = notificationService.registerListener(OutOfPixieDustNotification.class, bag2::add)) { + LOG.info("3 notifications are published"); + notificationPublishService.putNotification(noDustNotification("rainy day", 5)); + notificationPublishService.putNotification(noDustNotification("rainy day", 10)); + notificationPublishService.putNotification(noDustNotification("tax collector", 2)); + + // The delay 100ms to make sure that the notifications were delivered to listeners. + Thread.sleep(100); + + // Check that 3 notification was delivered to both listeners (first one received 4 in total, second 3 + // in total). + assertEquals(4, bag1.size()); + assertEquals(3, bag2.size()); + + // The second listener is closed (unregistered) + reg2.close(); + + LOG.info("The notification 5 is published"); + notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10)); + + // The delay 100ms to make sure that the notification was delivered to listener. + Thread.sleep(100); + + // Check that first consumer received 5 notifications in total, second consumer received only three. + // Last notification was never received by second consumer because its listener was unregistered. + assertEquals(5, bag1.size()); + assertEquals(3, bag2.size()); + } + } } /** @@ -121,17 +99,4 @@ public class NotificationIT extends AbstractIT { ret.setReason(reason).setDaysTillNewDust(Uint16.valueOf(days)); return ret.build(); } - - /** - * Implements {@link OpendaylightTestNotificationListener} and contains attributes which keep lists of objects of - * the type {@link OutOfPixieDustNotification}. - */ - public static class NotificationTestListener implements OpendaylightTestNotificationListener { - List notificationBag = new ArrayList<>(); - - @Override - public void onOutOfPixieDustNotification(final OutOfPixieDustNotification arg0) { - notificationBag.add(arg0); - } - } } diff --git a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java index 523071415d..5cdde319cd 100644 --- a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java +++ b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java @@ -9,7 +9,8 @@ package org.opendaylight.controller.test.sal.binding.it; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -19,10 +20,9 @@ import java.util.Set; import javax.inject.Inject; import org.junit.Before; import org.junit.Test; -import org.mockito.Mockito; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; import org.opendaylight.mdsal.binding.api.RpcProviderService; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService; +import org.opendaylight.mdsal.binding.api.RpcService; +import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteOutput; @@ -30,7 +30,7 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey; -import org.opendaylight.yangtools.concepts.ObjectRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; import org.opendaylight.yangtools.yang.common.RpcResult; import org.ops4j.pax.exam.util.Filter; @@ -41,12 +41,10 @@ import org.slf4j.LoggerFactory; * Covers routed rpc creation, registration, invocation, unregistration. */ public class RoutedServiceIT extends AbstractIT { + private static final Logger LOG = LoggerFactory.getLogger(RoutedServiceIT.class); - private static final Logger LOG = LoggerFactory - .getLogger(RoutedServiceIT.class); - - protected OpendaylightTestRoutedRpcService odlRoutedService1; - protected OpendaylightTestRoutedRpcService odlRoutedService2; + protected RoutedSimpleRoute routedSimpleRouteRpc1; + protected RoutedSimpleRoute routedSimpleRouteRpc2; @Inject @Filter(timeout = 120 * 1000) @@ -54,95 +52,92 @@ public class RoutedServiceIT extends AbstractIT { @Inject @Filter(timeout = 120 * 1000) - RpcConsumerRegistry rpcConsumerRegistry; + RpcService rpcService; /** * Prepare mocks. 
*/ @Before public void setUp() { - odlRoutedService1 = mock(OpendaylightTestRoutedRpcService.class, "First Flow Service"); - odlRoutedService2 = mock(OpendaylightTestRoutedRpcService.class, "Second Flow Service"); - Mockito.when(odlRoutedService1.routedSimpleRoute(Mockito.any())) - .thenReturn(Futures.>immediateFuture(null)); - Mockito.when(odlRoutedService2.routedSimpleRoute(Mockito.any())) - .thenReturn(Futures.>immediateFuture(null)); + routedSimpleRouteRpc1 = mock(RoutedSimpleRoute.class, "First Flow Rpc"); + doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc1).implementedInterface(); + doReturn(Futures.>immediateFuture(null)).when(routedSimpleRouteRpc1) + .invoke(any()); + + routedSimpleRouteRpc2 = mock(RoutedSimpleRoute.class, "Second Flow Rpc"); + doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc2).implementedInterface(); + doReturn(Futures.>immediateFuture(null)).when(routedSimpleRouteRpc2) + .invoke(any()); } @Test public void testServiceRegistration() { - LOG.info("Register provider 1 with first implementation of routeSimpleService - service1 of node 1"); + LOG.info("Register provider 1 with first implementation of routeSimpleService - rpc1 of node 1"); final InstanceIdentifier nodeOnePath = createNodeRef("foo:node:1"); final InstanceIdentifier nodeTwo = createNodeRef("foo:node:2"); - ObjectRegistration firstReg = rpcProviderService.registerRpcImplementation( - OpendaylightTestRoutedRpcService.class, odlRoutedService1, Set.of(nodeOnePath)); + Registration firstReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc1, + Set.of(nodeOnePath)); assertNotNull("Registration should not be null", firstReg); - assertSame(odlRoutedService1, firstReg.getInstance()); - LOG.info("Register provider 2 with second implementation of routeSimpleService - service2 of node 2"); + LOG.info("Register provider 2 with second implementation of routeSimpleService - rpc2 of node 2"); - ObjectRegistration secondReg = rpcProviderService.registerRpcImplementation( - OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeTwo)); + Registration secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeTwo)); assertNotNull("Registration should not be null", firstReg); - assertSame(odlRoutedService2, secondReg.getInstance()); assertNotSame(secondReg, firstReg); - OpendaylightTestRoutedRpcService consumerService = - rpcConsumerRegistry.getRpcService(OpendaylightTestRoutedRpcService.class); + RoutedSimpleRoute consumerService = rpcService.getRpc(RoutedSimpleRoute.class); assertNotNull("MD-SAL instance of test Service should be returned", consumerService); - assertNotSame("Provider instance and consumer instance should not be same.", odlRoutedService1, + assertNotSame("Provider instance and consumer instance should not be same.", routedSimpleRouteRpc1, consumerService); /** * Consumer creates addFlow message for node one and sends it to the MD-SAL. */ final RoutedSimpleRouteInput simpleRouteFirstFoo = createSimpleRouteInput(nodeOnePath); - consumerService.routedSimpleRoute(simpleRouteFirstFoo); + consumerService.invoke(simpleRouteFirstFoo); /** - * Verifies that implementation of the first provider received the same message from MD-SAL. + * Verifies that implementation of the first instance received the same message from MD-SAL. 
*/ - verify(odlRoutedService1).routedSimpleRoute(simpleRouteFirstFoo); + verify(routedSimpleRouteRpc1).invoke(simpleRouteFirstFoo); /** * Verifies that second instance was not invoked with first message */ - verify(odlRoutedService2, times(0)).routedSimpleRoute(simpleRouteFirstFoo); + verify(routedSimpleRouteRpc2, times(0)).invoke(simpleRouteFirstFoo); /** * Consumer sends message to nodeTwo for three times. Should be processed by second instance. */ final RoutedSimpleRouteInput simpleRouteSecondFoo = createSimpleRouteInput(nodeTwo); - consumerService.routedSimpleRoute(simpleRouteSecondFoo); - consumerService.routedSimpleRoute(simpleRouteSecondFoo); - consumerService.routedSimpleRoute(simpleRouteSecondFoo); + consumerService.invoke(simpleRouteSecondFoo); + consumerService.invoke(simpleRouteSecondFoo); + consumerService.invoke(simpleRouteSecondFoo); /** * Verifies that second instance was invoked 3 times with second message and first instance wasn't invoked. */ - verify(odlRoutedService2, times(3)).routedSimpleRoute(simpleRouteSecondFoo); - verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteSecondFoo); + verify(routedSimpleRouteRpc2, times(3)).invoke(simpleRouteSecondFoo); + verify(routedSimpleRouteRpc1, times(0)).invoke(simpleRouteSecondFoo); LOG.info("Unregistration of the path for the node one in the first provider"); firstReg.close(); LOG.info("Provider 2 registers path of node 1"); secondReg.close(); - secondReg = rpcProviderService.registerRpcImplementation( - OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeOnePath)); + secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeOnePath)); /** * A consumer sends third message to node 1. */ final RoutedSimpleRouteInput simpleRouteThirdFoo = createSimpleRouteInput(nodeOnePath); - consumerService.routedSimpleRoute(simpleRouteThirdFoo); + consumerService.invoke(simpleRouteThirdFoo); /** * Verifies that provider 1 wasn't invoked and provider 2 was invoked 1 time. 
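The migration above moves from per-service RPC registration to the generated Rpc interfaces: implementations are registered per path and consumers obtain a typed handle through RpcService.getRpc(). The sketch below shows the same wiring with a lambda implementation, which works because the generated interfaces have a single abstract invoke() method, the same property ClusterAdminRpcService.registerWith() relies on later in this patch. The wrapper class and the immediateFuture(null) body are invented for illustration.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Set;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.RpcService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteOutput;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;

// Illustrative only: the registration/lookup pattern exercised by this test.
final class RoutedRpcWiring {
    // Provider side: a lambda works because the generated RoutedSimpleRoute interface has a
    // single abstract invoke() method, mirroring the mocks configured in setUp() above.
    static Registration register(final RpcProviderService provider, final InstanceIdentifier<?> nodePath) {
        return provider.registerRpcImplementation(
            (RoutedSimpleRoute) input -> Futures.immediateFuture(null), Set.of(nodePath));
    }

    // Consumer side: obtain the typed handle and invoke it, as testServiceRegistration() does.
    static ListenableFuture<RpcResult<RoutedSimpleRouteOutput>> invokeOnce(final RpcService rpcs,
            final RoutedSimpleRouteInput input) {
        return rpcs.getRpc(RoutedSimpleRoute.class).invoke(input);
    }

    private RoutedRpcWiring() {
        // static helpers only
    }
}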
* TODO: fix unregister path */ - //verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteThirdFoo); - verify(odlRoutedService2).routedSimpleRoute(simpleRouteThirdFoo); + verify(routedSimpleRouteRpc2).invoke(simpleRouteThirdFoo); } /** diff --git a/opendaylight/md-sal/sal-cluster-admin-api/pom.xml b/opendaylight/md-sal/sal-cluster-admin-api/pom.xml index ce3e83d007..ad8d996edc 100644 --- a/opendaylight/md-sal/sal-cluster-admin-api/pom.xml +++ b/opendaylight/md-sal/sal-cluster-admin-api/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -12,6 +12,14 @@ bundle + + com.google.guava + guava + + + org.opendaylight.yangtools + yang-common + org.opendaylight.controller cds-access-api diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/pom.xml b/opendaylight/md-sal/sal-cluster-admin-impl/pom.xml index 713d07a85e..cb905343c8 100644 --- a/opendaylight/md-sal/sal-cluster-admin-impl/pom.xml +++ b/opendaylight/md-sal/sal-cluster-admin-impl/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -13,90 +13,112 @@ - org.opendaylight.controller - sal-cluster-admin-api + com.github.spotbugs + spotbugs-annotations + true - - - org.slf4j - slf4j-simple - test + com.google.guava + guava - org.opendaylight.controller - sal-akka-raft - test-jar - test + org.apache.commons + commons-lang3 - org.opendaylight.controller - sal-distributed-datastore - test-jar - ${project.version} - test + org.eclipse.jdt + org.eclipse.jdt.annotation org.opendaylight.yangtools - yang-test-util + concepts - org.opendaylight.controller.samples - clustering-it-model - test + org.opendaylight.yangtools + yang-common - commons-lang - commons-lang - test + org.opendaylight.mdsal + mdsal-binding-api - - - com.typesafe.akka - akka-testkit_2.13 + org.opendaylight.mdsal + mdsal-dom-spi - - - org.scala-lang - scala-library + org.opendaylight.mdsal + yang-binding - - org.opendaylight.controller eos-dom-akka - org.opendaylight.mdsal - mdsal-binding-api + org.opendaylight.controller + cds-access-api + + + org.opendaylight.controller + repackaged-akka org.opendaylight.controller sal-akka-raft + + org.opendaylight.controller + sal-cluster-admin-api + org.opendaylight.controller sal-distributed-datastore - org.opendaylight.mdsal - yang-binding + org.osgi + org.osgi.service.component.annotations + + + org.scala-lang + scala-library + + + + + com.typesafe.akka + akka-testkit_2.13 + + + org.opendaylight.controller + sal-akka-raft + test-jar + test + + + org.opendaylight.controller + sal-distributed-datastore + test-jar + ${project.version} + test org.opendaylight.yangtools - yang-common + yang-data-api + test - org.apache.commons - commons-lang3 + org.opendaylight.yangtools + yang-test-util - org.osgi - osgi.cmpn + org.opendaylight.controller.samples + clustering-it-model + test + + + org.slf4j + slf4j-simple + test - diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java index e00d620d7c..8ad1553dba 100644 --- a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java +++ b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java @@ 
-13,6 +13,7 @@ import akka.actor.Status.Success; import akka.dispatch.OnComplete; import akka.pattern.Patterns; import akka.util.Timeout; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; @@ -55,43 +56,52 @@ import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot; import org.opendaylight.controller.eos.akka.DataCenterControl; -import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer; +import org.opendaylight.mdsal.binding.api.RpcProviderService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterOutput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutputBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterOutput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutputBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutputBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInput; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutputBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutputBuilder; @@ -107,8 +117,9 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.common.Empty; -import org.opendaylight.yangtools.yang.common.RpcError.ErrorType; +import org.opendaylight.yangtools.yang.common.ErrorType; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.common.RpcResultBuilder; import org.opendaylight.yangtools.yang.common.Uint32; @@ -121,39 +132,58 @@ import scala.concurrent.Future; * * @author Thomas Pantelis */ -public class ClusterAdminRpcService implements ClusterAdminService { +public final class ClusterAdminRpcService { private static final Timeout SHARD_MGR_TIMEOUT = new Timeout(1, TimeUnit.MINUTES); private static final Logger LOG = LoggerFactory.getLogger(ClusterAdminRpcService.class); private static final @NonNull RpcResult LOCAL_SHARD_RESULT = RpcResultBuilder.success(new LocateShardOutputBuilder() - .setMemberNode(new LocalBuilder().setLocal(Empty.getInstance()).build()) + .setMemberNode(new LocalBuilder().setLocal(Empty.value()).build()) .build()) .build(); private final DistributedDataStoreInterface configDataStore; private final DistributedDataStoreInterface operDataStore; - private final BindingNormalizedNodeSerializer serializer; private final Timeout makeLeaderLocalTimeout; private final DataCenterControl dataCenterControl; public ClusterAdminRpcService(final DistributedDataStoreInterface configDataStore, final DistributedDataStoreInterface operDataStore, - final BindingNormalizedNodeSerializer serializer, final DataCenterControl dataCenterControl) { this.configDataStore = configDataStore; this.operDataStore = operDataStore; - this.serializer = serializer; - this.makeLeaderLocalTimeout = + makeLeaderLocalTimeout = new Timeout(configDataStore.getActorUtils().getDatastoreContext() .getShardLeaderElectionTimeout().duration().$times(2)); this.dataCenterControl = dataCenterControl; } - @Override - public ListenableFuture> addShardReplica(final AddShardReplicaInput input) { + Registration registerWith(final RpcProviderService rpcProviderService) { + return rpcProviderService.registerRpcImplementations( + 
(org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013 + .AddShardReplica) this::addShardReplica, + (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013 + .RemoveShardReplica) this::removeShardReplica, + (LocateShard) this::locateShard, + (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013 + .MakeLeaderLocal) this::makeLeaderLocal, + (AddReplicasForAllShards) this::addReplicasForAllShards, + (RemoveAllShardReplicas) this::removeAllShardReplicas, + (ChangeMemberVotingStatesForShard) this::changeMemberVotingStatesForShard, + (ChangeMemberVotingStatesForAllShards) this::changeMemberVotingStatesForAllShards, + (FlipMemberVotingStatesForAllShards) this::flipMemberVotingStatesForAllShards, + (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013 + .GetShardRole) this::getShardRole, + (BackupDatastore) this::backupDatastore, + (GetKnownClientsForAllShards) this::getKnownClientsForAllShards, + (ActivateEosDatacenter) this::activateEosDatacenter, + (DeactivateEosDatacenter) this::deactivateEosDatacenter); + } + + @VisibleForTesting + ListenableFuture> addShardReplica(final AddShardReplicaInput input) { final String shardName = input.getShardName(); if (Strings.isNullOrEmpty(shardName)) { return newFailedRpcResultFuture("A valid shard name must be specified"); @@ -166,28 +196,27 @@ public class ClusterAdminRpcService implements ClusterAdminService { LOG.info("Adding replica for shard {}", shardName); - final SettableFuture> returnFuture = SettableFuture.create(); - ListenableFuture future = sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName)); - Futures.addCallback(future, new FutureCallback() { - @Override - public void onSuccess(final Success success) { - LOG.info("Successfully added replica for shard {}", shardName); - returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build())); - } + final var returnFuture = SettableFuture.>create(); + Futures.addCallback(sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName)), + new FutureCallback() { + @Override + public void onSuccess(final Success success) { + LOG.info("Successfully added replica for shard {}", shardName); + returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build())); + } - @Override - public void onFailure(final Throwable failure) { - onMessageFailure(String.format("Failed to add replica for shard %s", shardName), + @Override + public void onFailure(final Throwable failure) { + onMessageFailure(String.format("Failed to add replica for shard %s", shardName), returnFuture, failure); - } - }, MoreExecutors.directExecutor()); + } + }, MoreExecutors.directExecutor()); return returnFuture; } - @Override - public ListenableFuture> removeShardReplica( - final RemoveShardReplicaInput input) { + @VisibleForTesting + ListenableFuture> removeShardReplica(final RemoveShardReplicaInput input) { final String shardName = input.getShardName(); if (Strings.isNullOrEmpty(shardName)) { return newFailedRpcResultFuture("A valid shard name must be specified"); @@ -225,8 +254,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { return returnFuture; } - @Override - public ListenableFuture> locateShard(final LocateShardInput input) { + private ListenableFuture> locateShard(final LocateShardInput input) { final ActorUtils utils; switch 
(input.getDataStoreType()) { case Config: @@ -267,8 +295,8 @@ public class ClusterAdminRpcService implements ClusterAdminService { return ret; } - @Override - public ListenableFuture> makeLeaderLocal(final MakeLeaderLocalInput input) { + @VisibleForTesting + ListenableFuture> makeLeaderLocal(final MakeLeaderLocalInput input) { final String shardName = input.getShardName(); if (Strings.isNullOrEmpty(shardName)) { return newFailedRpcResultFuture("A valid shard name must be specified"); @@ -322,8 +350,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { return future; } - @Override - public ListenableFuture> addReplicasForAllShards( + @VisibleForTesting ListenableFuture> addReplicasForAllShards( final AddReplicasForAllShardsInput input) { LOG.info("Adding replicas for all shards"); @@ -337,9 +364,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { "Failed to add replica"); } - - @Override - public ListenableFuture> removeAllShardReplicas( + @VisibleForTesting ListenableFuture> removeAllShardReplicas( final RemoveAllShardReplicasInput input) { LOG.info("Removing replicas for all shards"); @@ -355,56 +380,54 @@ public class ClusterAdminRpcService implements ClusterAdminService { sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, messageSupplier); sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, messageSupplier); - return waitForShardResults(shardResultData, shardResults -> - new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(), - " Failed to remove replica"); + return waitForShardResults(shardResultData, + shardResults -> new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(), + " Failed to remove replica"); } - @Override - public ListenableFuture> changeMemberVotingStatesForShard( + @VisibleForTesting + ListenableFuture> changeMemberVotingStatesForShard( final ChangeMemberVotingStatesForShardInput input) { final String shardName = input.getShardName(); if (Strings.isNullOrEmpty(shardName)) { return newFailedRpcResultFuture("A valid shard name must be specified"); } - DataStoreType dataStoreType = input.getDataStoreType(); + final var dataStoreType = input.getDataStoreType(); if (dataStoreType == null) { return newFailedRpcResultFuture("A valid DataStoreType must be specified"); } - List memberVotingStates = input.getMemberVotingState(); + final var memberVotingStates = input.getMemberVotingState(); if (memberVotingStates == null || memberVotingStates.isEmpty()) { return newFailedRpcResultFuture("No member voting state input was specified"); } - ChangeShardMembersVotingStatus changeVotingStatus = toChangeShardMembersVotingStatus(shardName, - memberVotingStates); - + final var changeVotingStatus = toChangeShardMembersVotingStatus(shardName, memberVotingStates); LOG.info("Change member voting states for shard {}: {}", shardName, changeVotingStatus.getMeberVotingStatusMap()); - final SettableFuture> returnFuture = SettableFuture.create(); - ListenableFuture future = sendMessageToShardManager(dataStoreType, changeVotingStatus); - Futures.addCallback(future, new FutureCallback() { - @Override - public void onSuccess(final Success success) { - LOG.info("Successfully changed member voting states for shard {}", shardName); - returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build())); - } + final var returnFuture = SettableFuture.>create(); + Futures.addCallback(sendMessageToShardManager(dataStoreType, 
changeVotingStatus), + new FutureCallback() { + @Override + public void onSuccess(final Success success) { + LOG.info("Successfully changed member voting states for shard {}", shardName); + returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build())); + } - @Override - public void onFailure(final Throwable failure) { - onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName), + @Override + public void onFailure(final Throwable failure) { + onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName), returnFuture, failure); - } - }, MoreExecutors.directExecutor()); + } + }, MoreExecutors.directExecutor()); return returnFuture; } - @Override - public ListenableFuture> changeMemberVotingStatesForAllShards( + @VisibleForTesting + ListenableFuture> changeMemberVotingStatesForAllShards( final ChangeMemberVotingStatesForAllShardsInput input) { List memberVotingStates = input.getMemberVotingState(); if (memberVotingStates == null || memberVotingStates.isEmpty()) { @@ -425,11 +448,11 @@ public class ClusterAdminRpcService implements ClusterAdminService { "Failed to change member voting states"); } - @Override - public ListenableFuture> flipMemberVotingStatesForAllShards( + @VisibleForTesting + ListenableFuture> flipMemberVotingStatesForAllShards( final FlipMemberVotingStatesForAllShardsInput input) { - final List, ShardResultBuilder>> shardResultData = new ArrayList<>(); - Function messageSupplier = FlipShardMembersVotingStatus::new; + final var shardResultData = new ArrayList, ShardResultBuilder>>(); + final Function messageSupplier = FlipShardMembersVotingStatus::new; LOG.info("Flip member voting states for all shards"); @@ -441,8 +464,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { "Failed to change member voting states"); } - @Override - public ListenableFuture> getShardRole(final GetShardRoleInput input) { + private ListenableFuture> getShardRole(final GetShardRoleInput input) { final String shardName = input.getShardName(); if (Strings.isNullOrEmpty(shardName)) { return newFailedRpcResultFuture("A valid shard name must be specified"); @@ -484,8 +506,8 @@ public class ClusterAdminRpcService implements ClusterAdminService { return returnFuture; } - @Override - public ListenableFuture> backupDatastore(final BackupDatastoreInput input) { + @VisibleForTesting + ListenableFuture> backupDatastore(final BackupDatastoreInput input) { LOG.debug("backupDatastore: {}", input); if (Strings.isNullOrEmpty(input.getFilePath())) { @@ -513,9 +535,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { return returnFuture; } - - @Override - public ListenableFuture> getKnownClientsForAllShards( + private ListenableFuture> getKnownClientsForAllShards( final GetKnownClientsForAllShardsInput input) { final ImmutableMap> allShardReplies = getAllShardLeadersClients(); @@ -523,8 +543,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { MoreExecutors.directExecutor()); } - @Override - public ListenableFuture> activateEosDatacenter( + private ListenableFuture> activateEosDatacenter( final ActivateEosDatacenterInput input) { LOG.debug("Activating EOS Datacenter"); final SettableFuture> future = SettableFuture.create(); @@ -545,8 +564,7 @@ public class ClusterAdminRpcService implements ClusterAdminService { return future; } - @Override - public ListenableFuture> deactivateEosDatacenter( + private ListenableFuture> deactivateEosDatacenter( final 
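    // Sketch of the callback pattern used by the RPC methods above (helper name and generic
    // signature are illustrative, not part of this patch): an akka ask-future is bridged onto
    // a Guava SettableFuture carrying the RpcResult handed back to the caller.
    private static <T> void bridgeToRpcResult(final ListenableFuture<Success> ask,
            final SettableFuture<RpcResult<T>> returnFuture, final T successOutput, final String failureMessage) {
        Futures.addCallback(ask, new FutureCallback<Success>() {
            @Override
            public void onSuccess(final Success success) {
                returnFuture.set(RpcResultBuilder.success(successOutput).build());
            }

            @Override
            public void onFailure(final Throwable failure) {
                // mirrors onMessageFailure(): report the cause as a failed RpcResult
                returnFuture.set(RpcResultBuilder.<T>failed()
                    .withError(ErrorType.APPLICATION, failureMessage, failure)
                    .build());
            }
        }, MoreExecutors.directExecutor());
    }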
DeactivateEosDatacenterInput input) {
         LOG.debug("Deactivating EOS Datacenter");
         final SettableFuture> future = SettableFuture.create();
@@ -696,8 +714,6 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return ask(shardManager, message, SHARD_MGR_TIMEOUT);
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-        justification = "https://github.com/spotbugs/spotbugs/issues/811")
     @SuppressWarnings("checkstyle:IllegalCatch")
     private static void saveSnapshotsToFile(final DatastoreSnapshotList snapshots, final String fileName,
             final SettableFuture> returnFuture) {
diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java
index 82a669e166..bcbf408eed 100644
--- a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java
+++ b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java
@@ -7,13 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.admin;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.opendaylight.controller.eos.akka.DataCenterControl;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.osgi.service.component.annotations.Activate;
 import org.osgi.service.component.annotations.Component;
 import org.osgi.service.component.annotations.Deactivate;
@@ -21,28 +18,21 @@ import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Beta
-@Component(immediate = true)
+@Component(service = { })
 public final class OSGiClusterAdmin {
     private static final Logger LOG = LoggerFactory.getLogger(OSGiClusterAdmin.class);
 
-    @Reference(target = "(type=distributed-config)")
-    DistributedDataStoreInterface configDatastore = null;
-    @Reference(target = "(type=distributed-operational)")
-    DistributedDataStoreInterface operDatastore = null;
-    @Reference
-    BindingNormalizedNodeSerializer serializer = null;
-    @Reference
-    RpcProviderService rpcProviderService = null;
-    @Reference
-    DataCenterControl dataCenterControl = null;
-
-    private ObjectRegistration reg;
+    private final Registration reg;
 
     @Activate
-    void activate() {
-        reg = rpcProviderService.registerRpcImplementation(ClusterAdminService.class,
-            new ClusterAdminRpcService(configDatastore, operDatastore, serializer, dataCenterControl));
+    public OSGiClusterAdmin(
+            @Reference(target = "(type=distributed-config)") final DistributedDataStoreInterface configDatastore,
+            @Reference(target = "(type=distributed-operational)") final DistributedDataStoreInterface operDatastore,
+            @Reference final RpcProviderService rpcProviderService,
+            @Reference final DataCenterControl dataCenterControl) {
+        reg = new ClusterAdminRpcService(configDatastore, operDatastore, dataCenterControl)
+            
.registerWith(rpcProviderService); LOG.info("Cluster Admin services started"); } diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java index ba00174f24..2239908877 100644 --- a/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java +++ b/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java @@ -9,6 +9,7 @@ package org.opendaylight.controller.cluster.datastore.admin; import static java.lang.Boolean.FALSE; import static java.lang.Boolean.TRUE; +import static java.util.Objects.requireNonNull; import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.MatcherAssert.assertThat; @@ -26,18 +27,14 @@ import akka.actor.ActorRef; import akka.actor.PoisonPill; import akka.actor.Status.Success; import akka.cluster.Cluster; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import java.io.File; -import java.io.FileInputStream; -import java.util.AbstractMap.SimpleEntry; +import java.nio.file.Files; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -46,6 +43,7 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.opendaylight.controller.cluster.access.concepts.MemberName; +import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore; import org.opendaylight.controller.cluster.datastore.AbstractDataStore; import org.opendaylight.controller.cluster.datastore.DatastoreContext; import org.opendaylight.controller.cluster.datastore.MemberNode; @@ -63,35 +61,23 @@ import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm; import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal; import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore; import org.opendaylight.controller.md.cluster.datastore.model.CarsModel; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey; -import org.opendaylight.yangtools.yang.common.RpcError; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.common.XMLNamespace; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; /** * Unit tests for ClusterAdminRpcService. 
@@ -99,6 +85,12 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; * @author Thomas Pantelis */ public class ClusterAdminRpcServiceTest { + record ExpState(String name, boolean voting) { + ExpState { + requireNonNull(name); + } + } + private static final MemberName MEMBER_1 = MemberName.forName("member-1"); private static final MemberName MEMBER_2 = MemberName.forName("member-2"); private static final MemberName MEMBER_3 = MemberName.forName("member-3"); @@ -112,34 +104,38 @@ public class ClusterAdminRpcServiceTest { @After public void tearDown() { - for (MemberNode m : Lists.reverse(memberNodes)) { - m.cleanup(); + for (var member : Lists.reverse(memberNodes)) { + member.cleanup(); } memberNodes.clear(); } @Test public void testBackupDatastore() throws Exception { - MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1") - .moduleShardsConfig("module-shards-member1.conf").waitForShardLeader("cars", "people") - .testName("testBackupDatastore").build(); + final var node = MemberNode.builder(memberNodes) + .akkaConfig("Member1") + .moduleShardsConfig("module-shards-member1.conf") + .waitForShardLeader("cars", "people") + .testName("testBackupDatastore") + .build(); - String fileName = "target/testBackupDatastore"; - new File(fileName).delete(); + final var fileName = "target/testBackupDatastore"; + final var file = new File(fileName); + file.delete(); - final ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), - null, null); + final var service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null); - RpcResult rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder() - .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS); + var rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build()) + .get(5, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); - try (FileInputStream fis = new FileInputStream(fileName)) { - List snapshots = SerializationUtils.deserialize(fis); + try (var fis = Files.newInputStream(file.toPath())) { + final List snapshots = SerializationUtils.deserialize(fis); assertEquals("DatastoreSnapshot size", 2, snapshots.size()); - ImmutableMap map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0), - snapshots.get(1).getType(), snapshots.get(1)); + final var map = Map.of( + snapshots.get(0).getType(), snapshots.get(0), + snapshots.get(1).getType(), snapshots.get(1)); verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(), map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people"); } finally { @@ -151,7 +147,7 @@ public class ClusterAdminRpcServiceTest { node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder() .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender()); - ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get(); + final var carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").orElseThrow(); node.kit().watch(carsShardActor); carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender()); node.kit().expectTerminated(carsShardActor); @@ -165,9 +161,9 @@ public class ClusterAdminRpcServiceTest { private static void verifyDatastoreSnapshot(final String type, final DatastoreSnapshot datastoreSnapshot, final String... 
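    // Illustrative sketch, not part of this patch: the ExpState record above replaces the
    // AbstractMap.SimpleEntry<String, Boolean> pairs used previously, giving named accessors
    // and a null check on the member name from the compact constructor.
    //
    //     var exp = new ExpState("member-1", true);
    //     exp.name();                // "member-1"
    //     exp.voting();              // true
    //     new ExpState(null, true);  // throws NullPointerException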
expShardNames) { assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot); - Set shardNames = new HashSet<>(); - for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) { - shardNames.add(s.getName()); + var shardNames = new HashSet(); + for (var snapshot : datastoreSnapshot.getShardSnapshots()) { + shardNames.add(snapshot.getName()); } assertEquals("DatastoreSnapshot shard names", Set.of(expShardNames), shardNames); @@ -178,7 +174,7 @@ public class ClusterAdminRpcServiceTest { String name = "testGetPrefixShardRole"; String moduleShardsConfig = "module-shards-default-member-1.conf"; - final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default"); @@ -189,11 +185,11 @@ public class ClusterAdminRpcServiceTest { String name = "testModuleShardLeaderMovement"; String moduleShardsConfig = "module-shards-member1.conf"; - final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); member1.waitForMembersUp("member-2", "member-3"); @@ -229,17 +225,17 @@ public class ClusterAdminRpcServiceTest { public void testAddShardReplica() throws Exception { String name = "testAddShardReplica"; String moduleShardsConfig = "module-shards-cars-member-1.conf"; - MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build(); - MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.waitForMembersUp("member-2"); doAddShardReplica(newReplicaNode2, "cars", "member-1"); - MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + var newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.waitForMembersUp("member-3"); @@ -251,18 +247,18 @@ public class ClusterAdminRpcServiceTest { verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3"); // Write data to member-2's config datastore and read/verify via member-3 - final NormalizedNode configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(), + final var configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(), newReplicaNode3.configDataStore()); // Write data to member-3's oper datastore and 
read/verify via member-2 writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore()); // Verify all data has been replicated. We expect 4 log entries and thus last applied index of 3 - - // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload. + // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload. RaftStateVerifier verifier = raftState -> { - assertEquals("Commit index", 4, raftState.getCommitIndex()); - assertEquals("Last applied index", 4, raftState.getLastApplied()); + assertEquals("Commit index", 3, raftState.getCommitIndex()); + assertEquals("Last applied index", 3, raftState.getLastApplied()); }; verifyRaftState(leaderNode1.configDataStore(), "cars", verifier); @@ -289,34 +285,36 @@ public class ClusterAdminRpcServiceTest { @Test public void testAddShardReplicaFailures() throws Exception { String name = "testAddShardReplicaFailures"; - MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig("module-shards-cars-member-1.conf").build(); - final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(), - memberNode.operDataStore(), null, null); + final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null); - RpcResult rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder() - .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS); + var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder() + .setDataStoreType(DataStoreType.Config) + .build()) + .get(10, TimeUnit.SECONDS); verifyFailedRpcResult(rpcResult); - rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars") - .build()).get(10, TimeUnit.SECONDS); + rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").build()) + .get(10, TimeUnit.SECONDS); verifyFailedRpcResult(rpcResult); rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people") - .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS); + .setDataStoreType(DataStoreType.Config) + .build()) + .get(10, TimeUnit.SECONDS); verifyFailedRpcResult(rpcResult); } - private static NormalizedNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore, + private static ContainerNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore, final AbstractDataStore readFromStore) throws Exception { - DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction(); - NormalizedNode carsNode = CarsModel.create(); + final var writeTx = writeToStore.newWriteOnlyTransaction(); + final var carsNode = CarsModel.create(); writeTx.write(CarsModel.BASE_PATH, carsNode); - DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); - Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS); - assertEquals("canCommit", TRUE, canCommit); + final var cohort = writeTx.ready(); + assertEquals("canCommit", TRUE, cohort.canCommit().get(7, TimeUnit.SECONDS)); cohort.preCommit().get(5, TimeUnit.SECONDS); cohort.commit().get(5, TimeUnit.SECONDS); @@ -325,31 +323,31 @@ public class ClusterAdminRpcServiceTest { } private static void readCarsNodeAndVerify(final AbstractDataStore readFromStore, - final NormalizedNode expCarsNode) throws Exception { - Optional optional = 
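    // Note on the adjusted expectation above: the RAFT log is zero-indexed, so the four
    // entries (two ServerConfigurationPayload entries, the transaction payload and the purge
    // payload) occupy indices 0..3, hence commitIndex and lastApplied are asserted as 3.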
readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH) - .get(15, TimeUnit.SECONDS); - assertTrue("isPresent", optional.isPresent()); - assertEquals("Data node", expCarsNode, optional.get()); + final ContainerNode expCarsNode) throws Exception { + assertEquals(Optional.of(expCarsNode), + readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS)); } private static void doAddShardReplica(final MemberNode memberNode, final String shardName, final String... peerMemberNames) throws Exception { memberNode.waitForMembersUp(peerMemberNames); - final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(), - memberNode.operDataStore(), null, null); + final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null); - RpcResult rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder() - .setShardName(shardName).setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS); + var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder() + .setShardName(shardName) + .setDataStoreType(DataStoreType.Config) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames); - Optional optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName); - assertFalse("Oper shard present", optional.isPresent()); + assertEquals(Optional.empty(), memberNode.operDataStore().getActorUtils().findLocalShard(shardName)); - rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName) - .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS); + rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder() + .setShardName(shardName) + .setDataStoreType(DataStoreType.Operational) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames); @@ -357,12 +355,12 @@ public class ClusterAdminRpcServiceTest { private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName, final String newLeader) throws Exception { - final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(), - memberNode.operDataStore(), null, null); + final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null); - final RpcResult rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder() - .setDataStoreType(DataStoreType.Config).setShardName(shardName).build()) - .get(10, TimeUnit.SECONDS); + final var rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder() + .setDataStoreType(DataStoreType.Config) + .setShardName(shardName) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); @@ -372,8 +370,9 @@ public class ClusterAdminRpcServiceTest { private static T verifySuccessfulRpcResult(final RpcResult rpcResult) { if (!rpcResult.isSuccessful()) { - if (rpcResult.getErrors().size() > 0) { - RpcError error = Iterables.getFirst(rpcResult.getErrors(), null); + final var errors = rpcResult.getErrors(); + if (errors.size() > 0) { + final var error = errors.get(0); throw new AssertionError("Rpc failed with error: " + error, error.getCause()); } @@ -385,8 +384,9 @@ public class ClusterAdminRpcServiceTest { private static void verifyFailedRpcResult(final RpcResult rpcResult) { 
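    // Sketch of the error-inspection pattern the verify helpers above now use (method name is
    // illustrative, not part of this patch): the first reported RpcError is surfaced directly.
    private static void rethrowFirstError(final RpcResult<?> rpcResult) {
        final var errors = rpcResult.getErrors();
        if (!errors.isEmpty()) {
            final var error = errors.get(0);
            throw new AssertionError("Rpc failed with error: " + error, error.getCause());
        }
    }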
assertFalse("RpcResult", rpcResult.isSuccessful()); - assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size()); - RpcError error = Iterables.getFirst(rpcResult.getErrors(), null); + final var errors = rpcResult.getErrors(); + assertEquals("RpcResult errors size", 1, errors.size()); + final var error = errors.get(0); assertNotNull("RpcResult error message null", error.getMessage()); } @@ -394,15 +394,15 @@ public class ClusterAdminRpcServiceTest { public void testRemoveShardReplica() throws Exception { String name = "testRemoveShardReplica"; String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); @@ -413,12 +413,13 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on member-3 to remove it's local shard - final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), - replicaNode3.operDataStore(), null, null); + final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(), + null); - RpcResult rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder() - .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build()) - .get(10, TimeUnit.SECONDS); + var rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder() + .setShardName("cars").setMemberName("member-3") + .setDataStoreType(DataStoreType.Config) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2"); @@ -430,7 +431,7 @@ public class ClusterAdminRpcServiceTest { Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress()); replicaNode2.cleanup(); - MemberNode newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); newPeplicaNode2.configDataStore().waitTillReady(); @@ -438,11 +439,14 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on member-1 to remove member-2 - final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), - leaderNode1.operDataStore(), null, null); + final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(), + null); - rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars") - .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, 
TimeUnit.SECONDS); + rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder() + .setShardName("cars") + .setMemberName("member-2") + .setDataStoreType(DataStoreType.Config) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars"); @@ -453,15 +457,15 @@ public class ClusterAdminRpcServiceTest { public void testRemoveShardLeaderReplica() throws Exception { String name = "testRemoveShardLeaderReplica"; String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); @@ -474,12 +478,14 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on leader member-1 to remove it's local shard - final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), - leaderNode1.operDataStore(), null, null); + final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(), + null); - RpcResult rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder() - .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build()) - .get(10, TimeUnit.SECONDS); + final var rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder() + .setShardName("cars") + .setMemberName("member-1") + .setDataStoreType(DataStoreType.Config) + .build()).get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); verifyRaftState(replicaNode2.configDataStore(), "cars", raftState -> @@ -495,17 +501,17 @@ public class ClusterAdminRpcServiceTest { public void testAddReplicasForAllShards() throws Exception { String name = "testAddReplicasForAllShards"; String moduleShardsConfig = "module-shards-member1.conf"; - MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build(); - ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration( - XMLNamespace.of("pets-ns"), "pets-module", "pets", null, List.of(MEMBER_1)); + final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets", + null, List.of(MEMBER_1)); leaderNode1.configDataStore().getActorUtils().getShardManager().tell( new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef()); leaderNode1.kit().expectMsgClass(Success.class); leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets"); - MemberNode newReplicaNode2 = 
MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.waitForMembersUp("member-2"); @@ -515,19 +521,18 @@ public class ClusterAdminRpcServiceTest { new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef()); newReplicaNode2.kit().expectMsgClass(Success.class); - newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell( - new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module", - "no-leader", null, List.of(MEMBER_1)), - Shard.builder(), null), - newReplicaNode2.kit().getRef()); + newReplicaNode2.operDataStore().getActorUtils().getShardManager() + .tell(new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module", + "no-leader", null, List.of(MEMBER_1)), + Shard.builder(), null), newReplicaNode2.kit().getRef()); newReplicaNode2.kit().expectMsgClass(Success.class); - final ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(), - newReplicaNode2.operDataStore(), null, null); + final var service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(), + newReplicaNode2.operDataStore(), null); - RpcResult rpcResult = service.addReplicasForAllShards( - new AddReplicasForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS); - AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult); + var rpcResult = service.addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build()) + .get(10, TimeUnit.SECONDS); + final var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("pets", DataStoreType.Config), @@ -546,15 +551,15 @@ public class ClusterAdminRpcServiceTest { public void testRemoveAllShardReplicas() throws Exception { String name = "testRemoveAllShardReplicas"; String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); @@ -562,8 +567,8 @@ public class ClusterAdminRpcServiceTest { verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3"); verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2"); - ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), - "pets-module", "pets", null, List.of(MEMBER_1, MEMBER_2, MEMBER_3)); + final var petsModuleConfig = new 
ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets", + null, List.of(MEMBER_1, MEMBER_2, MEMBER_3)); leaderNode1.configDataStore().getActorUtils().getShardManager().tell( new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef()); leaderNode1.kit().expectMsgClass(Success.class); @@ -580,12 +585,13 @@ public class ClusterAdminRpcServiceTest { verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3"); verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2"); - final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), - replicaNode3.operDataStore(), null, null); + final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(), + null); - RpcResult rpcResult = service3.removeAllShardReplicas( - new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS); - RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult); + var rpcResult = service3.removeAllShardReplicas( + new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()) + .get(10, TimeUnit.SECONDS); + final var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("pets", DataStoreType.Config), @@ -607,15 +613,15 @@ public class ClusterAdminRpcServiceTest { public void testChangeMemberVotingStatesForShard() throws Exception { String name = "testChangeMemberVotingStatusForShard"; String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); @@ -626,32 +632,31 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on member-3 to change voting status - final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), - replicaNode3.operDataStore(), null, null); + final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(), + null); - RpcResult rpcResult = service3 - .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder() - .setShardName("cars").setDataStoreType(DataStoreType.Config) - .setMemberVotingState(List.of( - new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(), - new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())) - .build()) - .get(10, TimeUnit.SECONDS); + var rpcResult = service3.changeMemberVotingStatesForShard(new 
ChangeMemberVotingStatesForShardInputBuilder() + .setShardName("cars").setDataStoreType(DataStoreType.Config) + .setMemberVotingState(List.of( + new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(), + new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())) + .build()) + .get(10, TimeUnit.SECONDS); verifySuccessfulRpcResult(rpcResult); - verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE)); - verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE)); - verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE)); + verifyVotingStates(leaderNode1.configDataStore(), "cars", + new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false)); + verifyVotingStates(replicaNode2.configDataStore(), "cars", + new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false)); + verifyVotingStates(replicaNode3.configDataStore(), "cars", + new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false)); } @Test public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception { String name = "testChangeMemberVotingStatesForSingleNodeShard"; String moduleShardsConfig = "module-shards-member1.conf"; - MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); @@ -660,36 +665,39 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on member-3 to change voting status - final ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(), - leaderNode.operDataStore(), null, null); - - RpcResult rpcResult = service - .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder() - .setShardName("cars").setDataStoreType(DataStoreType.Config) - .setMemberVotingState(List.of(new MemberVotingStateBuilder() - .setMemberName("member-1") - .setVoting(FALSE) - .build())) - .build()) - .get(10, TimeUnit.SECONDS); + final var service = new ClusterAdminRpcService(leaderNode.configDataStore(), leaderNode.operDataStore(), null); + + final var rpcResult = service.changeMemberVotingStatesForShard( + new ChangeMemberVotingStatesForShardInputBuilder() + .setShardName("cars").setDataStoreType(DataStoreType.Config) + .setMemberVotingState(List.of(new MemberVotingStateBuilder() + .setMemberName("member-1") + .setVoting(FALSE) + .build())) + .build()) + .get(10, TimeUnit.SECONDS); verifyFailedRpcResult(rpcResult); - verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE)); + verifyVotingStates(leaderNode.configDataStore(), "cars", new ExpState("member-1", true)); } @Test public void testChangeMemberVotingStatesForAllShards() throws Exception { String name = "testChangeMemberVotingStatesForAllShards"; String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = 
MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) - .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( - DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) - .build(); - - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes) + .akkaConfig("Member1") + .testName(name) + .moduleShardsConfig(moduleShardsConfig) + .datastoreContextBuilder(DatastoreContext.newBuilder() + .shardHeartbeatIntervalInMillis(300) + .shardElectionTimeoutFactor(1)) + .build(); + + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); @@ -702,75 +710,78 @@ public class ClusterAdminRpcServiceTest { // Invoke RPC service on member-3 to change voting status - final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), - replicaNode3.operDataStore(), null, null); + final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), + replicaNode3.operDataStore(), null); - RpcResult rpcResult = service3.changeMemberVotingStatesForAllShards( - new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(List.of( + final var rpcResult = service3.changeMemberVotingStatesForAllShards( + new ChangeMemberVotingStatesForAllShardsInputBuilder() + .setMemberVotingState(List.of( new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(), - new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build()) + new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())) + .build()) .get(10, TimeUnit.SECONDS); - ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult); + final var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational)); - verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(), - replicaNode2.configDataStore(), replicaNode2.operDataStore(), - replicaNode3.configDataStore(), replicaNode3.operDataStore()}, - new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE)); + verifyVotingStates(new ClientBackedDataStore[] { + leaderNode1.configDataStore(), leaderNode1.operDataStore(), + replicaNode2.configDataStore(), replicaNode2.operDataStore(), + replicaNode3.configDataStore(), replicaNode3.operDataStore() + }, new String[] { "cars", "people" }, + new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false)); } @Test public void testFlipMemberVotingStates() throws Exception { String name = "testFlipMemberVotingStates"; - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( - new ServerInfo("member-1", true), new ServerInfo("member-2", true), - new 
ServerInfo("member-3", false))); + final var persistedServerConfig = new ServerConfigurationPayload(List.of( + new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false))); setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people"); setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people"); setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people"); String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder() .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); leaderNode1.operDataStore().waitTillReady(); replicaNode3.configDataStore().waitTillReady(); replicaNode3.operDataStore().waitTillReady(); - verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE)); + verifyVotingStates(leaderNode1.configDataStore(), "cars", + new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false)); - final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), - replicaNode3.operDataStore(), null, null); + final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(), + null); - RpcResult rpcResult = service3.flipMemberVotingStatesForAllShards( - new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS); - FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult); + var rpcResult = service3.flipMemberVotingStatesForAllShards( + new FlipMemberVotingStatesForAllShardsInputBuilder().build()) + .get(10, TimeUnit.SECONDS); + var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational)); - verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(), - replicaNode2.configDataStore(), replicaNode2.operDataStore(), - replicaNode3.configDataStore(), replicaNode3.operDataStore()}, - new String[]{"cars", "people"}, - new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE), - new SimpleEntry<>("member-3", TRUE)); + verifyVotingStates(new ClientBackedDataStore[] { + leaderNode1.configDataStore(), leaderNode1.operDataStore(), + replicaNode2.configDataStore(), replicaNode2.operDataStore(), + replicaNode3.configDataStore(), 
replicaNode3.operDataStore() + }, new String[] { "cars", "people" }, + new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", true)); // Leadership should have transferred to member 3 since it is the only remaining voting member. verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> { @@ -788,19 +799,20 @@ public class ClusterAdminRpcServiceTest { // Flip the voting states back to the original states. rpcResult = service3.flipMemberVotingStatesForAllShards( - new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS); + new FlipMemberVotingStatesForAllShardsInputBuilder().build()) + .get(10, TimeUnit.SECONDS); result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational)); - verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(), - replicaNode2.configDataStore(), replicaNode2.operDataStore(), - replicaNode3.configDataStore(), replicaNode3.operDataStore()}, - new String[]{"cars", "people"}, - new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE), - new SimpleEntry<>("member-3", FALSE)); + verifyVotingStates(new ClientBackedDataStore[] { + leaderNode1.configDataStore(), leaderNode1.operDataStore(), + replicaNode2.configDataStore(), replicaNode2.operDataStore(), + replicaNode3.configDataStore(), replicaNode3.operDataStore() + }, new String[] { "cars", "people" }, + new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false)); // Leadership should have transferred to member 1 or 2. verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> { @@ -816,7 +828,7 @@ public class ClusterAdminRpcServiceTest { // Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are initially // non-voting and simulated as down by not starting them up. 
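    // Why no leader is expected initially (illustrative helper, not part of this patch): the
    // only voting members (4, 5 and 6) are never started, so no candidate can assemble a RAFT
    // majority of the voting set.
    private static boolean hasRaftQuorum(final int votingMembers, final int reachableVotingMembers) {
        return reachableVotingMembers >= votingMembers / 2 + 1;
    }
    // hasRaftQuorum(3, 0) == false  -> the "cars" shard stays in the Follower state, as asserted below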
- ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( + final var persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo("member-1", false), new ServerInfo("member-2", false), new ServerInfo("member-3", false), new ServerInfo("member-4", true), new ServerInfo("member-5", true), new ServerInfo("member-6", true))); @@ -826,47 +838,47 @@ public class ClusterAdminRpcServiceTest { setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people"); String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); // Initially there won't be a leader b/c all the up nodes are non-voting. replicaNode1.waitForMembersUp("member-2", "member-3"); - verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE), - new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE), - new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE), - new SimpleEntry<>("member-6", TRUE)); + verifyVotingStates(replicaNode1.configDataStore(), "cars", + new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false), + new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true)); verifyRaftState(replicaNode1.configDataStore(), "cars", raftState -> assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState())); - final ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(), - replicaNode1.operDataStore(), null, null); + final var service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(), replicaNode1.operDataStore(), + null); - RpcResult rpcResult = service1.flipMemberVotingStatesForAllShards( - new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS); - FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult); + final var rpcResult = service1.flipMemberVotingStatesForAllShards( + new FlipMemberVotingStatesForAllShardsInputBuilder().build()) + .get(10, TimeUnit.SECONDS); + final var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational)); - verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(), - replicaNode2.configDataStore(), replicaNode2.operDataStore(), - replicaNode3.configDataStore(), replicaNode3.operDataStore()}, 
- new String[]{"cars", "people"}, - new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE), - new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE), - new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE)); + verifyVotingStates(new ClientBackedDataStore[] { + replicaNode1.configDataStore(), replicaNode1.operDataStore(), + replicaNode2.configDataStore(), replicaNode2.operDataStore(), + replicaNode3.configDataStore(), replicaNode3.operDataStore() + }, new String[] { "cars", "people" }, + new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true), + new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false)); // Since member 1 was changed to voting and there was no leader, it should've started and election // and become leader @@ -888,7 +900,7 @@ public class ClusterAdminRpcServiceTest { String name = "testFlipMemberVotingStatesWithVotingMembersDown"; // Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up. - ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of( + final var persistedServerConfig = new ServerConfigurationPayload(List.of( new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", true), new ServerInfo("member-4", false), new ServerInfo("member-5", false), new ServerInfo("member-6", false))); @@ -898,43 +910,43 @@ public class ClusterAdminRpcServiceTest { setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people"); String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf"; - final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) + final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder( DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)) .build(); - final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) + final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); - final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) + final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name) .moduleShardsConfig(moduleShardsConfig).build(); leaderNode1.configDataStore().waitTillReady(); leaderNode1.operDataStore().waitTillReady(); - verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), - new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE), - new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE), - new SimpleEntry<>("member-6", FALSE)); + verifyVotingStates(leaderNode1.configDataStore(), "cars", + new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true), + new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false)); - final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), - leaderNode1.operDataStore(), null, null); + final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(), + null); - RpcResult rpcResult = service1.flipMemberVotingStatesForAllShards( - new 
FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS); - FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult); + final var rpcResult = service1.flipMemberVotingStatesForAllShards( + new FlipMemberVotingStatesForAllShardsInputBuilder().build()) + .get(10, TimeUnit.SECONDS); + final var result = verifySuccessfulRpcResult(rpcResult); verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational)); // Members 2 and 3 are now non-voting but should get replicated with the new new server config. - verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(), - replicaNode2.configDataStore(), replicaNode2.operDataStore(), - replicaNode3.configDataStore(), replicaNode3.operDataStore()}, - new String[]{"cars", "people"}, - new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE), - new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE), - new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE)); + verifyVotingStates(new ClientBackedDataStore[] { + leaderNode1.configDataStore(), leaderNode1.operDataStore(), + replicaNode2.configDataStore(), replicaNode2.operDataStore(), + replicaNode3.configDataStore(), replicaNode3.operDataStore() + }, new String[] { "cars", "people" }, + new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false), + new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true)); // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader. @@ -946,12 +958,12 @@ public class ClusterAdminRpcServiceTest { private static void setupPersistedServerConfigPayload(final ServerConfigurationPayload serverConfig, final String member, final String datastoreTypeSuffix, final String... shards) { - String[] datastoreTypes = {"config_", "oper_"}; + String[] datastoreTypes = { "config_", "oper_" }; for (String type : datastoreTypes) { for (String shard : shards) { - List newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size()); - for (ServerInfo info : serverConfig.getServerConfig()) { - newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()), + final var newServerInfo = new ArrayList(serverConfig.getServerConfig().size()); + for (var info : serverConfig.getServerConfig()) { + newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.peerId()), type + datastoreTypeSuffix).toString(), info.isVoting())); } @@ -964,45 +976,43 @@ public class ClusterAdminRpcServiceTest { } } - @SafeVarargs - private static void verifyVotingStates(final AbstractDataStore[] datastores, final String[] shards, - final SimpleEntry... expStates) throws Exception { - for (AbstractDataStore datastore: datastores) { - for (String shard: shards) { + private static void verifyVotingStates(final ClientBackedDataStore[] datastores, final String[] shards, + final ExpState... 
expStates) throws Exception { + for (var datastore : datastores) { + for (String shard : shards) { verifyVotingStates(datastore, shard, expStates); } } } - @SafeVarargs - private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName, - final SimpleEntry... expStates) throws Exception { + private static void verifyVotingStates(final ClientBackedDataStore datastore, final String shardName, + final ExpState... expStates) throws Exception { String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName(); - Map expStateMap = new HashMap<>(); - for (Entry e: expStates) { - expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()), - datastore.getActorUtils().getDataStoreName()).toString(), e.getValue()); + var expStateMap = new HashMap(); + for (var expState : expStates) { + expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(expState.name), + datastore.getActorUtils().getDataStoreName()).toString(), expState.voting); } verifyRaftState(datastore, shardName, raftState -> { String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName), datastore.getActorUtils().getDataStoreName()).toString(); assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting()); - for (Entry e: raftState.getPeerVotingStates().entrySet()) { - assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue()); + for (var entry : raftState.getPeerVotingStates().entrySet()) { + assertEquals("Voting state for " + entry.getKey(), expStateMap.get(entry.getKey()), entry.getValue()); } }); } private static void verifyShardResults(final Map shardResults, final ShardResult... expShardResults) { - Map expResultsMap = new HashMap<>(); - for (ShardResult r: expShardResults) { + var expResultsMap = new HashMap(); + for (var r : expShardResults) { expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r); } - for (ShardResult result: shardResults.values()) { - ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType()); + for (var result : shardResults.values()) { + var exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType()); assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(), result.getDataStoreType()), exp); assertEquals("isSucceeded", exp.getSucceeded(), result.getSucceeded()); diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml index d1a9fbd27e..481adfd32f 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml @@ -5,7 +5,7 @@ mdsal-parent org.opendaylight.controller - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent/pom.xml 4.0.0 diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java index 6f7b8d4522..ec23a08159 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java +++ 
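[Editorial note] The plain-text rendering of this diff drops generic type arguments (for example "new HashMap(" and "ListenableFuture>"). For readability, here is a condensed reconstruction of the expectation map built in the refactored verifyVotingStates above; the <String, Boolean> parameters are inferred from the surrounding code, not copied from the source:

    // Condensed reconstruction of the verifyVotingStates expectation map (see hunk above).
    // Generic arguments are inferred; the rendering strips them from the patch text.
    final var expStateMap = new HashMap<String, Boolean>();
    for (var expState : expStates) {
        expStateMap.put(
            ShardIdentifier.create(shardName, MemberName.forName(expState.name),
                datastore.getActorUtils().getDataStoreName()).toString(),
            expState.voting);
    }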
b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java @@ -11,20 +11,20 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yangtools.yang.common.RpcResult; @Service @Command(scope = "cluster-admin", name = "activate-eos-datacenter", description = "Run an activate-eos-datacenter test") public class ActivateEosDatacenterCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .activateEosDatacenter(new ActivateEosDatacenterInputBuilder().build()); + return rpcService.getRpc(ActivateEosDatacenter.class) + .invoke(new ActivateEosDatacenterInputBuilder().build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java index 5189f08efc..51f086b722 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java @@ -11,9 +11,9 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yangtools.yang.common.RpcResult; @Service @@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run an add-replicas-for-all-shards test") public class AddReplicasForAllShardsCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Override protected ListenableFuture> invokeRpc() { - return 
rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build()); + return rpcService.getRpc(AddReplicasForAllShards.class) + .invoke(new AddReplicasForAllShardsInputBuilder().build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java index 77c039e9a3..3639fbd5b6 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplica; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; @Command(scope = "cluster-admin", name = "add-shard-replica", description = "Run an add-shard-replica test") public class AddShardReplicaCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -30,10 +30,10 @@ public class AddShardReplicaCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .addShardReplica(new AddShardReplicaInputBuilder() + return rpcService.getRpc(AddShardReplica.class) + .invoke(new AddShardReplicaInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java index 9574bc0eda..4f19d0f3a9 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java +++ 
b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.common.Uint32; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.Uint32; @Command(scope = "cluster-admin", name = "backup-datastore", description = "Run a backup-datastore test") public class BackupDatastoreCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "file-path", required = true) private String filePath; @Argument(index = 1, name = "timeout", required = true) @@ -30,8 +30,8 @@ public class BackupDatastoreCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .backupDatastore(new BackupDatastoreInputBuilder() + return rpcService.getRpc(BackupDatastore.class) + .invoke(new BackupDatastoreInputBuilder() .setFilePath(filePath) .setTimeout(Uint32.valueOf(timeout)) .build()); diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java index d8fb0a4232..cef7e9d938 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java @@ -13,9 +13,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -26,7 +26,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a change-member-voting-states-for-all-shards test") public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "member-name", required = true) private String memberName; @Argument(index = 1, name = "voting", required = true) @@ -39,8 +39,8 @@ public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcActi .setVoting(voting) .build(); - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .changeMemberVotingStatesForAllShards(new ChangeMemberVotingStatesForAllShardsInputBuilder() + return rpcService.getRpc(ChangeMemberVotingStatesForAllShards.class) + .invoke(new ChangeMemberVotingStatesForAllShardsInputBuilder() .setMemberVotingState(List.of(memberVotingState)) .build()); } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java index cbfd5704af..e98a3090b6 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java @@ -14,9 +14,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder; @@ -27,7 +27,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a change-member-voting-states-for-shard test") public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction { 
@Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -44,10 +44,10 @@ public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction { .setVoting(voting) .build(); - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder() + return rpcService.getRpc(ChangeMemberVotingStatesForShard.class) + .invoke(new ChangeMemberVotingStatesForShardInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .setMemberVotingState(List.of(memberVotingState)) .build()); } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java index e0fd59ddf8..0ca0003c7e 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java @@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a deactivate-eos-datacenter test") public class DeactivateEosDatacenterCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .deactivateEosDatacenter(new DeactivateEosDatacenterInputBuilder().build()); + return rpcService.getRpc(DeactivateEosDatacenter.class) + .invoke(new DeactivateEosDatacenterInputBuilder().build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java index ac523ac2cd..78c0b6f809 100644 --- 
a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java @@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a flip-member-voting-states-for-all-shards test") public class FlipMemberVotingStatesForAllShardsCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .flipMemberVotingStatesForAllShards(new FlipMemberVotingStatesForAllShardsInputBuilder().build()); + return rpcService.getRpc(FlipMemberVotingStatesForAllShards.class) + .invoke(new FlipMemberVotingStatesForAllShardsInputBuilder().build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java index be55c006ad..c002846680 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java @@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -21,11 +21,11 @@ import 
org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a get-known-clients-for-all-shards test") public class GetKnownClientsForAllShardsCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .getKnownClientsForAllShards(new GetKnownClientsForAllShardsInputBuilder().build()); + return rpcService.getRpc(GetKnownClientsForAllShards.class) + .invoke(new GetKnownClientsForAllShardsInputBuilder().build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java index 8e914279ea..50a88e942c 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRole; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; @Command(scope = "cluster-admin", name = "get-shard-role", description = "Run a get-shard-role test") public class GetShardRoleCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -30,10 +30,10 @@ public class GetShardRoleCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .getShardRole(new GetShardRoleInputBuilder() + return rpcService.getRpc(GetShardRole.class) + .invoke(new GetShardRoleInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java 
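[Editorial note] All of the karaf CLI commands in this series follow the same migration, repeated in the hunks above and in the remaining command diffs below: the @Reference field changes from RpcConsumerRegistry to RpcService, and instead of looking up the generated ClusterAdminService and calling a per-RPC method, each command resolves the generated Rpc class and calls invoke(...). A condensed sketch of the new shape, using AddShardReplica as the example; the output type name (AddShardReplicaOutput) and the generic arguments are inferred, since the rendering strips them from the hunks:

    // Condensed sketch of the RpcService-based pattern adopted by the refactored commands.
    // AddShardReplicaOutput and the generics are inferred, not copied from the patch text.
    @Reference
    private RpcService rpcService;

    @Override
    protected ListenableFuture<RpcResult<AddShardReplicaOutput>> invokeRpc() {
        return rpcService.getRpc(AddShardReplica.class)
            .invoke(new AddShardReplicaInputBuilder()
                .setShardName(shardName)
                .setDataStoreType(DataStoreType.forName(dataStoreType))
                .build());
    }

Note also that DataStoreType.forName(dataStoreType) is now used directly, without the previous .orElse(null), which suggests the generated forName no longer returns an Optional in this API revision.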
b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java index 13d99a8e03..6a1b1731a6 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; @Command(scope = "cluster-admin", name = "locate-shard", description = "Run a locate-shard test") public class LocateShardCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -30,10 +30,10 @@ public class LocateShardCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .locateShard(new LocateShardInputBuilder() + return rpcService.getRpc(LocateShard.class) + .invoke(new LocateShardInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java index 3b1c1453d0..90aa8fc653 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import 
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocal; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; @Command(scope = "cluster-admin", name = "make-leader-local", description = "Run a make-leader-local test") public class MakeLeaderLocalCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -30,10 +30,10 @@ public class MakeLeaderLocalCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .makeLeaderLocal(new MakeLeaderLocalInputBuilder() + return rpcService.getRpc(MakeLeaderLocal.class) + .invoke(new MakeLeaderLocalInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .build()); } } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java index 97f3a79eb1..c33b66330d 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java @@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,14 +22,14 @@ import org.opendaylight.yangtools.yang.common.RpcResult; description = "Run a remove-all-shard-replicas test") public class RemoveAllShardReplicasCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = 
"member-name",required = true) private String memberName; @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .removeAllShardReplicas(new RemoveAllShardReplicasInputBuilder() + return rpcService.getRpc(RemoveAllShardReplicas.class) + .invoke(new RemoveAllShardReplicasInputBuilder() .setMemberName(memberName) .build()); } diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java index e4a786432b..9738f4fb77 100644 --- a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java +++ b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java @@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument; import org.apache.karaf.shell.api.action.Command; import org.apache.karaf.shell.api.action.lifecycle.Reference; import org.apache.karaf.shell.api.action.lifecycle.Service; -import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService; +import org.opendaylight.mdsal.binding.api.RpcService; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplica; import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder; import org.opendaylight.yangtools.yang.common.RpcResult; @@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult; @Command(scope = "cluster-admin", name = "remove-shard-replica", description = "Run a remove-shard-replica") public class RemoveShardReplicaCommand extends AbstractRpcAction { @Reference - private RpcConsumerRegistry rpcConsumerRegistry; + private RpcService rpcService; @Argument(index = 0, name = "shard-name", required = true) private String shardName; @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational") @@ -32,10 +32,10 @@ public class RemoveShardReplicaCommand extends AbstractRpcAction { @Override protected ListenableFuture> invokeRpc() { - return rpcConsumerRegistry.getRpcService(ClusterAdminService.class) - .removeShardReplica(new RemoveShardReplicaInputBuilder() + return rpcService.getRpc(RemoveShardReplica.class) + .invoke(new RemoveShardReplicaInputBuilder() .setShardName(shardName) - .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null)) + .setDataStoreType(DataStoreType.forName(dataStoreType)) .setMemberName(memberName) .build()); } diff --git a/opendaylight/md-sal/sal-clustering-commons/pom.xml b/opendaylight/md-sal/sal-clustering-commons/pom.xml index 3a62b14b6f..14056f32ca 100644 --- a/opendaylight/md-sal/sal-clustering-commons/pom.xml +++ b/opendaylight/md-sal/sal-clustering-commons/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -12,66 +12,32 @@ bundle - - org.xmlunit - 
xmlunit-legacy - - - - org.slf4j - slf4j-simple - test - - - org.opendaylight.yangtools - yang-test-util - - - - - commons-lang - commons-lang - test - - - commons-io - commons-io - test - - - org.apache.commons - commons-lang3 - test + + + javax.servlet + javax.servlet-api - - - org.opendaylight.controller - repackaged-akka - - com.typesafe.akka - akka-testkit_2.13 + com.github.spotbugs + spotbugs-annotations + true - com.typesafe.akka - akka-persistence-tck_2.13 + com.google.guava + guava - org.scalatestplus - junit-4-13_2.13 - test + com.guicedee.services + javax.inject + provided + true - - - - javax.servlet - javax.servlet-api + com.typesafe + config - - io.dropwizard.metrics metrics-core @@ -85,63 +51,119 @@ metrics-jmx - com.guicedee.services - javax.inject - provided + org.checkerframework + checker-qual true + + org.eclipse.jdt + org.eclipse.jdt.annotation + org.kohsuke.metainf-services metainf-services - org.osgi - osgi.cmpn + org.lz4 + lz4-java + 1.8.0 - - - com.google.guava - guava-testlib + org.opendaylight.yangtools + concepts - - - org.scala-lang - scala-library + org.opendaylight.yangtools + util - - org.opendaylight.yangtools - util + yang-common org.opendaylight.yangtools yang-data-api + + org.opendaylight.yangtools + yang-data-codec-binfmt + org.opendaylight.yangtools yang-data-impl + + org.opendaylight.yangtools + yang-data-tree-api + + + org.opendaylight.yangtools + yang-data-util + org.opendaylight.yangtools yang-model-api org.opendaylight.yangtools - yang-data-codec-binfmt + yang-repo-api org.opendaylight.yangtools yang-repo-spi + + org.opendaylight.controller + repackaged-akka + + + org.osgi + org.osgi.service.component.annotations + + + org.scala-lang + scala-library + - - org.lz4 - lz4-java - 1.8.0 + com.google.guava + guava-testlib + + + com.typesafe.akka + akka-persistence-tck_2.13 + + + com.typesafe.akka + akka-testkit_2.13 + + + commons-io + commons-io + test + + + org.apache.commons + commons-lang3 + test + + + org.opendaylight.yangtools + yang-test-util + + + org.scalatestplus + junit-4-13_2.13 + test + + + org.slf4j + slf4j-simple + test + + + org.xmlunit + xmlunit-core diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java index c655dcdb89..44afa634cc 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java @@ -9,7 +9,10 @@ package org.opendaylight.controller.cluster; import akka.japi.Procedure; +import akka.persistence.JournalProtocol; +import akka.persistence.SnapshotProtocol; import akka.persistence.SnapshotSelectionCriteria; +import org.eclipse.jdt.annotation.NonNull; /** * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence @@ -70,4 +73,20 @@ public interface DataPersistenceProvider { * @return the last sequence number */ long getLastSequenceNumber(); + + /** + * Receive and potentially handle a {@link JournalProtocol} response. 
+ * + * @param response A {@link JournalProtocol} response + * @return {@code true} if the response was handled + */ + boolean handleJournalResponse(JournalProtocol.@NonNull Response response); + + /** + * Receive and potentially handle a {@link SnapshotProtocol} response. + * + * @param response A {@link SnapshotProtocol} response + * @return {@code true} if the response was handled + */ + boolean handleSnapshotResponse(SnapshotProtocol.@NonNull Response response); } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java index f1a20fcc8e..3210819225 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java @@ -8,6 +8,8 @@ package org.opendaylight.controller.cluster; import akka.japi.Procedure; +import akka.persistence.JournalProtocol; +import akka.persistence.SnapshotProtocol; import akka.persistence.SnapshotSelectionCriteria; /** @@ -18,11 +20,11 @@ import akka.persistence.SnapshotSelectionCriteria; public class DelegatingPersistentDataProvider implements DataPersistenceProvider { private DataPersistenceProvider delegate; - public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) { + public DelegatingPersistentDataProvider(final DataPersistenceProvider delegate) { this.delegate = delegate; } - public void setDelegate(DataPersistenceProvider delegate) { + public void setDelegate(final DataPersistenceProvider delegate) { this.delegate = delegate; } @@ -36,27 +38,27 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider } @Override - public void persist(T entry, Procedure procedure) { + public void persist(final T entry, final Procedure procedure) { delegate.persist(entry, procedure); } @Override - public void persistAsync(T entry, Procedure procedure) { + public void persistAsync(final T entry, final Procedure procedure) { delegate.persistAsync(entry, procedure); } @Override - public void saveSnapshot(Object entry) { + public void saveSnapshot(final Object entry) { delegate.saveSnapshot(entry); } @Override - public void deleteSnapshots(SnapshotSelectionCriteria criteria) { + public void deleteSnapshots(final SnapshotSelectionCriteria criteria) { delegate.deleteSnapshots(criteria); } @Override - public void deleteMessages(long sequenceNumber) { + public void deleteMessages(final long sequenceNumber) { delegate.deleteMessages(sequenceNumber); } @@ -64,4 +66,14 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider public long getLastSequenceNumber() { return delegate.getLastSequenceNumber(); } + + @Override + public boolean handleJournalResponse(final JournalProtocol.Response response) { + return delegate.handleJournalResponse(response); + } + + @Override + public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) { + return delegate.handleSnapshotResponse(response); + } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java index 9a4a34cf59..5461689d2a 
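[Editorial note] The DataPersistenceProvider additions above give implementations a chance to consume Akka persistence protocol responses. A hypothetical dispatch inside a persistent actor's message handling could look like the following; persistence() is an assumed name for however the actor reaches its provider, and unhandled(...) is the usual Akka fallback:

    // Hypothetical sketch: routing Akka persistence protocol responses to the provider.
    // persistence() is an assumed accessor for the actor's DataPersistenceProvider;
    // this runs inside an actor, so unhandled(...) is available.
    if (message instanceof JournalProtocol.Response journalResponse) {
        if (!persistence().handleJournalResponse(journalResponse)) {
            unhandled(message);
        }
    } else if (message instanceof SnapshotProtocol.Response snapshotResponse) {
        if (!persistence().handleSnapshotResponse(snapshotResponse)) {
            unhandled(message);
        }
    }

As the hunks below show, NonPersistentDataProvider reports nothing as handled, while PersistentDataProvider treats the delete acknowledgements (DeleteMessagesSuccess, DeleteSnapshotsSuccess) as consumed.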
100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java @@ -10,6 +10,8 @@ package org.opendaylight.controller.cluster; import static java.util.Objects.requireNonNull; import akka.japi.Procedure; +import akka.persistence.JournalProtocol; +import akka.persistence.SnapshotProtocol; import akka.persistence.SnapshotSelectionCriteria; import org.opendaylight.controller.cluster.common.actor.ExecuteInSelfActor; import org.slf4j.Logger; @@ -70,4 +72,14 @@ public class NonPersistentDataProvider implements DataPersistenceProvider { LOG.error("An unexpected error occurred", e); } } + + @Override + public boolean handleJournalResponse(final JournalProtocol.Response response) { + return false; + } + + @Override + public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) { + return false; + } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java index 21102f1f0e..1faee47f52 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java @@ -11,16 +11,19 @@ import static java.util.Objects.requireNonNull; import akka.japi.Procedure; import akka.persistence.AbstractPersistentActor; +import akka.persistence.DeleteMessagesSuccess; +import akka.persistence.DeleteSnapshotsSuccess; +import akka.persistence.JournalProtocol; +import akka.persistence.SnapshotProtocol; import akka.persistence.SnapshotSelectionCriteria; /** * A DataPersistenceProvider implementation with persistence enabled. 
*/ public class PersistentDataProvider implements DataPersistenceProvider { - private final AbstractPersistentActor persistentActor; - public PersistentDataProvider(AbstractPersistentActor persistentActor) { + public PersistentDataProvider(final AbstractPersistentActor persistentActor) { this.persistentActor = requireNonNull(persistentActor, "persistentActor can't be null"); } @@ -30,27 +33,27 @@ public class PersistentDataProvider implements DataPersistenceProvider { } @Override - public void persist(T entry, Procedure procedure) { + public void persist(final T entry, final Procedure procedure) { persistentActor.persist(entry, procedure); } @Override - public void persistAsync(T entry, Procedure procedure) { + public void persistAsync(final T entry, final Procedure procedure) { persistentActor.persistAsync(entry, procedure); } @Override - public void saveSnapshot(Object snapshot) { + public void saveSnapshot(final Object snapshot) { persistentActor.saveSnapshot(snapshot); } @Override - public void deleteSnapshots(SnapshotSelectionCriteria criteria) { + public void deleteSnapshots(final SnapshotSelectionCriteria criteria) { persistentActor.deleteSnapshots(criteria); } @Override - public void deleteMessages(long sequenceNumber) { + public void deleteMessages(final long sequenceNumber) { persistentActor.deleteMessages(sequenceNumber); } @@ -58,4 +61,14 @@ public class PersistentDataProvider implements DataPersistenceProvider { public long getLastSequenceNumber() { return persistentActor.lastSequenceNr(); } + + @Override + public boolean handleJournalResponse(final JournalProtocol.Response response) { + return response instanceof DeleteMessagesSuccess; + } + + @Override + public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) { + return response instanceof DeleteSnapshotsSuccess; + } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java index 6af52fbd04..f66a77f66e 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java @@ -17,10 +17,11 @@ import org.slf4j.LoggerFactory; public abstract class AbstractUntypedActor extends AbstractActor implements ExecuteInSelfActor { // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now. 
- @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE") @SuppressWarnings("checkstyle:MemberName") + @SuppressFBWarnings(value = "SLF4J_LOGGER_SHOULD_BE_PRIVATE", justification = "Class identity is required") protected final Logger LOG = LoggerFactory.getLogger(getClass()); + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") protected AbstractUntypedActor() { LOG.debug("Actor created {}", getSelf()); getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf()); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java index 2124b24faf..d20ceb5252 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java @@ -7,21 +7,24 @@ */ package org.opendaylight.controller.cluster.common.actor; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; + /** * Actor with its behaviour metered. Metering is enabled by configuration. */ public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor { - - //this is used in the metric name. Some transient actors do not have defined names + // this is used in the metric name. Some transient actors do not have defined names private String actorNameOverride; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") public AbstractUntypedActorWithMetering() { if (isMetricsCaptureEnabled()) { getContext().become(new MeteringBehavior(this)); } } - public AbstractUntypedActorWithMetering(String actorNameOverride) { + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") + public AbstractUntypedActorWithMetering(final String actorNameOverride) { this.actorNameOverride = actorNameOverride; if (isMetricsCaptureEnabled()) { getContext().become(new MeteringBehavior(this)); @@ -29,8 +32,7 @@ public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedAc } private boolean isMetricsCaptureEnabled() { - CommonConfig config = new CommonConfig(getContext().system().settings().config()); - return config.isMetricCaptureEnabled(); + return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled(); } public String getActorNameOverride() { diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java index 711a43159a..8bf657e134 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java @@ -14,13 +14,16 @@ import org.eclipse.jdt.annotation.NonNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +// FIXME: override getContext(), getSelf() 
and others to be final to get rid of +// SpotBugs MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR violation public abstract class AbstractUntypedPersistentActor extends AbstractPersistentActor implements ExecuteInSelfActor { // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now. - @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE") @SuppressWarnings("checkstyle:MemberName") + @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE") protected final Logger LOG = LoggerFactory.getLogger(getClass()); + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") protected AbstractUntypedPersistentActor() { LOG.trace("Actor created {}", getSelf()); getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf()); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java index ed03d33491..760f0bd0fd 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java @@ -7,11 +7,13 @@ */ package org.opendaylight.controller.cluster.common.actor; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; + /** * Actor with its behaviour metered. Metering is enabled by configuration. */ public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor { - + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") public AbstractUntypedPersistentActorWithMetering() { if (isMetricsCaptureEnabled()) { getContext().become(new MeteringBehavior(this)); @@ -19,7 +21,6 @@ public abstract class AbstractUntypedPersistentActorWithMetering extends Abstrac } private boolean isMetricsCaptureEnabled() { - CommonConfig config = new CommonConfig(getContext().system().settings().config()); - return config.isMetricCaptureEnabled(); + return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled(); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java index 77dcba564d..c5c19d8d37 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java @@ -17,6 +17,7 @@ import akka.japi.Effect; import akka.remote.AssociationErrorEvent; import akka.remote.RemotingLifecycleEvent; import akka.remote.artery.ThisActorSystemQuarantinedEvent; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.HashSet; import java.util.Set; import org.slf4j.Logger; @@ -42,6 +43,7 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor { private final Set
<Address>
    addressSet = new HashSet<>(); private int count = 0; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") protected QuarantinedMonitorActor(final Effect callback) { this.callback = callback; @@ -66,25 +68,23 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor { return; } - if (message instanceof ThisActorSystemQuarantinedEvent) { - final ThisActorSystemQuarantinedEvent event = (ThisActorSystemQuarantinedEvent) message; + if (message instanceof ThisActorSystemQuarantinedEvent event) { LOG.warn("Got quarantined by {}", event.remoteAddress()); quarantined = true; // execute the callback callback.apply(); - } else if (message instanceof AssociationErrorEvent) { + } else if (message instanceof AssociationErrorEvent event) { final String errorMessage = message.toString(); LOG.trace("errorMessage:{}", errorMessage); if (errorMessage.contains("The remote system has a UID that has been quarantined")) { - final Address address = ((AssociationErrorEvent) message).getRemoteAddress(); + final Address address = event.getRemoteAddress(); addressSet.add(address); count++; LOG.trace("address:{} addressSet: {} count:{}", address, addressSet, count); if (count >= MESSAGE_THRESHOLD && addressSet.size() > 1) { count = 0; addressSet.clear(); - final AssociationErrorEvent event = (AssociationErrorEvent) message; LOG.warn("Got quarantined via AssociationEvent by {}", event.remoteAddress()); quarantined = true; @@ -95,8 +95,7 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor { count = 0; addressSet.clear(); } - } else if (message instanceof ClusterEvent.MemberDowned) { - final ClusterEvent.MemberDowned event = (ClusterEvent.MemberDowned) message; + } else if (message instanceof ClusterEvent.MemberDowned event) { if (Cluster.get(getContext().system()).selfMember().equals(event.member())) { LOG.warn("This member has been downed, restarting"); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java index 6e567fa245..19df464f9e 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java @@ -35,38 +35,35 @@ public class NormalizedNodeNavigator { final DataContainerNode dataContainerNode) { visitor.visitNode(level, parentPath, dataContainerNode); - String newParentPath = parentPath + "/" + dataContainerNode.getIdentifier().toString(); + String newParentPath = parentPath + "/" + dataContainerNode.name().toString(); - for (NormalizedNode node : dataContainerNode.body()) { - if (node instanceof MixinNode && node instanceof NormalizedNodeContainer) { - navigateNormalizedNodeContainerMixin(level, newParentPath, (NormalizedNodeContainer) node); + for (var node : dataContainerNode.body()) { + if (node instanceof MixinNode && node instanceof NormalizedNodeContainer container) { + navigateNormalizedNodeContainerMixin(level, newParentPath, container); } else { navigateNormalizedNode(level, newParentPath, node); } } - } private void navigateNormalizedNodeContainerMixin(final int level, final String parentPath, final NormalizedNodeContainer node) { 
visitor.visitNode(level, parentPath, node); - String newParentPath = parentPath + "/" + node.getIdentifier().toString(); + String newParentPath = parentPath + "/" + node.name().toString(); - for (NormalizedNode normalizedNode : node.body()) { - if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer) { - navigateNormalizedNodeContainerMixin(level + 1, newParentPath, - (NormalizedNodeContainer) normalizedNode); + for (var normalizedNode : node.body()) { + if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer container) { + navigateNormalizedNodeContainerMixin(level + 1, newParentPath, container); } else { navigateNormalizedNode(level, newParentPath, normalizedNode); } } - } private void navigateNormalizedNode(final int level, final String parentPath, final NormalizedNode normalizedNode) { - if (normalizedNode instanceof DataContainerNode) { - navigateDataContainerNode(level + 1, parentPath, (DataContainerNode) normalizedNode); + if (normalizedNode instanceof DataContainerNode dataContainer) { + navigateDataContainerNode(level + 1, parentPath, dataContainer); } else { visitor.visitNode(level + 1, parentPath, normalizedNode); } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java index a2c456d17a..51e61ea47f 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java @@ -7,8 +7,6 @@ */ package org.opendaylight.controller.cluster.datastore.node.utils.stream; -import static org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion.MAGNESIUM; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -45,7 +43,7 @@ public final class SerializationUtils { public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode node) throws IOException { - writeNormalizedNode(out, MAGNESIUM, node); + writeNormalizedNode(out, NormalizedNodeStreamVersion.POTASSIUM, node); } public static void writeNormalizedNode(final DataOutput out, final NormalizedNodeStreamVersion version, @@ -67,7 +65,7 @@ public final class SerializationUtils { public static void writePath(final DataOutput out, final @NonNull YangInstanceIdentifier path) throws IOException { - writePath(out, MAGNESIUM, path); + writePath(out, NormalizedNodeStreamVersion.POTASSIUM, path); } public static void writePath(final DataOutput out, final NormalizedNodeStreamVersion version, @@ -95,7 +93,7 @@ public final class SerializationUtils { public static void writeNodeAndPath(final DataOutput out, final YangInstanceIdentifier path, final NormalizedNode node) throws IOException { - writeNodeAndPath(out, MAGNESIUM, path, node); + writeNodeAndPath(out, NormalizedNodeStreamVersion.POTASSIUM, path, node); } public static void readPathAndNode(final DataInput in, final T instance, final Applier applier) @@ -117,6 +115,6 @@ public final class SerializationUtils { public static void writePathAndNode(final DataOutput out, final YangInstanceIdentifier path, final NormalizedNode node) throws IOException { - 
writePathAndNode(out, MAGNESIUM, path, node); + writePathAndNode(out, NormalizedNodeStreamVersion.POTASSIUM, path, node); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java index 13500c17c0..cee5a0329a 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java @@ -18,7 +18,6 @@ import java.util.NoSuchElementException; import java.util.Optional; import javax.xml.transform.dom.DOMSource; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; @@ -26,7 +25,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgum import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter; import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter; -import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode; +import org.opendaylight.yangtools.yang.data.util.DataSchemaContext; import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree; import org.opendaylight.yangtools.yang.model.api.DataSchemaNode; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; @@ -58,12 +57,12 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodePruner.class); - private final Deque> stack = new ArrayDeque<>(); + private final Deque stack = new ArrayDeque<>(); private final ReusableImmutableNormalizedNodeStreamWriter delegate = ReusableImmutableNormalizedNodeStreamWriter.create(); private final DataSchemaContextTree tree; - private DataSchemaContextNode nodePathSchemaNode; + private DataSchemaContext nodePathSchemaNode; private NormalizedNode normalizedNode; private State state = State.UNITIALIZED; private int unknown; @@ -145,11 +144,6 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite enter(ReusableImmutableNormalizedNodeStreamWriter::startChoiceNode, name, childSizeHint); } - @Override - public final void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException { - enter(ReusableImmutableNormalizedNodeStreamWriter::startAugmentationNode, identifier); - } - @Override public final boolean startAnyxmlNode(final NodeIdentifier name, final Class objectModel) throws IOException { if (enter(name)) { @@ -181,7 +175,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite } } - Object translateScalar(final DataSchemaContextNode context, final Object value) throws IOException { + Object 
translateScalar(final DataSchemaContext context, final Object value) { // Default is pass-through return value; } @@ -206,7 +200,8 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite } if (stack.isEmpty()) { - normalizedNode = delegate.getResult(); + final var result = delegate.result(); + normalizedNode = result != null ? result.data() : null; state = State.CLOSED; } } @@ -247,10 +242,11 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite return false; } - final DataSchemaContextNode schema; - final DataSchemaContextNode parent = currentSchema(); + final DataSchemaContext schema; + final DataSchemaContext parent = currentSchema(); if (parent != null) { - schema = parent.getChild(name); + schema = parent instanceof DataSchemaContext.Composite compositeParent ? compositeParent.childByArg(name) + : null; } else { schema = nodePathSchemaNode; } @@ -262,7 +258,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite } stack.push(schema); - final DataSchemaNode dataSchema = schema.getDataSchemaNode(); + final DataSchemaNode dataSchema = schema.dataSchemaNode(); if (dataSchema != null) { delegate.nextDataSchemaNode(dataSchema); } @@ -282,7 +278,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite } } - final DataSchemaContextNode currentSchema() { + final DataSchemaContext currentSchema() { return stack.peek(); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java index b3e02a4761..4c6c223396 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java @@ -30,7 +30,7 @@ import org.opendaylight.yangtools.yang.common.Uint8; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter; -import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode; +import org.opendaylight.yangtools.yang.data.util.DataSchemaContext; import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree; import org.opendaylight.yangtools.yang.model.api.DataSchemaNode; import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode; @@ -54,9 +54,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner { UINT8 { @Override public Object apply(final Object obj) { - if (obj instanceof Short) { + if (obj instanceof Short shortObj) { LOG.trace("Translating legacy uint8 {}", obj); - return Uint8.valueOf((Short) obj); + return Uint8.valueOf(shortObj); } return obj; } @@ -64,9 +64,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner { UINT16 { @Override public Object apply(final Object obj) { - if (obj instanceof Integer) { + if (obj instanceof Integer intObj) { LOG.trace("Translating legacy uint16 {}", obj); - return Uint16.valueOf((Integer) obj); + return Uint16.valueOf(intObj); } return obj; } @@ -74,9 +74,9 @@ final 
class UintAdaptingPruner extends ReusableNormalizedNodePruner { UINT32 { @Override public Object apply(final Object obj) { - if (obj instanceof Long) { + if (obj instanceof Long longObj) { LOG.trace("Translating legacy uint32 {}", obj); - return Uint32.valueOf((Long) obj); + return Uint32.valueOf(longObj); } return obj; } @@ -84,9 +84,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner { UINT64 { @Override public Object apply(final Object obj) { - if (obj instanceof BigInteger) { + if (obj instanceof BigInteger bigInt) { LOG.trace("Translating legacy uint64 {}", obj); - return Uint64.valueOf((BigInteger) obj); + return Uint64.valueOf(bigInt); } return obj; } @@ -133,18 +133,17 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner { } @Override - Object translateScalar(final DataSchemaContextNode context, final Object value) throws IOException { - final DataSchemaNode schema = context.getDataSchemaNode(); - return schema instanceof TypedDataSchemaNode ? adaptValue(((TypedDataSchemaNode) schema).getType(), value) - : value; + Object translateScalar(final DataSchemaContext context, final Object value) { + final DataSchemaNode schema = context.dataSchemaNode(); + return schema instanceof TypedDataSchemaNode typed ? adaptValue(typed.getType(), value) : value; } private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer, final NodeWithValue name) { final NodeWithValue adapted; - final DataSchemaNode schema = currentSchema().getDataSchemaNode(); - if (schema instanceof TypedDataSchemaNode) { + final DataSchemaNode schema = currentSchema().dataSchemaNode(); + if (schema instanceof TypedDataSchemaNode typed) { final Object oldValue = name.getValue(); - final Object newValue = adaptValue(((TypedDataSchemaNode) schema).getType(), oldValue); + final Object newValue = adaptValue(typed.getType(), oldValue); adapted = newValue == oldValue ? 
name : new NodeWithValue<>(name.getNodeType(), newValue); } else { adapted = name; @@ -156,9 +155,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner { private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer, final NodeIdentifierWithPredicates name, final int size) { final NodeIdentifierWithPredicates adapted; - final DataSchemaNode schema = currentSchema().getDataSchemaNode(); - if (schema instanceof ListSchemaNode) { - adapted = NIP_ADAPTERS.getUnchecked((ListSchemaNode) schema).apply(name); + final DataSchemaNode schema = currentSchema().dataSchemaNode(); + if (schema instanceof ListSchemaNode list) { + adapted = NIP_ADAPTERS.getUnchecked(list).apply(name); } else { adapted = name; } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java index 35ab00f4f5..3e299e3d9b 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java @@ -16,7 +16,7 @@ import java.util.Optional; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor; /** * Abstract {@link DataTreeModificationCursor} which tracks the current path. Subclasses can get the current path @@ -26,7 +26,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification */ @Beta public abstract class AbstractDataTreeModificationCursor implements DataTreeModificationCursor { - private YangInstanceIdentifier current = YangInstanceIdentifier.empty(); + private YangInstanceIdentifier current = YangInstanceIdentifier.of(); protected final YangInstanceIdentifier current() { return current; diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java index 73cdece20f..1376c67714 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java @@ -126,14 +126,14 @@ public final class ChunkedOutputStream extends OutputStream { public Either toVariant() { checkClosed(); - return result instanceof byte[] ? Either.ofFirst((byte[]) result) + return result instanceof byte[] bytes ? Either.ofFirst(bytes) : Either.ofSecond(new ChunkedByteArray(size, (ImmutableList) result)); } @VisibleForTesting ChunkedByteArray toChunkedByteArray() { checkClosed(); - return new ChunkedByteArray(size, result instanceof byte[] ? ImmutableList.of((byte[]) result) + return new ChunkedByteArray(size, result instanceof byte[] bytes ? 
ImmutableList.of(bytes) : (ImmutableList) result); } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java index 029464a82b..b00e4bee4e 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java @@ -91,7 +91,7 @@ public class FileBackedOutputStream extends OutputStream { if (file != null) { return Files.newInputStream(file.toPath()); } else { - return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount()); + return new ByteArrayInputStream(memory.buf(), 0, memory.count()); } } } @@ -178,20 +178,20 @@ public class FileBackedOutputStream extends OutputStream { throw new IOException("Stream already closed"); } - if (file == null && memory.getCount() + len > fileThreshold) { + if (file == null && memory.count() + len > fileThreshold) { final File temp = File.createTempFile("FileBackedOutputStream", null, fileDirectory == null ? null : new File(fileDirectory)); temp.deleteOnExit(); final Cleaner.Cleanable cleanup = FILE_CLEANER.register(this, () -> deleteFile(temp)); - LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.getCount() + len, + LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.count() + len, fileThreshold, temp); final OutputStream transfer; try { transfer = Files.newOutputStream(temp.toPath()); try { - transfer.write(memory.getBuffer(), 0, memory.getCount()); + transfer.write(memory.buf(), 0, memory.count()); transfer.flush(); } catch (IOException e) { try { @@ -224,12 +224,12 @@ public class FileBackedOutputStream extends OutputStream { /** * ByteArrayOutputStream that exposes its internals for efficiency. 
*/ - private static class MemoryOutputStream extends ByteArrayOutputStream { - byte[] getBuffer() { + private static final class MemoryOutputStream extends ByteArrayOutputStream { + byte[] buf() { return buf; } - int getCount() { + int count() { return count; } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java index 297186d9f7..2b41bc595f 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java @@ -42,11 +42,11 @@ public final class MessageAssembler implements AutoCloseable { private final String logContext; MessageAssembler(final Builder builder) { - this.fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory, + fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory, "FiledBackedStreamFactory cannot be null"); - this.assembledMessageCallback = requireNonNull(builder.assembledMessageCallback, + assembledMessageCallback = requireNonNull(builder.assembledMessageCallback, "assembledMessageCallback cannot be null"); - this.logContext = builder.logContext; + logContext = builder.logContext; stateCache = CacheBuilder.newBuilder() .expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit) @@ -97,13 +97,13 @@ public final class MessageAssembler implements AutoCloseable { * @return true if the message was handled, false otherwise */ public boolean handleMessage(final Object message, final @NonNull ActorRef sendTo) { - if (message instanceof MessageSlice) { - LOG.debug("{}: handleMessage: {}", logContext, message); - onMessageSlice((MessageSlice) message, sendTo); + if (message instanceof MessageSlice messageSlice) { + LOG.debug("{}: handleMessage: {}", logContext, messageSlice); + onMessageSlice(messageSlice, sendTo); return true; - } else if (message instanceof AbortSlicing) { - LOG.debug("{}: handleMessage: {}", logContext, message); - onAbortSlicing((AbortSlicing) message); + } else if (message instanceof AbortSlicing abortSlicing) { + LOG.debug("{}: handleMessage: {}", logContext, abortSlicing); + onAbortSlicing(abortSlicing); return true; } @@ -116,14 +116,9 @@ public final class MessageAssembler implements AutoCloseable { final AssembledMessageState state = stateCache.get(identifier, () -> createState(messageSlice)); processMessageSliceForState(messageSlice, state, sendTo); } catch (ExecutionException e) { - final MessageSliceException messageSliceEx; final Throwable cause = e.getCause(); - if (cause instanceof MessageSliceException) { - messageSliceEx = (MessageSliceException) cause; - } else { - messageSliceEx = new MessageSliceException(String.format( - "Error creating state for identifier %s", identifier), cause); - } + final MessageSliceException messageSliceEx = cause instanceof MessageSliceException sliceEx ? 
sliceEx + : new MessageSliceException(String.format("Error creating state for identifier %s", identifier), cause); messageSlice.getReplyTo().tell(MessageSliceReply.failed(identifier, messageSliceEx, sendTo), ActorRef.noSender()); @@ -231,7 +226,7 @@ public final class MessageAssembler implements AutoCloseable { * @return this Builder */ public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) { - this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory); + fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory); return this; } @@ -243,7 +238,7 @@ public final class MessageAssembler implements AutoCloseable { * @return this Builder */ public Builder assembledMessageCallback(final BiConsumer newAssembledMessageCallback) { - this.assembledMessageCallback = newAssembledMessageCallback; + assembledMessageCallback = newAssembledMessageCallback; return this; } @@ -258,8 +253,8 @@ public final class MessageAssembler implements AutoCloseable { */ public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) { checkArgument(duration > 0, "duration must be > 0"); - this.expireStateAfterInactivityDuration = duration; - this.expireStateAfterInactivityUnit = unit; + expireStateAfterInactivityDuration = duration; + expireStateAfterInactivityUnit = unit; return this; } @@ -270,7 +265,7 @@ public final class MessageAssembler implements AutoCloseable { * @return this Builder */ public Builder logContext(final String newLogContext) { - this.logContext = newLogContext; + logContext = newLogContext; return this; } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java index 0cc36689bb..852e7f9b19 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java @@ -60,17 +60,9 @@ final class MessageSliceIdentifier implements Identifier { @Override public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - - if (!(obj instanceof MessageSliceIdentifier)) { - return false; - } - - MessageSliceIdentifier other = (MessageSliceIdentifier) obj; - return other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId - && other.messageId == messageId; + return this == obj || obj instanceof MessageSliceIdentifier other + && other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId + && other.messageId == messageId; } @Override diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java index 57a6f9ed4f..f30dbc66a7 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java @@ -48,12 +48,12 @@ public class MessageSlicer implements AutoCloseable { private final long id; MessageSlicer(final Builder builder) { - this.fileBackedStreamFactory = 
builder.fileBackedStreamFactory; - this.messageSliceSize = builder.messageSliceSize; - this.maxSlicingTries = builder.maxSlicingTries; + fileBackedStreamFactory = builder.fileBackedStreamFactory; + messageSliceSize = builder.messageSliceSize; + maxSlicingTries = builder.maxSlicingTries; id = SLICER_ID_COUNTER.getAndIncrement(); - this.logContext = builder.logContext + "_slicer-id-" + id; + logContext = builder.logContext + "_slicer-id-" + id; CacheBuilder> cacheBuilder = CacheBuilder.newBuilder().removalListener(this::stateRemoved); @@ -174,9 +174,9 @@ public class MessageSlicer implements AutoCloseable { * @return true if the message was handled, false otherwise */ public boolean handleMessage(final Object message) { - if (message instanceof MessageSliceReply) { - LOG.debug("{}: handleMessage: {}", logContext, message); - return onMessageSliceReply((MessageSliceReply) message); + if (message instanceof MessageSliceReply sliceReply) { + LOG.debug("{}: handleMessage: {}", logContext, sliceReply); + return onMessageSliceReply(sliceReply); } return false; @@ -219,8 +219,7 @@ public class MessageSlicer implements AutoCloseable { private boolean onMessageSliceReply(final MessageSliceReply reply) { final Identifier identifier = reply.getIdentifier(); - if (!(identifier instanceof MessageSliceIdentifier) - || ((MessageSliceIdentifier)identifier).getSlicerId() != id) { + if (!(identifier instanceof MessageSliceIdentifier sliceIdentifier) || sliceIdentifier.getSlicerId() != id) { return false; } @@ -236,7 +235,7 @@ public class MessageSlicer implements AutoCloseable { final Optional failure = reply.getFailure(); if (failure.isPresent()) { LOG.warn("{}: Received failed {}", logContext, reply); - processMessageSliceException(failure.get(), state, reply.getSendTo()); + processMessageSliceException(failure.orElseThrow(), state, reply.getSendTo()); return true; } @@ -336,7 +335,7 @@ public class MessageSlicer implements AutoCloseable { * @return this Builder */ public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) { - this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory); + fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory); return this; } @@ -348,7 +347,7 @@ public class MessageSlicer implements AutoCloseable { */ public Builder messageSliceSize(final int newMessageSliceSize) { checkArgument(newMessageSliceSize > 0, "messageSliceSize must be > 0"); - this.messageSliceSize = newMessageSliceSize; + messageSliceSize = newMessageSliceSize; return this; } @@ -361,7 +360,7 @@ public class MessageSlicer implements AutoCloseable { */ public Builder maxSlicingTries(final int newMaxSlicingTries) { checkArgument(newMaxSlicingTries > 0, "newMaxSlicingTries must be > 0"); - this.maxSlicingTries = newMaxSlicingTries; + maxSlicingTries = newMaxSlicingTries; return this; } @@ -376,8 +375,8 @@ public class MessageSlicer implements AutoCloseable { */ public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) { checkArgument(duration > 0, "duration must be > 0"); - this.expireStateAfterInactivityDuration = duration; - this.expireStateAfterInactivityUnit = unit; + expireStateAfterInactivityDuration = duration; + expireStateAfterInactivityUnit = unit; return this; } @@ -388,7 +387,7 @@ public class MessageSlicer implements AutoCloseable { * @return this Builder */ public Builder logContext(final String newLogContext) { - this.logContext = requireNonNull(newLogContext); + logContext = 
requireNonNull(newLogContext); return this; } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java index caa1a8debf..373823ef0f 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java @@ -18,11 +18,12 @@ import org.eclipse.jdt.annotation.Nullable; * @author Thomas Pantelis */ public class LeaderStateChanged { - private final String memberId; - private final String leaderId; + private final @NonNull String memberId; + private final @Nullable String leaderId; private final short leaderPayloadVersion; - public LeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, short leaderPayloadVersion) { + public LeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId, + final short leaderPayloadVersion) { this.memberId = requireNonNull(memberId); this.leaderId = leaderId; this.leaderPayloadVersion = leaderPayloadVersion; diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java index bb4ad65f16..ed0c10a717 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java @@ -72,9 +72,8 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos } - } else if (message instanceof RoleChanged) { + } else if (message instanceof RoleChanged roleChanged) { // this message is sent by RaftActor. Notify registered listeners when this message is received. 
- RoleChanged roleChanged = (RoleChanged) message; LOG.info("RoleChangeNotifier for {} , received role change from {} to {}", memberId, roleChanged.getOldRole(), roleChanged.getNewRole()); @@ -83,13 +82,13 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos new RoleChangeNotification(roleChanged.getMemberId(), roleChanged.getOldRole(), roleChanged.getNewRole()); - for (ActorRef listener: registeredListeners.values()) { + for (ActorRef listener : registeredListeners.values()) { listener.tell(latestRoleChangeNotification, getSelf()); } - } else if (message instanceof LeaderStateChanged) { - latestLeaderStateChanged = (LeaderStateChanged)message; + } else if (message instanceof LeaderStateChanged leaderStateChanged) { + latestLeaderStateChanged = leaderStateChanged; - for (ActorRef listener: registeredListeners.values()) { + for (ActorRef listener : registeredListeners.values()) { listener.tell(latestLeaderStateChanged, getSelf()); } } else { diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java index b63b732f5b..c796305675 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java @@ -59,7 +59,7 @@ import scala.concurrent.Future; * * @author Thomas Pantelis */ -public class LocalSnapshotStore extends SnapshotStore { +public final class LocalSnapshotStore extends SnapshotStore { private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class); private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length(); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java deleted file mode 100644 index fc65743e7b..0000000000 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.raft.protobuff.client.messages; - -/** - * An instance of a Payload class is meant to be used as the Payload for - * AppendEntries. - * - *
<p>
    - * When an actor which is derived from RaftActor attempts to persistData it - * must pass an instance of the Payload class. Similarly when state needs to - * be applied to the derived RaftActor it will be passed an instance of the - * Payload class. - */ -public abstract class Payload { - public abstract int size(); -} diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java index 03b44b5851..c8ceb13ed9 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java @@ -11,7 +11,7 @@ import com.google.common.annotations.Beta; import java.util.Set; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.schema.provider.impl.YangTextSchemaSourceSerializationProxy; -import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; import scala.concurrent.Future; /** diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java index 4a4416d576..7a53188718 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java @@ -12,19 +12,18 @@ import com.google.common.annotations.Beta; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider; -import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.YangTextSource; import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.ExecutionContext; -import scala.concurrent.Future; /** * Provides schema sources from {@link RemoteYangTextSourceProvider}. 
*/ @Beta -public class RemoteSchemaProvider implements SchemaSourceProvider { +public class RemoteSchemaProvider implements SchemaSourceProvider { private static final Logger LOG = LoggerFactory.getLogger(RemoteSchemaProvider.class); private final RemoteYangTextSourceProvider remoteRepo; @@ -37,21 +36,18 @@ public class RemoteSchemaProvider implements SchemaSourceProvider getSource(final SourceIdentifier sourceIdentifier) { - LOG.trace("Getting yang schema source for {}", sourceIdentifier.getName()); + public ListenableFuture getSource(final SourceIdentifier sourceIdentifier) { + LOG.trace("Getting yang schema source for {}", sourceIdentifier.name().getLocalName()); - Future result = remoteRepo.getYangTextSchemaSource(sourceIdentifier); - - final SettableFuture res = SettableFuture.create(); - result.onComplete(new OnComplete() { + final var res = SettableFuture.create(); + remoteRepo.getYangTextSchemaSource(sourceIdentifier).onComplete(new OnComplete<>() { @Override - public void onComplete(final Throwable throwable, - final YangTextSchemaSourceSerializationProxy yangTextSchemaSourceSerializationProxy) { - if (yangTextSchemaSourceSerializationProxy != null) { - res.set(yangTextSchemaSourceSerializationProxy.getRepresentation()); + public void onComplete(final Throwable failure, final YangTextSchemaSourceSerializationProxy success) { + if (success != null) { + res.set(success.getRepresentation()); } - if (throwable != null) { - res.setException(throwable); + if (failure != null) { + res.setException(failure); } } }, executionContext); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java index 5e88952dda..eea0aa8607 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java @@ -17,9 +17,9 @@ import com.google.common.util.concurrent.MoreExecutors; import java.io.IOException; import java.util.Set; import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.YangTextSource; import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository; -import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.Future; @@ -51,16 +51,16 @@ public class RemoteYangTextSourceProviderImpl implements RemoteYangTextSourcePro LOG.trace("Sending yang schema source for {}", identifier); final Promise promise = akka.dispatch.Futures.promise(); - ListenableFuture future = - repository.getSchemaSource(identifier, YangTextSchemaSource.class); + ListenableFuture future = + repository.getSchemaSource(identifier, YangTextSource.class); - Futures.addCallback(future, new FutureCallback() { + Futures.addCallback(future, new FutureCallback() { @Override - public void onSuccess(final YangTextSchemaSource result) { + public void onSuccess(final YangTextSource result) { try { 
promise.success(new YangTextSchemaSourceSerializationProxy(result)); } catch (IOException e) { - LOG.warn("Unable to read schema source for {}", result.getIdentifier(), e); + LOG.warn("Unable to read schema source for {}", result.sourceId(), e); promise.failure(e); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java index 202de58a27..9ad9948e6c 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java @@ -5,36 +5,38 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.schema.provider.impl; import com.google.common.annotations.Beta; -import com.google.common.io.ByteSource; +import com.google.common.io.CharSource; import java.io.IOException; import java.io.Serializable; import org.opendaylight.yangtools.yang.common.Revision; -import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; +import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.YangTextSource; +import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource; /** - * {@link org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource} serialization proxy. + * {@link YangTextSource} serialization proxy. 
*/ @Beta public class YangTextSchemaSourceSerializationProxy implements Serializable { private static final long serialVersionUID = -6361268518176019477L; - private final byte[] schemaSource; + private final String schemaSource; private final Revision revision; private final String name; - public YangTextSchemaSourceSerializationProxy(final YangTextSchemaSource source) throws IOException { - this.revision = source.getIdentifier().getRevision().orElse(null); - this.name = source.getIdentifier().getName(); - this.schemaSource = source.read(); + public YangTextSchemaSourceSerializationProxy(final YangTextSource source) throws IOException { + final var sourceId = source.sourceId(); + revision = sourceId.revision(); + name = sourceId.name().getLocalName(); + schemaSource = source.read(); } - public YangTextSchemaSource getRepresentation() { - return YangTextSchemaSource.delegateForByteSource( - RevisionSourceIdentifier.create(name, revision), ByteSource.wrap(schemaSource)); + public YangTextSource getRepresentation() { + return new DelegatedYangTextSource(new SourceIdentifier(Unqualified.of(name), revision), + CharSource.wrap(schemaSource)); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java index 3bead64be8..f1c3eae731 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java @@ -10,11 +10,11 @@ package org.opendaylight.controller.cluster.common.actor; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; import com.google.common.testing.FakeTicker; import java.util.List; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; @@ -56,9 +56,8 @@ public class MessageTrackerTest { ticker.advance(20, MILLISECONDS); MessageTracker.Context context2 = messageTracker.received(new Foo()); - Assert.assertEquals(true, context2.error().isPresent()); - Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size()); - + assertEquals(true, context2.error().isPresent()); + assertEquals(0, context2.error().orElseThrow().getMessageProcessingTimesSinceLastExpectedMessage().size()); } @Test @@ -78,21 +77,21 @@ public class MessageTrackerTest { MessageTracker.Context context2 = messageTracker.received(new Foo()); - Assert.assertEquals(true, context2.error().isPresent()); + assertEquals(true, context2.error().isPresent()); - MessageTracker.Error error = context2.error().get(); + MessageTracker.Error error = context2.error().orElseThrow(); List messageProcessingTimes = error.getMessageProcessingTimesSinceLastExpectedMessage(); - Assert.assertEquals(3, messageProcessingTimes.size()); + assertEquals(3, messageProcessingTimes.size()); - Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass()); - Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass()); - Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass()); - 
Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10)); - Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass()); - Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass()); + assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass()); + assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass()); + assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass()); + assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10)); + assertEquals(Foo.class, error.getLastExpectedMessage().getClass()); + assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass()); LOG.error("An error occurred : {}" , error); } @@ -107,8 +106,7 @@ public class MessageTrackerTest { ticker.advance(1, MILLISECONDS); MessageTracker.Context context2 = messageTracker.received(new Foo()); - Assert.assertEquals(false, context2.error().isPresent()); - + assertEquals(false, context2.error().isPresent()); } @Test @@ -117,12 +115,7 @@ public class MessageTrackerTest { messageTracker.received(new Foo()); - try { - messageTracker.received(new Foo()); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - // expected - } + assertThrows(IllegalStateException.class, () -> messageTracker.received(new Foo())); } @Test @@ -139,15 +132,15 @@ public class MessageTrackerTest { MessageTracker.Context context = messageTracker.received(new Foo()); - Assert.assertEquals(true, context.error().isPresent()); + assertEquals(true, context.error().isPresent()); - MessageTracker.Error error = context.error().get(); + MessageTracker.Error error = context.error().orElseThrow(); - Assert.assertEquals(null, error.getLastExpectedMessage()); - Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass()); + assertEquals(null, error.getLastExpectedMessage()); + assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass()); String errorString = error.toString(); - Assert.assertTrue(errorString.contains("Last Expected Message = null")); + assertTrue(errorString.contains("Last Expected Message = null")); LOG.error("An error occurred : {}", error); } @@ -162,8 +155,7 @@ public class MessageTrackerTest { MessageTracker.Context context = messageTracker.received(new Foo()); - Assert.assertEquals(true, context.error().isPresent()); - + assertEquals(true, context.error().isPresent()); } @Test @@ -172,20 +164,18 @@ public class MessageTrackerTest { messageTracker.begin(); try (MessageTracker.Context ctx = messageTracker.received(45)) { - Assert.assertEquals(false, ctx.error().isPresent()); + assertEquals(false, ctx.error().isPresent()); } try (MessageTracker.Context ctx = messageTracker.received(45L)) { - Assert.assertEquals(false, ctx.error().isPresent()); + assertEquals(false, ctx.error().isPresent()); } List processingTimeList = messageTracker.getMessagesSinceLastExpectedMessage(); - Assert.assertEquals(2, processingTimeList.size()); + assertEquals(2, processingTimeList.size()); assertEquals(Integer.class, processingTimeList.get(0).getMessageClass()); assertEquals(Long.class, processingTimeList.get(1).getMessageClass()); - } - } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java index 
9af555d390..630ebecb4e 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java @@ -8,224 +8,174 @@ package org.opendaylight.controller.cluster.datastore.node.utils.stream; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import com.google.common.collect.ImmutableSet; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; -import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; -import java.nio.charset.Charset; -import java.util.Arrays; -import java.util.Set; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import javax.xml.transform.dom.DOMSource; -import org.custommonkey.xmlunit.Diff; -import org.custommonkey.xmlunit.XMLUnit; import org.junit.Test; import org.opendaylight.yangtools.util.xml.UntrustedXML; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode; -import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode; -import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; +import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; import org.opendaylight.yangtools.yang.data.api.schema.LeafNode; import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode; -import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.MapNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode; -import org.opendaylight.yangtools.yang.data.api.schema.UserMapNode; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; -import org.w3c.dom.Document; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; +import org.xmlunit.builder.DiffBuilder; public class SerializationUtilsTest { - - private static final QName CONTAINER_Q_NAME = QName.create("ns-1", "2017-03-17", "container1"); + private static final QName CONTAINER1 = QName.create("ns-1", "2017-03-17", "container1"); @Test - public void testSerializeDeserializeNodes() throws IOException { - final NormalizedNode normalizedNode = createNormalizedNode(); - final byte[] bytes = serializeNormalizedNode(normalizedNode); - assertEquals(10564, bytes.length); - assertEquals(normalizedNode, deserializeNormalizedNode(bytes)); + public void testSerializeDeserializeNodes() throws Exception { + final var normalizedNode 
= createNormalizedNode(); + final var bytes = serialize(normalizedNode); + assertEquals(10567, bytes.length); + assertEquals(normalizedNode, deserialize(bytes)); } @Test public void testSerializeDeserializeAnyXmlNode() throws Exception { - final ByteArrayInputStream is = - new ByteArrayInputStream("".getBytes(Charset.defaultCharset())); - final Document parse = UntrustedXML.newDocumentBuilder().parse(is); - final DOMSourceAnyxmlNode anyXmlNode = Builders.anyXmlBuilder() - .withNodeIdentifier(id("anyXmlNode")) - .withValue(new DOMSource(parse)) - .build(); - final byte[] bytes = serializeNormalizedNode(anyXmlNode); + final var parse = UntrustedXML.newDocumentBuilder().parse( + new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8))); + final var anyXmlNode = ImmutableNodes.newAnyxmlBuilder(DOMSource.class) + .withNodeIdentifier(id("anyXmlNode")) + .withValue(new DOMSource(parse)) + .build(); + final byte[] bytes = serialize(anyXmlNode); assertEquals(113, bytes.length); - final NormalizedNode deserialized = deserializeNormalizedNode(bytes); - final DOMSource value = (DOMSource) deserialized.body(); - final Diff diff = XMLUnit.compareXML((Document) anyXmlNode.body().getNode(), - value.getNode().getOwnerDocument()); - assertTrue(diff.toString(), diff.similar()); + + final var diff = DiffBuilder.compare(anyXmlNode.body().getNode()) + // FIXME: why all this magic? + .withTest(((DOMSource) deserialize(bytes).body()).getNode().getOwnerDocument()) + .checkForSimilar() + .build(); + assertFalse(diff.toString(), diff.hasDifferences()); } @Test public void testSerializeDeserializePath() throws IOException { - final ByteArrayOutputStream bos = new ByteArrayOutputStream(); - final DataOutput out = new DataOutputStream(bos); - final YangInstanceIdentifier path = YangInstanceIdentifier.builder() - .node(id("container1")) - .node(autmentationId("list1", "list2")) - .node(listId("list1", "keyName1", "keyValue1")) - .node(leafSetId("leafSer1", "leafSetValue1")) - .build(); - SerializationUtils.writePath(out, path); + final var path = YangInstanceIdentifier.builder() + .node(id("container1")) + .node(listId("list1", "keyName1", "keyValue1")) + .node(leafSetId("leafSer1", "leafSetValue1")) + .build(); + + final var bos = new ByteArrayOutputStream(); + try (var out = new DataOutputStream(bos)) { + SerializationUtils.writePath(out, path); + } - final byte[] bytes = bos.toByteArray(); - assertEquals(119, bytes.length); + final var bytes = bos.toByteArray(); + assertEquals(105, bytes.length); - final YangInstanceIdentifier deserialized = - SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes))); - assertEquals(path, deserialized); + assertEquals(path, SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes)))); } @Test public void testSerializeDeserializePathAndNode() throws IOException { - final ByteArrayOutputStream bos = new ByteArrayOutputStream(); - final DataOutput out = new DataOutputStream(bos); - final NormalizedNode node = createNormalizedNode(); - final YangInstanceIdentifier path = YangInstanceIdentifier.create(id("container1")); - SerializationUtils.writeNodeAndPath(out, path, node); - - final byte[] bytes = bos.toByteArray(); - assertEquals(10566, bytes.length); + final var path = YangInstanceIdentifier.of(id("container1")); + final var node = createNormalizedNode(); - final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes)); - final AtomicBoolean applierCalled = new AtomicBoolean(false); - 
SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> { - assertEquals(path, deserializedPath); - assertEquals(node, deserializedNode); - applierCalled.set(true); - }); - assertTrue(applierCalled.get()); - } - - @Test - public void testSerializeDeserializeAugmentNoref() throws IOException { - final YangInstanceIdentifier expected = YangInstanceIdentifier.create( - AugmentationIdentifier.create(ImmutableSet.of( - QName.create("foo", "leaf1"), - QName.create("bar", "leaf2")))); - - final ByteArrayOutputStream bos = new ByteArrayOutputStream(); - final DataOutput out = new DataOutputStream(bos); - SerializationUtils.writePath(out, expected); + final var bos = new ByteArrayOutputStream(); + try (var out = new DataOutputStream(bos)) { + SerializationUtils.writeNodeAndPath(out, path, node); + } final byte[] bytes = bos.toByteArray(); - assertEquals(37, bytes.length); - - final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes)); - final YangInstanceIdentifier read = SerializationUtils.readPath(in); - assertEquals(expected, read); + assertEquals(10569, bytes.length); + + final var applierCalled = new AtomicBoolean(false); + try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) { + SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> { + assertEquals(path, deserializedPath); + assertEquals(node, deserializedNode); + applierCalled.set(true); + }); + } + assertTrue(applierCalled.get()); } - private static NormalizedNode deserializeNormalizedNode(final byte[] bytes) throws IOException { - return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes))).get(); + private static NormalizedNode deserialize(final byte[] bytes) throws Exception { + return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes))) + .orElseThrow(); } - private static byte[] serializeNormalizedNode(final NormalizedNode node) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); + private static byte[] serialize(final NormalizedNode node) throws Exception { + final var bos = new ByteArrayOutputStream(); SerializationUtils.writeNormalizedNode(new DataOutputStream(bos), node); return bos.toByteArray(); } - private static NormalizedNode createNormalizedNode() { - final LeafSetNode leafSetNode = Builders.leafSetBuilder() - .withNodeIdentifier(id("leafSetNode")) - .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1")) - .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2")) - .build(); - final LeafSetNode orderedLeafSetNode = Builders.orderedLeafSetBuilder() - .withNodeIdentifier(id("orderedLeafSetNode")) - .withChild(createLeafSetEntry("orderedLeafSetNode", "value1")) - .withChild(createLeafSetEntry("orderedLeafSetNode", "value2")) - .build(); - final LeafNode booleanLeaf = createLeaf("booleanLeaf", true); - final LeafNode byteLeaf = createLeaf("byteLeaf", (byte) 0); - final LeafNode shortLeaf = createLeaf("shortLeaf", (short) 55); - final LeafNode intLeaf = createLeaf("intLeaf", 11); - final LeafNode longLeaf = createLeaf("longLeaf", 151515L); - final LeafNode stringLeaf = createLeaf("stringLeaf", "stringValue"); - final LeafNode longStringLeaf = createLeaf("longStringLeaf", getLongString()); - final LeafNode qNameLeaf = createLeaf("stringLeaf", QName.create("base", "qName")); - final LeafNode idLeaf = createLeaf("stringLeaf", YangInstanceIdentifier.empty()); - final MapEntryNode entry1 
= Builders.mapEntryBuilder() - .withNodeIdentifier(listId("mapNode", "key", "key1")) + private static ContainerNode createNormalizedNode() { + final var stringLeaf = createLeaf("stringLeaf", "stringValue"); + final var entry1 = ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(listId("mapNode", "key", "key1")) + .withChild(stringLeaf) + .build(); + final var entry2 = ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(listId("mapNode", "key", "key2")) + .withChild(stringLeaf) + .build(); + + return ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(CONTAINER1)) + .withChild(createLeaf("booleanLeaf", true)) + .withChild(createLeaf("byteLeaf", (byte) 0)) + .withChild(createLeaf("shortLeaf", (short) 55)) + .withChild(createLeaf("intLeaf", 11)) + .withChild(createLeaf("longLeaf", 151515L)) .withChild(stringLeaf) - .build(); - final MapEntryNode entry2 = Builders.mapEntryBuilder() - .withNodeIdentifier(listId("mapNode", "key", "key2")) - .withChild(stringLeaf) - .build(); - final MapNode mapNode = Builders.mapBuilder() - .withNodeIdentifier(id("mapNode")) - .withChild(entry1) - .withChild(entry2) - .build(); - final UserMapNode orderedMapNode = Builders.orderedMapBuilder() - .withNodeIdentifier(id("orderedMapNode")) - .withChild(entry2) - .withChild(entry1) - .build(); - final UnkeyedListEntryNode unkeyedListEntry1 = Builders.unkeyedListEntryBuilder() - .withNodeIdentifier(id("unkeyedList")) - .withChild(stringLeaf) - .build(); - final UnkeyedListEntryNode unkeyedListEntry2 = Builders.unkeyedListEntryBuilder() - .withNodeIdentifier(id("unkeyedList")) - .withChild(stringLeaf) - .build(); - final UnkeyedListNode unkeyedListNode = Builders.unkeyedListBuilder() - .withNodeIdentifier(id("unkeyedList")) - .withChild(unkeyedListEntry1) - .withChild(unkeyedListEntry2) - .build(); - final ImmutableSet childNames = - ImmutableSet.of(QName.create(CONTAINER_Q_NAME, "aug1"), QName.create(CONTAINER_Q_NAME, "aug1")); - final AugmentationNode augmentationNode = Builders.augmentationBuilder() - .withNodeIdentifier(new YangInstanceIdentifier.AugmentationIdentifier(childNames)) + .withChild(createLeaf("longStringLeaf", "0123456789".repeat(1000))) + .withChild(createLeaf("stringLeaf", QName.create("base", "qName"))) + .withChild(createLeaf("stringLeaf", YangInstanceIdentifier.of(QName.create("test", "test")))) + .withChild(ImmutableNodes.newSystemMapBuilder() + .withNodeIdentifier(id("mapNode")) + .withChild(entry1) + .withChild(entry2) + .build()) + .withChild(ImmutableNodes.newUserMapBuilder() + .withNodeIdentifier(id("orderedMapNode")) + .withChild(entry2) + .withChild(entry1) + .build()) + .withChild(ImmutableNodes.newUnkeyedListBuilder() + .withNodeIdentifier(id("unkeyedList")) + .withChild(ImmutableNodes.newUnkeyedListEntryBuilder() + .withNodeIdentifier(id("unkeyedList")) + .withChild(stringLeaf) + .build()) + .withChild(ImmutableNodes.newUnkeyedListEntryBuilder() + .withNodeIdentifier(id("unkeyedList")) + .withChild(stringLeaf) + .build()) + .build()) + .withChild(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(id("leafSetNode")) + .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1")) + .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2")) + .build()) + .withChild(ImmutableNodes.newUserLeafSetBuilder() + .withNodeIdentifier(id("orderedLeafSetNode")) + .withChild(createLeafSetEntry("orderedLeafSetNode", "value1")) + .withChild(createLeafSetEntry("orderedLeafSetNode", "value2")) + .build()) .withChild(createLeaf("aug1", "aug1Value")) 
.withChild(createLeaf("aug2", "aug2Value")) - .build(); - final ChoiceNode choiceNode = Builders.choiceBuilder() - .withNodeIdentifier(id("choiceNode")) - .withChild(createLeaf("choiceLeaf", 12)) - .build(); - return Builders.containerBuilder() - .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CONTAINER_Q_NAME)) - .withChild(booleanLeaf) - .withChild(byteLeaf) - .withChild(shortLeaf) - .withChild(intLeaf) - .withChild(longLeaf) - .withChild(stringLeaf) - .withChild(longStringLeaf) - .withChild(qNameLeaf) - .withChild(idLeaf) - .withChild(mapNode) - .withChild(orderedMapNode) - .withChild(unkeyedListNode) - .withChild(leafSetNode) - .withChild(orderedLeafSetNode) - .withChild(augmentationNode) - .withChild(choiceNode) + .withChild(ImmutableNodes.newChoiceBuilder() + .withNodeIdentifier(id("choiceNode")) + .withChild(createLeaf("choiceLeaf", 12)) + .build()) .build(); } @@ -234,39 +184,20 @@ public class SerializationUtilsTest { } private static LeafSetEntryNode createLeafSetEntry(final String leafSet, final String value) { - return Builders.leafSetEntryBuilder() - .withNodeIdentifier(leafSetId(leafSet, value)) - .withValue(value) - .build(); + return ImmutableNodes.leafSetEntry(leafSetId(leafSet, value)); } - private static YangInstanceIdentifier.NodeIdentifier id(final String name) { - return new YangInstanceIdentifier.NodeIdentifier(QName.create(CONTAINER_Q_NAME, name)); + private static NodeIdentifier id(final String name) { + return new NodeIdentifier(QName.create(CONTAINER1, name)); } - private static YangInstanceIdentifier.NodeIdentifierWithPredicates listId(final String listName, - final String keyName, - final Object keyValue) { - return YangInstanceIdentifier.NodeIdentifierWithPredicates.of(QName.create(CONTAINER_Q_NAME, listName), - QName.create(CONTAINER_Q_NAME, keyName), keyValue); + private static NodeIdentifierWithPredicates listId(final String listName, final String keyName, + final Object keyValue) { + return NodeIdentifierWithPredicates.of(QName.create(CONTAINER1, listName), QName.create(CONTAINER1, keyName), + keyValue); } - private static YangInstanceIdentifier.NodeWithValue leafSetId(final String node, final T value) { - return new YangInstanceIdentifier.NodeWithValue<>(QName.create(CONTAINER_Q_NAME, node), value); - } - - private static YangInstanceIdentifier.AugmentationIdentifier autmentationId(final String... 
nodes) { - final Set qNames = Arrays.stream(nodes) - .map(node -> QName.create(CONTAINER_Q_NAME, node)) - .collect(Collectors.toSet()); - return new YangInstanceIdentifier.AugmentationIdentifier(qNames); - } - - private static String getLongString() { - final StringBuilder builder = new StringBuilder(10000); - for (int i = 0; i < 1000; i++) { - builder.append("0123456789"); - } - return builder.toString(); + private static NodeWithValue leafSetId(final String node, final T value) { + return new NodeWithValue<>(QName.create(CONTAINER1, node), value); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java index 8001ebd051..6b150131b3 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java @@ -11,11 +11,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; +import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.containerNode; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder; -import com.google.common.collect.Sets; import java.io.IOException; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; @@ -26,19 +26,16 @@ import org.mockito.junit.MockitoJUnitRunner; import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeNavigator; import org.opendaylight.controller.cluster.datastore.util.TestModel; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; -import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode; +import org.opendaylight.yangtools.yang.data.api.schema.AnyxmlNode; +import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; import org.opendaylight.yangtools.yang.data.api.schema.LeafNode; import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.MapNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import org.opendaylight.yangtools.yang.data.api.schema.SystemLeafSetNode; +import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; -import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder; -import 
org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; @RunWith(MockitoJUnitRunner.StrictStubs.class) @@ -78,7 +75,6 @@ public class NormalizedNodePrunerTest { NormalizedNode actual = pruner.getResult().orElseThrow(); assertEquals(expected, actual); - } @Test(expected = IllegalStateException.class) @@ -96,10 +92,8 @@ public class NormalizedNodePrunerTest { assertEquals(expected, actual); NormalizedNodeWriter.forStreamWriter(pruner).write(expected); - } - @Test public void testNodesPrunedWhenAugmentationSchemaMissing() throws IOException { AbstractNormalizedNodePruner pruner = prunerNoAugSchema(TestModel.TEST_PATH); @@ -136,7 +130,6 @@ public class NormalizedNodePrunerTest { // Asserting true here instead of checking actual value because I don't want this assertion to be fragile assertTrue(countNodes(expected, "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test") > 0); - } private static int countNodes(final NormalizedNode normalizedNode, final String namespaceFilter) { @@ -145,12 +138,10 @@ public class NormalizedNodePrunerTest { } final AtomicInteger count = new AtomicInteger(); new NormalizedNodeNavigator((level, parentPath, normalizedNode1) -> { - if (!(normalizedNode1.getIdentifier() instanceof AugmentationIdentifier)) { - if (normalizedNode1.getIdentifier().getNodeType().getNamespace().toString().contains(namespaceFilter)) { - count.incrementAndGet(); - } + if (normalizedNode1.name().getNodeType().getNamespace().toString().contains(namespaceFilter)) { + count.incrementAndGet(); } - }).navigate(YangInstanceIdentifier.empty().toString(), normalizedNode); + }).navigate(YangInstanceIdentifier.of().toString(), normalizedNode); return count.get(); } @@ -158,33 +149,16 @@ public class NormalizedNodePrunerTest { @Test public void testLeafNodeNotPrunedWhenHasNoParent() throws IOException { AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.DESC_QNAME)); - NormalizedNode input = Builders.leafBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build(); + NormalizedNode input = ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test"); NormalizedNodeWriter.forStreamWriter(pruner).write(input); assertEquals("normalizedNode", input, pruner.getResult().orElseThrow()); } - @Test - public void testLeafNodePrunedWhenHasAugmentationParentAndSchemaMissing() throws IOException { - AugmentationIdentifier augId = new AugmentationIdentifier(Sets.newHashSet(TestModel.AUG_CONT_QNAME)); - AbstractNormalizedNodePruner pruner = prunerFullSchema(YangInstanceIdentifier.builder() - .node(TestModel.TEST_QNAME).node(TestModel.AUGMENTED_LIST_QNAME) - .node(TestModel.AUGMENTED_LIST_QNAME).node(augId).build()); - LeafNode child = Builders.leafBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build(); - NormalizedNode input = Builders.augmentationBuilder().withNodeIdentifier(augId).withChild(child).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); - - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", Builders.augmentationBuilder().withNodeIdentifier(augId).build(), actual); - } - @Test public void testLeafNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException { AbstractNormalizedNodePruner pruner = 
prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME)); - NormalizedNode input = Builders.leafBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build(); + LeafNode input = ImmutableNodes.leafNode(TestModel.INVALID_QNAME, "test"); NormalizedNodeWriter.forStreamWriter(pruner).write(input); assertEquals(Optional.empty(), pruner.getResult()); @@ -193,8 +167,7 @@ public class NormalizedNodePrunerTest { @Test public void testLeafSetEntryNodeNotPrunedWhenHasNoParent() throws IOException { AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME)); - NormalizedNode input = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier( - new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build(); + LeafSetEntryNode input = ImmutableNodes.leafSetEntry(TestModel.SHOE_QNAME, "puma"); NormalizedNodeWriter.forStreamWriter(pruner).write(input); NormalizedNode actual = pruner.getResult().orElseThrow(); @@ -204,10 +177,10 @@ public class NormalizedNodePrunerTest { @Test public void testLeafSetEntryNodeNotPrunedWhenHasParent() throws IOException { AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME)); - LeafSetEntryNode child = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier( - new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build(); - NormalizedNode input = Builders.leafSetBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.SHOE_QNAME)).withChild(child).build(); + SystemLeafSetNode input = ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(TestModel.SHOE_QNAME)) + .withChildValue("puma") + .build(); NormalizedNodeWriter.forStreamWriter(pruner).write(input); NormalizedNode actual = pruner.getResult().orElseThrow(); @@ -217,8 +190,7 @@ public class NormalizedNodePrunerTest { @Test public void testLeafSetEntryNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException { AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME)); - NormalizedNode input = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier( - new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build(); + LeafSetEntryNode input = ImmutableNodes.leafSetEntry(TestModel.INVALID_QNAME, "test"); NormalizedNodeWriter.forStreamWriter(pruner).write(input); assertEquals(Optional.empty(), pruner.getResult()); @@ -227,11 +199,10 @@ public class NormalizedNodePrunerTest { @Test public void testLeafSetEntryNodePrunedWhenHasParentAndSchemaMissing() throws IOException { AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME)); - LeafSetEntryNode child = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier( - new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build(); - NormalizedNode input = Builders.leafSetBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.INVALID_QNAME)).withChild(child).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); + NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(TestModel.INVALID_QNAME)) + .withChildValue("test") + .build()); assertEquals(Optional.empty(), pruner.getResult()); } @@ -239,33 +210,37 @@ public class NormalizedNodePrunerTest { @Test public void testAnyXMLNodeNotPrunedWhenHasNoParent() throws IOException { AbstractNormalizedNodePruner pruner = 
prunerFullSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME)); - NormalizedNode input = Builders.anyXmlBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build(); + AnyxmlNode input = ImmutableNodes.newAnyxmlBuilder(DOMSource.class) + .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME)) + .withValue(mock(DOMSource.class)) + .build(); NormalizedNodeWriter.forStreamWriter(pruner).write(input); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", input, actual); + assertEquals(input, pruner.getResult().orElseThrow()); } @Test public void testAnyXMLNodeNotPrunedWhenHasParent() throws IOException { - AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH); - DOMSourceAnyxmlNode child = Builders.anyXmlBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build(); - NormalizedNode input = Builders.containerBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.TEST_QNAME)).withChild(child).build(); + final var pruner = prunerFullSchema(TestModel.TEST_PATH); + final var input = ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)) + .withChild(ImmutableNodes.newAnyxmlBuilder(DOMSource.class) + .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME)) + .withValue(mock(DOMSource.class)) + .build()) + .build(); NormalizedNodeWriter.forStreamWriter(pruner).write(input); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", input, actual); + assertEquals(input, pruner.getResult().orElseThrow()); } @Test public void testAnyXmlNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException { AbstractNormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME)); - NormalizedNode input = Builders.anyXmlBuilder().withNodeIdentifier( - new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); + NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.newAnyxmlBuilder(DOMSource.class) + .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME)) + .withValue(mock(DOMSource.class)) + .build()); assertEquals(Optional.empty(), pruner.getResult()); } @@ -278,11 +253,10 @@ public class NormalizedNodePrunerTest { .node(TestModel.INNER_CONTAINER_QNAME).build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - NormalizedNode input = ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME); + ContainerNode input = containerNode(TestModel.INNER_CONTAINER_QNAME); NormalizedNodeWriter.forStreamWriter(pruner).write(input); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", input, actual); + assertEquals(input, pruner.getResult().orElseThrow()); } @Test @@ -293,8 +267,7 @@ public class NormalizedNodePrunerTest { .node(TestModel.INVALID_QNAME).build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - NormalizedNode input = ImmutableNodes.containerNode(TestModel.INVALID_QNAME); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); + NormalizedNodeWriter.forStreamWriter(pruner).write(containerNode(TestModel.INVALID_QNAME)); assertEquals(Optional.empty(), pruner.getResult()); } @@ -306,18 +279,20 @@ public class NormalizedNodePrunerTest { .build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - MapNode 
innerList = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder( - TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild( - ImmutableNodes.containerNode(TestModel.INVALID_QNAME)).build()).build(); - NormalizedNode input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1) - .withChild(innerList).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); - - NormalizedNode expected = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1) - .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder( - TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build()).build()).build(); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", expected, actual); + NormalizedNodeWriter.forStreamWriter(pruner) + .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1) + .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME) + .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one") + .withChild(containerNode(TestModel.INVALID_QNAME)) + .build()) + .build()) + .build()); + + assertEquals(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1) + .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME) + .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build()) + .build()) + .build(), pruner.getResult().orElseThrow()); } @Test @@ -327,13 +302,14 @@ public class NormalizedNodePrunerTest { .node(TestModel.INNER_LIST_QNAME).build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - MapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder( - TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild( - ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build(); + SystemMapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME) + .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one") + .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME)) + .build()) + .build(); NormalizedNodeWriter.forStreamWriter(pruner).write(input); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", input, actual); + assertEquals(input, pruner.getResult().orElseThrow()); } @Test @@ -343,10 +319,11 @@ public class NormalizedNodePrunerTest { .node(TestModel.INVALID_QNAME).build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - MapNode input = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder( - TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild( - ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); + NormalizedNodeWriter.forStreamWriter(pruner).write(mapNodeBuilder(TestModel.INVALID_QNAME) + .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one") + .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME)) + .build()) + .build()); assertEquals(Optional.empty(), pruner.getResult()); } @@ -358,32 +335,27 @@ public class NormalizedNodePrunerTest { .build(); AbstractNormalizedNodePruner pruner = prunerFullSchema(path); - MapNode innerList = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder( - TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild( - ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build(); - NormalizedNode input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, 
TestModel.ID_QNAME, 1) - .withChild(innerList).build(); - NormalizedNodeWriter.forStreamWriter(pruner).write(input); - - NormalizedNode expected = mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1); - NormalizedNode actual = pruner.getResult().orElseThrow(); - assertEquals("normalizedNode", expected, actual); + NormalizedNodeWriter.forStreamWriter(pruner) + .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1) + .withChild(mapNodeBuilder(TestModel.INVALID_QNAME) + .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one") + .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME)) + .build()) + .build()) + .build()); + + assertEquals(mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), + pruner.getResult().orElseThrow()); } - private static NormalizedNode createTestContainer() { - byte[] bytes1 = {1, 2, 3}; - LeafSetEntryNode entry1 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier( - new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes1)).withValue(bytes1).build(); - - byte[] bytes2 = {}; - LeafSetEntryNode entry2 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier( - new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes2)).withValue(bytes2).build(); - + private static ContainerNode createTestContainer() { return TestModel.createBaseTestContainerBuilder() - .withChild(ImmutableLeafSetNodeBuilder.create().withNodeIdentifier( - new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME)) - .withChild(entry1).withChild(entry2).build()) - .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[]{1, 2, 3, 4})) - .build(); + .withChild(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME)) + .withChildValue(new byte[] {1, 2, 3}) + .withChildValue(new byte[0]) + .build()) + .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[] {1, 2, 3, 4})) + .build(); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java index bba8739813..b29113cc8e 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java @@ -22,11 +22,9 @@ import org.opendaylight.yangtools.yang.common.Uint8; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; import 
org.opendaylight.yangtools.yang.test.util.YangParserTestUtils; @@ -55,9 +53,9 @@ public class UintAdaptingPrunerTest { @Test public void testListTranslation() throws IOException { - assertEquals(Builders.mapBuilder() + assertEquals(ImmutableNodes.newSystemMapBuilder() .withNodeIdentifier(new NodeIdentifier(LST)) - .withChild(Builders.mapEntryBuilder() + .withChild(ImmutableNodes.newMapEntryBuilder() .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.builder() .put(A, (byte) 1) .put(B, (short) 1) @@ -78,9 +76,9 @@ public class UintAdaptingPrunerTest { .withChild(ImmutableNodes.leafNode(H, Uint64.ONE)) .build()) .build(), - prune(Builders.mapBuilder() + prune(ImmutableNodes.newSystemMapBuilder() .withNodeIdentifier(new NodeIdentifier(LST)) - .withChild(Builders.mapEntryBuilder() + .withChild(ImmutableNodes.newMapEntryBuilder() .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.builder() .put(A, (byte) 1) .put(B, (short) 1) @@ -105,7 +103,7 @@ public class UintAdaptingPrunerTest { @Test public void testContainerTranslation() throws IOException { - assertEquals(Builders.containerBuilder() + assertEquals(ImmutableNodes.newContainerBuilder() .withNodeIdentifier(new NodeIdentifier(CONT)) .withChild(ImmutableNodes.leafNode(A, (byte) 1)) .withChild(ImmutableNodes.leafNode(B, (short) 1)) @@ -116,7 +114,7 @@ public class UintAdaptingPrunerTest { .withChild(ImmutableNodes.leafNode(G, Uint32.ONE)) .withChild(ImmutableNodes.leafNode(H, Uint64.ONE)) .build(), - prune(Builders.containerBuilder() + prune(ImmutableNodes.newContainerBuilder() .withNodeIdentifier(new NodeIdentifier(CONT)) .withChild(ImmutableNodes.leafNode(A, (byte) 1)) .withChild(ImmutableNodes.leafNode(B, (short) 1)) @@ -131,85 +129,60 @@ public class UintAdaptingPrunerTest { @Test public void testLeafList8() throws IOException { - assertEquals(Builders.leafSetBuilder() + assertEquals(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST8)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST8, Uint8.ONE)) - .withValue(Uint8.ONE) - .build()) + .withChildValue(Uint8.ONE) .build(), - prune(Builders.leafSetBuilder() + prune(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST8)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST8, (short) 1)) - .withValue((short) 1) - .build()) + .withChildValue((short) 1) .build())); } @Test public void testLeafList16() throws IOException { - assertEquals(Builders.leafSetBuilder() + assertEquals(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST16)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST16, Uint16.ONE)) - .withValue(Uint16.ONE) - .build()) + .withChildValue(Uint16.ONE) .build(), - prune(Builders.leafSetBuilder() + prune(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST16)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST16, 1)) - .withValue(1) - .build()) + .withChildValue(1) .build())); } @Test public void testLeafList32() throws IOException { - assertEquals(Builders.leafSetBuilder() + assertEquals(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST32)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST32, Uint32.ONE)) - .withValue(Uint32.ONE) - .build()) + .withChildValue(Uint32.ONE) 
.build(), - prune(Builders.leafSetBuilder() + prune(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST32)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST32, 1L)) - .withValue(1L) - .build()) + .withChildValue(1L) .build())); } @Test public void testLeafList64() throws IOException { - assertEquals(Builders.leafSetBuilder() + assertEquals(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST64)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST64, Uint64.ONE)) - .withValue(Uint64.ONE) - .build()) + .withChildValue(Uint64.ONE) .build(), - prune(Builders.leafSetBuilder() + prune(ImmutableNodes.newSystemLeafSetBuilder() .withNodeIdentifier(new NodeIdentifier(LFLST64)) - .withChild(Builders.leafSetEntryBuilder() - .withNodeIdentifier(new NodeWithValue<>(LFLST64, BigInteger.ONE)) - .withValue(BigInteger.ONE) - .build()) + .withChildValue(BigInteger.ONE) .build())); } private static NormalizedNode prune(final NormalizedNode node) throws IOException { - final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT) - .withUintAdaption(); - pruner.initializeForPath(YangInstanceIdentifier.create(node.getIdentifier())); + final var pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT).withUintAdaption(); + pruner.initializeForPath(YangInstanceIdentifier.of(node.name())); try (NormalizedNodeWriter writer = NormalizedNodeWriter.forStreamWriter(pruner)) { writer.write(node); } pruner.close(); - return pruner.getResult().get(); + return pruner.getResult().orElseThrow(); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java index 7120c31de0..37d102d0f3 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java @@ -10,39 +10,22 @@ package org.opendaylight.controller.cluster.datastore.util; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder; import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder; +import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import java.io.InputStream; -import java.math.BigDecimal; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; +import java.util.List; +import org.opendaylight.yangtools.yang.common.Decimal64; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.common.Uint64; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates; import 
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue; -import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode; import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; -import org.opendaylight.yangtools.yang.data.api.schema.LeafNode; -import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode; import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode; -import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode; -import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder; import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder; -import org.opendaylight.yangtools.yang.data.api.schema.builder.NormalizedNodeBuilder; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; -import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder; -import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder; -import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder; -import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils; @@ -185,123 +168,69 @@ public final class TestModel { } public static DataContainerNodeBuilder createBaseTestContainerBuilder() { - // Create a list of shoes - // This is to test leaf list entry - final LeafSetEntryNode nike = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier( - new NodeWithValue<>(SHOE_QNAME, "nike")).withValue("nike").build(); - - final LeafSetEntryNode puma = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier( - new NodeWithValue<>(SHOE_QNAME, "puma")).withValue("puma").build(); - - final LeafSetNode shoes = ImmutableLeafSetNodeBuilder.create().withNodeIdentifier( - new NodeIdentifier(SHOE_QNAME)).withChild(nike).withChild(puma).build(); - - // Test a leaf-list where each entry contains an identity - final LeafSetEntryNode cap1 = - ImmutableLeafSetEntryNodeBuilder - .create() - .withNodeIdentifier( - new NodeWithValue<>(QName.create( - TEST_QNAME, "capability"), DESC_QNAME)) - .withValue(DESC_QNAME).build(); - - final LeafSetNode capabilities = - ImmutableLeafSetNodeBuilder - .create() - .withNodeIdentifier( - new NodeIdentifier(QName.create( - TEST_QNAME, "capability"))).withChild(cap1).build(); - - ContainerNode switchFeatures = - ImmutableContainerNodeBuilder - .create() - .withNodeIdentifier( - new NodeIdentifier(SWITCH_FEATURES_QNAME)) - .withChild(capabilities).build(); - - // Create a leaf list with numbers - final LeafSetEntryNode five = - ImmutableLeafSetEntryNodeBuilder - .create() - .withNodeIdentifier( - new NodeWithValue<>(QName.create( - TEST_QNAME, "number"), 5)).withValue(5).build(); - final LeafSetEntryNode fifteen = - ImmutableLeafSetEntryNodeBuilder - .create() - .withNodeIdentifier( - new NodeWithValue<>(QName.create( - TEST_QNAME, "number"), 15)).withValue(15).build(); - final LeafSetNode numbers = - ImmutableLeafSetNodeBuilder - .create() - .withNodeIdentifier( - new 
NodeIdentifier(QName.create( - TEST_QNAME, "number"))).withChild(five).withChild(fifteen) - .build(); - - - // Create augmentations - MapEntryNode augMapEntry = createAugmentedListEntry(1, "First Test"); - - // Create a bits leaf - NormalizedNodeBuilder> - myBits = Builders.leafBuilder() - .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "my-bits"))) - .withValue(ImmutableSet.of("foo", "bar")); - - // Create unkeyed list entry - UnkeyedListEntryNode unkeyedListEntry = Builders.unkeyedListEntryBuilder() - .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME)) - .withChild(ImmutableNodes.leafNode(NAME_QNAME, "unkeyed-entry-name")) - .build(); - // Create YangInstanceIdentifier with all path arg types. - YangInstanceIdentifier instanceID = YangInstanceIdentifier.create( - new NodeIdentifier(QName.create(TEST_QNAME, "qname")), - NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"), - QName.create(TEST_QNAME, "key"), 10), - new AugmentationIdentifier(ImmutableSet.of( - QName.create(TEST_QNAME, "aug1"), QName.create(TEST_QNAME, "aug2"))), - new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo")); - - Map keyValues = new HashMap<>(); - keyValues.put(CHILDREN_QNAME, FIRST_CHILD_NAME); - + YangInstanceIdentifier instanceID = YangInstanceIdentifier.of( + new NodeIdentifier(QName.create(TEST_QNAME, "qname")), + NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"), + QName.create(TEST_QNAME, "key"), 10), + new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo")); // Create the document - return ImmutableContainerNodeBuilder - .create() - .withNodeIdentifier(new NodeIdentifier(TEST_QNAME)) - .withChild(myBits.build()) - .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC)) - .withChild(ImmutableNodes.leafNode(BOOLEAN_LEAF_QNAME, ENABLED)) - .withChild(ImmutableNodes.leafNode(SHORT_LEAF_QNAME, SHORT_ID)) - .withChild(ImmutableNodes.leafNode(BYTE_LEAF_QNAME, BYTE_ID)) - .withChild(ImmutableNodes.leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100))) - .withChild(ImmutableNodes.leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, BigDecimal.valueOf(1.2))) - .withChild(ImmutableNodes.leafNode(SOME_REF_QNAME, instanceID)) - .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME)) - .withChild(Builders.unkeyedListBuilder() - .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME)) - .withChild(unkeyedListEntry).build()) - .withChild(Builders.choiceBuilder() - .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME)) - .withChild(ImmutableNodes.leafNode(TWO_QNAME, "two")).build()) - .withChild(Builders.orderedMapBuilder() - .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME)) - .withValue(ImmutableList.builder().add( - mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(), - mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()).build()) - .build()) - .withChild(shoes) - .withChild(numbers) - .withChild(switchFeatures) - .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(augMapEntry).build()) - .withChild(mapNodeBuilder(OUTER_LIST_QNAME) - .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID)) - .withChild(BAR_NODE).build() - ); + return ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(TEST_QNAME)) + // Create a bits leaf + .withChild(leafNode(QName.create(TEST_QNAME, "my-bits"), ImmutableSet.of("foo", "bar"))) + .withChild(leafNode(DESC_QNAME, DESC)) + .withChild(leafNode(BOOLEAN_LEAF_QNAME, ENABLED)) + 
.withChild(leafNode(SHORT_LEAF_QNAME, SHORT_ID)) + .withChild(leafNode(BYTE_LEAF_QNAME, BYTE_ID)) + .withChild(leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100))) + .withChild(leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, Decimal64.valueOf("1.2").scaleTo(2))) + .withChild(leafNode(SOME_REF_QNAME, instanceID)) + .withChild(leafNode(MYIDENTITY_QNAME, DESC_QNAME)) + .withChild(ImmutableNodes.newUnkeyedListBuilder() + .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME)) + // Create unkeyed list entry + .withChild(ImmutableNodes.newUnkeyedListEntryBuilder() + .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME)) + .withChild(leafNode(NAME_QNAME, "unkeyed-entry-name")) + .build()) + .build()) + .withChild(ImmutableNodes.newChoiceBuilder() + .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME)) + .withChild(leafNode(TWO_QNAME, "two")).build()) + .withChild(ImmutableNodes.newUserMapBuilder() + .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME)) + .withValue(List.of( + mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(), + mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build())) + .build()) + .withChild(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(SHOE_QNAME)) + .withChildValue("nike") + .withChildValue("puma") + .build()) + .withChild(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number"))) + .withChildValue(5) + .withChildValue(15) + .build()) + .withChild(ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(SWITCH_FEATURES_QNAME)) + // Test a leaf-list where each entry contains an identity + .withChild(ImmutableNodes.newSystemLeafSetBuilder() + .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "capability"))) + .withChildValue(DESC_QNAME) + .build()) + .build()) + .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME) + // Create augmentations + .withChild(createAugmentedListEntry(1, "First Test")) + .build()) + .withChild(mapNodeBuilder(OUTER_LIST_QNAME) + .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID)) + .withChild(BAR_NODE) + .build()); } public static ContainerNode createTestContainer() { @@ -309,76 +238,41 @@ public final class TestModel { } public static MapEntryNode createAugmentedListEntry(final int id, final String name) { - Set childAugmentations = new HashSet<>(); - childAugmentations.add(AUG_CONT_QNAME); - - ContainerNode augCont = ImmutableContainerNodeBuilder.create() - .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME)) - .withChild(ImmutableNodes.leafNode(AUG_NAME_QNAME, name)) - .build(); - - - final AugmentationIdentifier augmentationIdentifier = new AugmentationIdentifier(childAugmentations); - final AugmentationNode augmentationNode = - Builders.augmentationBuilder() - .withNodeIdentifier(augmentationIdentifier).withChild(augCont) - .build(); - - return ImmutableMapEntryNodeBuilder.create() - .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id)) - .withChild(ImmutableNodes.leafNode(ID_QNAME, id)) - .withChild(augmentationNode).build(); + return ImmutableNodes.newMapEntryBuilder() + .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id)) + .withChild(leafNode(ID_QNAME, id)) + .withChild(ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME)) + .withChild(leafNode(AUG_NAME_QNAME, name)) + .build()) + .build(); } public static ContainerNode createFamily() { - 
final DataContainerNodeBuilder - familyContainerBuilder = ImmutableContainerNodeBuilder.create().withNodeIdentifier( - new NodeIdentifier(FAMILY_QNAME)); - - final CollectionNodeBuilder childrenBuilder = mapNodeBuilder() - .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME)); - - final DataContainerNodeBuilder - firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID); - final DataContainerNodeBuilder - secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID); - - final DataContainerNodeBuilder - firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, - FIRST_GRAND_CHILD_ID); - final DataContainerNodeBuilder - secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, - SECOND_GRAND_CHILD_ID); - - firstGrandChildBuilder - .withChild( - ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, - FIRST_GRAND_CHILD_ID)).withChild( - ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, - FIRST_GRAND_CHILD_NAME)); - - secondGrandChildBuilder.withChild( - ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID)) - .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME)); - - firstChildBuilder - .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID)) - .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME)) - .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME) - .withChild(firstGrandChildBuilder.build()) - .build()); - - - secondChildBuilder - .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID)) - .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME)) - .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME) - .withChild(firstGrandChildBuilder.build()) - .build()); - - childrenBuilder.withChild(firstChildBuilder.build()); - childrenBuilder.withChild(secondChildBuilder.build()); - - return familyContainerBuilder.withChild(childrenBuilder.build()).build(); + final var firstGrandChildBuilder = mapEntryBuilder( + GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID) + .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME,FIRST_GRAND_CHILD_ID)) + .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME)); + + return ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME)) + .withChild(ImmutableNodes.newSystemMapBuilder() + .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME)) + .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID) + .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID)) + .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME)) + .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME) + .withChild(firstGrandChildBuilder.build()) + .build()) + .build()) + .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID) + .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID)) + .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME)) + .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME) + .withChild(firstGrandChildBuilder.build()) + .build()) + .build()) + .build()) + .build(); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java index 56c78f1c70..a93b6a8baa 100644 --- 
a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java @@ -70,8 +70,8 @@ public class ChunkedOutputStreamTest { } int counter = 0; - for (byte[] chunk: assertFinishedStream(size, 2)) { - for (byte actual: chunk) { + for (byte[] chunk : assertFinishedStream(size, 2)) { + for (byte actual : chunk) { assertEquals((byte) counter++, actual); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java index ff5d61b41d..da5c5b6763 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java @@ -18,7 +18,6 @@ import com.google.common.base.Stopwatch; import com.google.common.util.concurrent.Uninterruptibles; import java.io.File; import java.io.IOException; -import java.io.InputStream; import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.junit.After; @@ -98,16 +97,14 @@ public class FileBackedOutputStreamTest { assertEquals("Temp file", tempFileName, findTempFileName(TEMP_DIR)); assertEquals("Size", bytes.length, fbos.asByteSource().size()); - InputStream inputStream = fbos.asByteSource().openStream(); + try (var inputStream = fbos.asByteSource().openStream()) { + assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read()); - assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read()); - - byte[] inBytes = new byte[bytes.length]; - assertEquals("# bytes read", bytes.length, inputStream.read(inBytes)); - assertArrayEquals("Read InputStream", bytes, inBytes); - assertEquals("End of stream", -1, inputStream.read()); - - inputStream.close(); + byte[] inBytes = new byte[bytes.length]; + assertEquals("# bytes read", bytes.length, inputStream.read(inBytes)); + assertArrayEquals("Read InputStream", bytes, inBytes); + assertEquals("End of stream", -1, inputStream.read()); + } fbos.cleanup(); @@ -182,27 +179,27 @@ public class FileBackedOutputStreamTest { fail("Temp file was not deleted"); } - static String findTempFileName(String dirPath) { + static String findTempFileName(final String dirPath) { String[] files = new File(dirPath).list(); assertNotNull(files); assertTrue("Found more than one temp file: " + Arrays.toString(files), files.length < 2); return files.length == 1 ? 
files[0] : null; } - static boolean deleteFile(String file) { + static boolean deleteFile(final String file) { return new File(file).delete(); } - static void deleteTempFiles(String path) { + static void deleteTempFiles(final String path) { String[] files = new File(path).list(); if (files != null) { - for (String file: files) { + for (String file : files) { deleteFile(path + File.separator + file); } } } - static void createDir(String path) { + static void createDir(final String path) { File dir = new File(path); if (!dir.exists() && !dir.mkdirs()) { throw new RuntimeException("Failed to create temp dir " + path); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java index 4441857f39..af0027bdfe 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -22,7 +22,7 @@ public class AbortSlicingTest { @Test public void testSerialization() { AbortSlicing expected = new AbortSlicing(new StringIdentifier("test")); - AbortSlicing cloned = (AbortSlicing) SerializationUtils.clone(expected); + AbortSlicing cloned = SerializationUtils.clone(expected); assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier()); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java index 17b54a045d..128a0442e3 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java @@ -105,7 +105,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest { final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class); assertFailedMessageSliceReply(reply, IDENTIFIER, false); - assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause()); + assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause()); assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier)); verify(mockFiledBackedStream).cleanup(); @@ -130,7 +130,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest { final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class); assertFailedMessageSliceReply(reply, IDENTIFIER, false); - assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause()); + assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause()); assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier)); verify(mockFiledBackedStream).cleanup(); @@ -173,11 +173,11 @@ public class MessageAssemblerTest extends 
AbstractMessagingTest { } } - private MessageAssembler newMessageAssembler(String logContext) { + private MessageAssembler newMessageAssembler(final String logContext) { return newMessageAssemblerBuilder(logContext).build(); } - private Builder newMessageAssemblerBuilder(String logContext) { + private Builder newMessageAssemblerBuilder(final String logContext) { return MessageAssembler.builder().fileBackedStreamFactory(mockFiledBackedStreamFactory) .assembledMessageCallback(mockAssembledMessageCallback).logContext(logContext); } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java index 9c80033b92..c5dbcdd13c 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging; import static org.junit.Assert.assertEquals; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.Test; /** @@ -22,7 +22,7 @@ public class MessageSliceIdentifierTest { @Test public void testSerialization() { MessageSliceIdentifier expected = new MessageSliceIdentifier(new StringIdentifier("test"), 123L); - MessageSliceIdentifier cloned = (MessageSliceIdentifier) SerializationUtils.clone(expected); + MessageSliceIdentifier cloned = SerializationUtils.clone(expected); assertEquals("cloned", expected, cloned); assertEquals("getClientIdentifier", expected.getClientIdentifier(), cloned.getClientIdentifier()); assertEquals("getSlicerId", expected.getSlicerId(), cloned.getSlicerId()); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java index 51c4479119..8b661f68d7 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java @@ -16,7 +16,7 @@ import akka.actor.ExtendedActorSystem; import akka.serialization.JavaSerializer; import akka.testkit.TestProbe; import akka.testkit.javadsl.TestKit; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -48,7 +48,7 @@ public class MessageSliceReplyTest { private void testSuccess() { MessageSliceReply expected = MessageSliceReply.success(new StringIdentifier("test"), 3, TestProbe.apply(actorSystem).ref()); - MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected); + MessageSliceReply cloned = SerializationUtils.clone(expected); assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier()); assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex()); @@ -59,15 +59,15 @@ public class MessageSliceReplyTest { private void testFailure() { MessageSliceReply expected = 
MessageSliceReply.failed(new StringIdentifier("test"), new MessageSliceException("mock", true), TestProbe.apply(actorSystem).ref()); - MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected); + MessageSliceReply cloned = SerializationUtils.clone(expected); assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier()); assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex()); assertEquals("getSendTo", expected.getSendTo(), cloned.getSendTo()); assertTrue("getFailure present", cloned.getFailure().isPresent()); - assertEquals("getFailure message", expected.getFailure().get().getMessage(), - cloned.getFailure().get().getMessage()); - assertEquals("getFailure isRetriable", expected.getFailure().get().isRetriable(), - cloned.getFailure().get().isRetriable()); + assertEquals("getFailure message", expected.getFailure().orElseThrow().getMessage(), + cloned.getFailure().orElseThrow().getMessage()); + assertEquals("getFailure isRetriable", expected.getFailure().orElseThrow().isRetriable(), + cloned.getFailure().orElseThrow().isRetriable()); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java index dc2e6de9d7..afb764091c 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java @@ -15,7 +15,7 @@ import akka.actor.ExtendedActorSystem; import akka.serialization.JavaSerializer; import akka.testkit.TestProbe; import akka.testkit.javadsl.TestKit; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -35,7 +35,7 @@ public class MessageSliceTest { @After public void tearDown() { - TestKit.shutdownActorSystem(actorSystem, Boolean.TRUE); + TestKit.shutdownActorSystem(actorSystem, true); } @Test @@ -50,7 +50,7 @@ public class MessageSliceTest { MessageSlice expected = new MessageSlice(new StringIdentifier("test"), data, 2, 3, 54321, TestProbe.apply(actorSystem).ref()); - MessageSlice cloned = (MessageSlice) SerializationUtils.clone(expected); + MessageSlice cloned = SerializationUtils.clone(expected); assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier()); assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex()); diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java index 61c0617799..63b37e12b0 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java @@ -328,7 +328,7 @@ public class MessageSlicingIntegrationTest { assertEquals("Identifier", identifier, ((MessageSliceIdentifier)reply.getIdentifier()) .getClientIdentifier()); assertEquals("Failure present", Boolean.TRUE, 
reply.getFailure().isPresent()); - assertEquals("isRetriable", isRetriable, reply.getFailure().get().isRetriable()); + assertEquals("isRetriable", isRetriable, reply.getFailure().orElseThrow().isRetriable()); } static void assertMessageSlice(final MessageSlice sliceMessage, final Identifier identifier, final int sliceIndex, diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java index d45042567d..c9ab83e762 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java @@ -35,7 +35,7 @@ import java.io.IOException; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.SerializationUtils; +import org.apache.commons.lang3.SerializationUtils; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java index 2a8ddd2e56..611bebfd61 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java @@ -7,34 +7,28 @@ */ package org.opendaylight.controller.cluster.schema.provider.impl; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.instanceOf; -import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import akka.dispatch.ExecutionContexts; import akka.dispatch.Futures; import com.google.common.io.CharSource; -import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.concurrent.ExecutionException; import org.junit.Before; import org.junit.Test; import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider; -import org.opendaylight.yangtools.yang.common.Revision; -import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException; -import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; +import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource; public class RemoteSchemaProviderTest { - private static final SourceIdentifier ID = 
RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30")); + private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30"); private RemoteSchemaProvider remoteSchemaProvider; private RemoteYangTextSourceProvider mockedRemoteSchemaRepository; @@ -48,28 +42,24 @@ public class RemoteSchemaProviderTest { @Test public void getExistingYangTextSchemaSource() throws IOException, InterruptedException, ExecutionException { - YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(ID, - CharSource.wrap("Test").asByteSource(StandardCharsets.UTF_8)); + final var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test")); doReturn(Futures.successful(new YangTextSchemaSourceSerializationProxy(schemaSource))) .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID); - YangTextSchemaSource providedSource = remoteSchemaProvider.getSource(ID).get(); - assertEquals(ID, providedSource.getIdentifier()); - assertArrayEquals(schemaSource.read(), providedSource.read()); + final var providedSource = remoteSchemaProvider.getSource(ID).get(); + assertEquals(ID, providedSource.sourceId()); + assertEquals(schemaSource.read(), providedSource.read()); } @Test public void getNonExistingSchemaSource() throws InterruptedException { - doReturn(Futures.failed(new SchemaSourceException("Source not provided"))) - .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID); + final var exception = new SchemaSourceException(ID, "Source not provided"); + doReturn(Futures.failed(exception)).when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID); - ListenableFuture sourceFuture = remoteSchemaProvider.getSource(ID); + final var sourceFuture = remoteSchemaProvider.getSource(ID); assertTrue(sourceFuture.isDone()); - try { - sourceFuture.get(); - fail("Expected a failure to occur"); - } catch (ExecutionException e) { - assertThat(e.getCause(), instanceOf(SchemaSourceException.class)); - } + + final var cause = assertThrows(ExecutionException.class, sourceFuture::get).getCause(); + assertSame(exception, cause); } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java index 1fda858fdd..a63be0aa00 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java @@ -7,75 +7,76 @@ */ package org.opendaylight.controller.cluster.schema.provider.impl; -import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doReturn; -import com.google.common.io.ByteSource; +import com.google.common.io.CharSource; import com.google.common.util.concurrent.Futures; import java.util.Collections; import java.util.Set; import org.junit.Before; import org.junit.Test; -import org.mockito.Mockito; -import org.opendaylight.yangtools.yang.common.Revision; -import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier; +import 
org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.YangTextSource; import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository; import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException; -import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; +import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource; import scala.concurrent.Await; -import scala.concurrent.Future; import scala.concurrent.duration.FiniteDuration; +@RunWith(MockitoJUnitRunner.StrictStubs.class) public class RemoteYangTextSourceProviderImplTest { + private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30"); - private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30")); + @Mock + private SchemaRepository mockedLocalRepository; private RemoteYangTextSourceProviderImpl remoteRepository; - private SchemaRepository mockedLocalRepository; private final Set providedSources = Collections.singleton(ID); @Before public void setUp() { - mockedLocalRepository = Mockito.mock(SchemaRepository.class); - remoteRepository = new RemoteYangTextSourceProviderImpl(mockedLocalRepository, providedSources); } @Test public void testGetExistingYangTextSchemaSource() throws Exception { - String source = "Test source."; - YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource( - ID, ByteSource.wrap(source.getBytes())); - Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn( - Futures.immediateFuture(schemaSource)); + var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test source.")); + + doReturn(Futures.immediateFuture(schemaSource)).when(mockedLocalRepository) + .getSchemaSource(ID, YangTextSource.class); - Future retrievedSourceFuture = - remoteRepository.getYangTextSchemaSource(ID); + var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID); assertTrue(retrievedSourceFuture.isCompleted()); - YangTextSchemaSource resultSchemaSource = Await.result(retrievedSourceFuture, - FiniteDuration.Zero()).getRepresentation(); - assertEquals(resultSchemaSource.getIdentifier(), schemaSource.getIdentifier()); - assertArrayEquals(resultSchemaSource.read(), schemaSource.read()); + var resultSchemaSource = Await.result(retrievedSourceFuture, FiniteDuration.Zero()).getRepresentation(); + assertEquals(resultSchemaSource.sourceId(), schemaSource.sourceId()); + assertEquals(resultSchemaSource.read(), schemaSource.read()); } - @Test(expected = SchemaSourceException.class) + @Test public void testGetNonExistentYangTextSchemaSource() throws Exception { - Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn( - Futures.immediateFailedFuture(new SchemaSourceException("Source is not provided"))); + final var exception = new SchemaSourceException(ID, "Source is not provided"); + + doReturn(Futures.immediateFailedFuture(exception)).when(mockedLocalRepository) + .getSchemaSource(ID, YangTextSource.class); - Future retrievedSourceFuture = - remoteRepository.getYangTextSchemaSource(ID); + var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID); assertTrue(retrievedSourceFuture.isCompleted()); - Await.result(retrievedSourceFuture, 
FiniteDuration.Zero()); + + final var ex = assertThrows(SchemaSourceException.class, + () -> Await.result(retrievedSourceFuture, FiniteDuration.Zero())); + assertSame(ex, exception); } @Test public void testGetProvidedSources() throws Exception { - Set remoteProvidedSources = Await.result(remoteRepository - .getProvidedSources(), FiniteDuration.Zero()); + var remoteProvidedSources = Await.result(remoteRepository.getProvidedSources(), FiniteDuration.Zero()); assertEquals(providedSources, remoteProvidedSources); } - } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java index 084fd5242f..ced954640c 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java @@ -5,64 +5,55 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.schema.provider.impl; -import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import com.google.common.io.ByteSource; +import com.google.common.io.CharSource; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; -import java.nio.charset.StandardCharsets; import org.junit.Before; import org.junit.Test; -import org.opendaylight.yangtools.yang.common.Revision; -import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier; -import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource; +import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier; +import org.opendaylight.yangtools.yang.model.api.source.YangTextSource; +import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource; public class YangTextSourceSerializationProxyTest { - - private YangTextSchemaSource schemaSource; + private YangTextSource schemaSource; @Before public void setUp() { - String source = "Test source."; - schemaSource = YangTextSchemaSource.delegateForByteSource( - RevisionSourceIdentifier.create("test", Revision.of("2015-10-30")), - ByteSource.wrap(source.getBytes(StandardCharsets.UTF_8))); + schemaSource = new DelegatedYangTextSource(new SourceIdentifier("test", "2015-10-30"), + CharSource.wrap("Test source.")); } - @Test public void serializeAndDeserializeProxy() throws ClassNotFoundException, IOException { - YangTextSchemaSourceSerializationProxy proxy = new YangTextSchemaSourceSerializationProxy(schemaSource); + final var proxy = new YangTextSchemaSourceSerializationProxy(schemaSource); ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream oos = new ObjectOutputStream(bos); oos.writeObject(proxy); final byte[] bytes = bos.toByteArray(); - assertEquals(353, bytes.length); + assertEquals(323, bytes.length); ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes)); - YangTextSchemaSourceSerializationProxy deserializedProxy = - 
(YangTextSchemaSourceSerializationProxy) ois.readObject(); + final var deserializedProxy = (YangTextSchemaSourceSerializationProxy) ois.readObject(); - assertEquals(deserializedProxy.getRepresentation().getIdentifier(), proxy.getRepresentation().getIdentifier()); - assertArrayEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read()); + assertEquals(deserializedProxy.getRepresentation().sourceId(), proxy.getRepresentation().sourceId()); + assertEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read()); } @Test public void testProxyEqualsBackingYangTextSource() throws IOException { - YangTextSchemaSourceSerializationProxy serializationProxy = - new YangTextSchemaSourceSerializationProxy(schemaSource); + final var serializationProxy = new YangTextSchemaSourceSerializationProxy(schemaSource); - assertEquals(serializationProxy.getRepresentation().getIdentifier(), schemaSource.getIdentifier()); - assertArrayEquals(serializationProxy.getRepresentation().read(), schemaSource.read()); + assertEquals(serializationProxy.getRepresentation().sourceId(), schemaSource.sourceId()); + assertEquals(serializationProxy.getRepresentation().read(), schemaSource.read()); } } diff --git a/opendaylight/md-sal/sal-clustering-config/pom.xml b/opendaylight/md-sal/sal-clustering-config/pom.xml index af8b64bbb0..4c62912307 100644 --- a/opendaylight/md-sal/sal-clustering-config/pom.xml +++ b/opendaylight/md-sal/sal-clustering-config/pom.xml @@ -12,13 +12,13 @@ org.opendaylight.odlparent odlparent-lite - 9.0.12 + 13.0.11 org.opendaylight.controller sal-clustering-config - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT jar Configuration files for md-sal clustering diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg index 678903c74c..b03a4a114e 100644 --- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg +++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg @@ -89,11 +89,6 @@ operational.persistent=false # for a message slice. This needs to be below Akka's maximum-frame-size and defaults to 480KiB. maximum-message-slice-size=491520 -# Enable tell-based protocol between frontend (applications) and backend (shards). Using this protocol -# should avoid AskTimeoutExceptions seen under heavy load. Defaults to false (use tell-based protocol). -# Set to false to enable ask-based protocol. -use-tell-based-protocol=true - # Tune the maximum number of entries a follower is allowed to lag behind the leader before it is # considered out-of-sync. This flag may require tuning in face of a large number of small transactions. #sync-index-threshold=10 diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf index 8f9b5041ee..9834e08ea8 100644 --- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf +++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf @@ -163,6 +163,9 @@ odl-cluster-data { max-entry-size = 16M # Maximum size of a segment max-segment-size = 128M + # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size. + # Set to <= 0 to flush immediately. + #max-unflushed-bytes = 1M # Map each segment into memory. 
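#
# A minimal sketch, assuming only the defaults quoted in the hunk above, of how the new
# max-unflushed-bytes knob could be set explicitly next to the existing keys of the same
# journal section (illustrative values, not part of the patch itself):
#
#   max-entry-size = 16M
#   max-segment-size = 128M
#   # flush storage after at most 1M of unsynchronized writes; <= 0 flushes immediately
#   max-unflushed-bytes = 1M
#   memory-mapped = true
#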
Defaults to true, use false to keep a heap-based # buffer instead. memory-mapped = true @@ -181,6 +184,9 @@ odl-cluster-data { max-entry-size = 512K # Maximum size of a segment max-segment-size = 1M + # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size. + # Set to <= 0 to flush immediately. + #max-unflushed-bytes = 128K # Map each segment into memory. Note that while this can improve performance, # it will also place additional burden on system resources. memory-mapped = false diff --git a/opendaylight/md-sal/sal-common-util/pom.xml b/opendaylight/md-sal/sal-common-util/pom.xml index ba89b84cd8..a0bf479f16 100644 --- a/opendaylight/md-sal/sal-common-util/pom.xml +++ b/opendaylight/md-sal/sal-common-util/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -13,8 +13,8 @@ - com.google.guava - guava + org.eclipse.jdt + org.eclipse.jdt.annotation org.opendaylight.yangtools diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java index 223132c95b..53d7a2f22a 100644 --- a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java +++ b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.md.sal.common.util.jmx; -import com.google.common.annotations.Beta; import java.lang.management.ManagementFactory; import javax.management.InstanceAlreadyExistsException; import javax.management.InstanceNotFoundException; @@ -32,9 +31,7 @@ import org.slf4j.LoggerFactory; * * @author Thomas Pantelis */ -@Beta public abstract class AbstractMXBean { - private static final Logger LOG = LoggerFactory.getLogger(AbstractMXBean.class); public static final String BASE_JMX_PREFIX = "org.opendaylight.controller:"; @@ -89,7 +86,7 @@ public abstract class AbstractMXBean { boolean registered = false; try { // Object to identify MBean - final ObjectName mbeanName = this.getMBeanObjectName(); + final ObjectName mbeanName = getMBeanObjectName(); LOG.debug("Register MBean {}", mbeanName); @@ -129,16 +126,13 @@ public abstract class AbstractMXBean { * @return true is successfully unregistered, false otherwise. 
*/ public boolean unregisterMBean() { - boolean unregister = false; try { - ObjectName mbeanName = this.getMBeanObjectName(); - unregisterMBean(mbeanName); - unregister = true; + unregisterMBean(getMBeanObjectName()); + return true; } catch (MBeanRegistrationException | InstanceNotFoundException | MalformedObjectNameException e) { LOG.debug("Failed when unregistering MBean", e); + return false; } - - return unregister; } private void unregisterMBean(ObjectName mbeanName) throws MBeanRegistrationException, diff --git a/opendaylight/md-sal/sal-distributed-datastore/pom.xml b/opendaylight/md-sal/sal-distributed-datastore/pom.xml index 96c97d5179..2d397cdcf5 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/pom.xml +++ b/opendaylight/md-sal/sal-distributed-datastore/pom.xml @@ -4,7 +4,7 @@ org.opendaylight.controller mdsal-parent - 5.0.0-SNAPSHOT + 9.0.3-SNAPSHOT ../parent @@ -12,6 +12,12 @@ bundle + + com.github.spotbugs + spotbugs-annotations + true + + org.slf4j @@ -21,11 +27,19 @@ org.osgi - osgi.core + org.osgi.framework + + + org.osgi + org.osgi.service.component org.osgi - osgi.cmpn + org.osgi.service.component.annotations + + + org.osgi + org.osgi.service.metatype.annotations @@ -104,15 +118,30 @@ org.opendaylight.mdsal mdsal-binding-dom-codec-api + + org.opendaylight.mdsal + mdsal-common-api + + + org.opendaylight.mdsal + mdsal-dom-api + + + org.opendaylight.mdsal + mdsal-dom-spi + org.opendaylight.mdsal mdsal-dom-broker - org.opendaylight.yangtools concepts + + org.opendaylight.yangtools + util + org.opendaylight.mdsal yang-binding @@ -129,6 +158,26 @@ org.opendaylight.yangtools yang-data-impl + + org.opendaylight.yangtools + yang-data-tree-api + + + org.opendaylight.yangtools + yang-data-tree-spi + + + org.opendaylight.yangtools + yang-data-tree-ri + + + org.opendaylight.yangtools + yang-data-util + + + org.opendaylight.yangtools + yang-data-codec-binfmt + org.opendaylight.yangtools yang-data-codec-xml @@ -137,6 +186,18 @@ org.opendaylight.yangtools yang-data-codec-gson + + org.opendaylight.yangtools + yang-model-api + + + org.opendaylight.yangtools + yang-model-spi + + + org.opendaylight.yangtools + yang-model-util + org.apache.commons commons-lang3 @@ -146,18 +207,6 @@ commons-text - - io.atomix - atomix-storage - 3.1.5 - test - - - io.atomix - atomix-utils - 3.1.5 - test - org.awaitility awaitility @@ -168,15 +217,15 @@ commons-io test - - commons-lang - commons-lang - test - org.opendaylight.yangtools yang-test-util + + org.opendaylight.mdsal + mdsal-binding-dom-codec + test + org.opendaylight.mdsal mdsal-binding-test-utils @@ -193,9 +242,6 @@ ${project.groupId}.${project.artifactId} - - <_dsannotations-options>norequirements - org.opendaylight.controller.cluster.datastore; org.opendaylight.controller.cluster.datastore.config; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java index 05af18d32f..60a72b07f9 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java @@ -8,7 +8,6 @@ package org.opendaylight.controller.cluster.akka.osgi.impl; import akka.actor.ActorSystem; -import 
com.typesafe.config.Config; import java.util.concurrent.TimeoutException; import org.opendaylight.controller.cluster.ActorSystemProvider; import org.opendaylight.controller.cluster.ActorSystemProviderListener; @@ -30,26 +29,12 @@ import scala.concurrent.duration.Duration; public final class OSGiActorSystemProvider implements ActorSystemProvider { private static final Logger LOG = LoggerFactory.getLogger(OSGiActorSystemProvider.class); - @Reference - AkkaConfigurationReader reader = null; - private ActorSystemProviderImpl delegate; - @Override - public ActorSystem getActorSystem() { - return delegate.getActorSystem(); - } - - @Override - public ListenerRegistration registerActorSystemProviderListener( - final ActorSystemProviderListener listener) { - return delegate.registerActorSystemProviderListener(listener); - } - @Activate - void activate(final BundleContext bundleContext) { + public OSGiActorSystemProvider(@Reference final AkkaConfigurationReader reader, final BundleContext bundleContext) { LOG.info("Actor System provider starting"); - final Config akkaConfig = AkkaConfigFactory.createAkkaConfig(reader); + final var akkaConfig = AkkaConfigFactory.createAkkaConfig(reader); delegate = new ActorSystemProviderImpl(BundleClassLoaderFactory.createClassLoader(bundleContext), QuarantinedMonitorActorPropsFactory.createProps(bundleContext, akkaConfig), akkaConfig); LOG.info("Actor System provider started"); @@ -62,5 +47,16 @@ public final class OSGiActorSystemProvider implements ActorSystemProvider { delegate = null; LOG.info("Actor System provider stopped"); } + + @Override + public ActorSystem getActorSystem() { + return delegate.getActorSystem(); + } + + @Override + public ListenerRegistration registerActorSystemProviderListener( + final ActorSystemProviderListener listener) { + return delegate.registerActorSystemProviderListener(listener); + } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java deleted file mode 100644 index 91fd64d366..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import static com.google.common.base.Preconditions.checkState; - -import com.google.common.collect.ClassToInstanceMap; -import com.google.common.collect.ImmutableClassToInstanceMap; -import com.google.common.collect.ImmutableClassToInstanceMap.Builder; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.EnumMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension; -import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry; -import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; -import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; -import org.opendaylight.mdsal.dom.spi.PingPongMergingDOMDataBroker; -import org.opendaylight.mdsal.dom.spi.store.DOMStore; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher; -import org.opendaylight.yangtools.concepts.ListenerRegistration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory - implements PingPongMergingDOMDataBroker { - - private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class); - - private final AtomicLong txNum = new AtomicLong(); - private final AtomicLong chainNum = new AtomicLong(); - private final ClassToInstanceMap extensions; - - private volatile AutoCloseable closeable; - - protected AbstractDOMBroker(final Map datastores) { - super(datastores); - - Builder extBuilder = ImmutableClassToInstanceMap.builder(); - if (isSupported(datastores, DOMStoreTreeChangePublisher.class)) { - extBuilder.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() { - @Override - public ListenerRegistration registerDataTreeChangeListener( - final DOMDataTreeIdentifier treeId, final L listener) { - DOMStore store = getDOMStore(treeId.getDatastoreType()); - return ((DOMStoreTreeChangePublisher) store).registerTreeChangeListener( - treeId.getRootIdentifier(), listener); - } - }); - } - - if (isSupported(datastores, DOMDataTreeCommitCohortRegistry.class)) { - extBuilder.put(DOMDataTreeCommitCohortRegistry.class, new DOMDataTreeCommitCohortRegistry() { - @Override - public DOMDataTreeCommitCohortRegistration registerCommitCohort( - final DOMDataTreeIdentifier path, final T cohort) { - DOMStore store = getDOMStore(path.getDatastoreType()); - return ((DOMDataTreeCommitCohortRegistry) store).registerCommitCohort(path, cohort); - } - }); - } - - extensions = extBuilder.build(); - } - - private static boolean isSupported(final Map datastores, - final Class expDOMStoreInterface) { - return datastores.values().stream().allMatch(expDOMStoreInterface::isInstance); - } - - public void setCloseable(final AutoCloseable closeable) { - this.closeable 
= closeable; - } - - @Override - @SuppressWarnings("checkstyle:IllegalCatch") - public void close() { - super.close(); - - if (closeable != null) { - try { - closeable.close(); - } catch (Exception e) { - LOG.debug("Error closing instance", e); - } - } - } - - @Override - protected Object newTransactionIdentifier() { - return "DOM-" + txNum.getAndIncrement(); - } - - @Override - public ClassToInstanceMap getExtensions() { - return extensions; - } - - @Override - public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) { - checkNotClosed(); - - final Map backingChains = - new EnumMap<>(LogicalDatastoreType.class); - for (Map.Entry entry : getTxFactories().entrySet()) { - backingChains.put(entry.getKey(), entry.getValue().createTransactionChain()); - } - - final long chainId = chainNum.getAndIncrement(); - LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener, - backingChains); - return new DOMBrokerTransactionChain(chainId, backingChains, this, listener); - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private DOMStore getDOMStore(final LogicalDatastoreType type) { - DOMStore store = getTxFactories().get(type); - checkState(store != null, "Requested logical data store is not available."); - return store; - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java deleted file mode 100644 index 2655b61f68..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Objects.requireNonNull; - -import com.google.common.base.MoreObjects; -import com.google.common.base.MoreObjects.ToStringHelper; -import java.util.Collection; -import java.util.EnumMap; -import java.util.Map; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; - -public abstract class AbstractDOMBrokerTransaction implements DOMDataTreeTransaction { - - private final EnumMap backingTxs; - private final Object identifier; - private final Map storeTxFactories; - - /** - * Creates new composite Transactions. - * - * @param identifier Identifier of transaction. 
- */ - protected AbstractDOMBrokerTransaction(final Object identifier, - Map storeTxFactories) { - this.identifier = requireNonNull(identifier, "Identifier should not be null"); - this.storeTxFactories = requireNonNull(storeTxFactories, "Store Transaction Factories should not be null"); - this.backingTxs = new EnumMap<>(LogicalDatastoreType.class); - } - - /** - * Returns subtransaction associated with supplied key. - * - * @param key the data store type key - * @return the subtransaction - * @throws NullPointerException - * if key is null - * @throws IllegalArgumentException - * if no subtransaction is associated with key. - */ - protected final T getSubtransaction(final LogicalDatastoreType key) { - requireNonNull(key, "key must not be null."); - - T ret = backingTxs.get(key); - if (ret == null) { - ret = createTransaction(key); - backingTxs.put(key, ret); - } - checkArgument(ret != null, "No subtransaction associated with %s", key); - return ret; - } - - protected abstract T createTransaction(LogicalDatastoreType key); - - /** - * Returns immutable Iterable of all subtransactions. - * - */ - protected Collection getSubtransactions() { - return backingTxs.values(); - } - - @Override - public Object getIdentifier() { - return identifier; - } - - @SuppressWarnings("checkstyle:IllegalCatch") - protected void closeSubtransactions() { - /* - * We share one exception for all failures, which are added - * as supressedExceptions to it. - */ - IllegalStateException failure = null; - for (T subtransaction : backingTxs.values()) { - try { - subtransaction.close(); - } catch (Exception e) { - // If we did not allocated failure we allocate it - if (failure == null) { - failure = new IllegalStateException("Uncaught exception occured during closing transaction", e); - } else { - // We update it with additional exceptions, which occurred during error. - failure.addSuppressed(e); - } - } - } - // If we have failure, we throw it at after all attempts to close. - if (failure != null) { - throw failure; - } - } - - protected DOMStoreTransactionFactory getTxFactory(LogicalDatastoreType type) { - return storeTxFactories.get(type); - } - - @Override - public final String toString() { - return addToStringAttributes(MoreObjects.toStringHelper(this).omitNullValues()).toString(); - } - - protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) { - return toStringHelper.add("identifier", identifier); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java deleted file mode 100644 index 02e9e047f4..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; -import static java.util.Objects.requireNonNull; - -import com.google.common.base.MoreObjects.ToStringHelper; -import com.google.common.util.concurrent.FluentFuture; -import com.google.common.util.concurrent.Futures; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Map; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import org.opendaylight.mdsal.common.api.CommitInfo; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public abstract class AbstractDOMBrokerWriteTransaction - extends AbstractDOMBrokerTransaction implements DOMDataTreeWriteTransaction { - - @SuppressWarnings("rawtypes") - private static final AtomicReferenceFieldUpdater - IMPL_UPDATER = AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class, - AbstractDOMTransactionFactory.class, "commitImpl"); - @SuppressWarnings("rawtypes") - private static final AtomicReferenceFieldUpdater FUTURE_UPDATER = - AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class, Future.class, - "commitFuture"); - private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBrokerWriteTransaction.class); - private static final Future CANCELLED_FUTURE = Futures.immediateCancelledFuture(); - - /** - * Implementation of real commit. It also acts as an indication that - * the transaction is running -- which we flip atomically using - * {@link #IMPL_UPDATER}. - */ - private volatile AbstractDOMTransactionFactory commitImpl; - - /** - * Future task of transaction commit. It starts off as null, but is - * set appropriately on {@link #submit()} and {@link #cancel()} via - * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}. - *
    - * <p>
    - * Lazy set is safe for use because it is only referenced to in the - * {@link #cancel()} slow path, where we will busy-wait for it. The - * fast path gets the benefit of a store-store barrier instead of the - * usual store-load barrier. - */ - private volatile Future commitFuture; - - protected AbstractDOMBrokerWriteTransaction(final Object identifier, - final Map storeTxFactories, - final AbstractDOMTransactionFactory commitImpl) { - super(identifier, storeTxFactories); - this.commitImpl = requireNonNull(commitImpl, "commitImpl must not be null."); - } - - @Override - public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) { - checkRunning(commitImpl); - checkInstanceIdentifierReferencesData(path,data); - getSubtransaction(store).write(path, data); - } - - private static void checkInstanceIdentifierReferencesData(final YangInstanceIdentifier path, - final NormalizedNode data) { - checkArgument(data != null, "Attempted to store null data at %s", path); - final PathArgument lastArg = path.getLastPathArgument(); - if (lastArg != null) { - checkArgument(lastArg.equals(data.getIdentifier()), - "Instance identifier references %s but data identifier is %s", lastArg, data); - } - } - - @Override - public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) { - checkRunning(commitImpl); - getSubtransaction(store).delete(path); - } - - @Override - public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) { - checkRunning(commitImpl); - checkInstanceIdentifierReferencesData(path, data); - getSubtransaction(store).merge(path, data); - } - - @Override - public boolean cancel() { - final AbstractDOMTransactionFactory impl = IMPL_UPDATER.getAndSet(this, null); - if (impl != null) { - LOG.trace("Transaction {} cancelled before submit", getIdentifier()); - FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE); - closeSubtransactions(); - return true; - } - - // The transaction is in process of being submitted or cancelled. Busy-wait - // for the corresponding future. 
- Future future; - do { - future = commitFuture; - } - while (future == null); - - return future.cancel(false); - } - - @Override - @SuppressWarnings("checkstyle:IllegalCatch") - public FluentFuture commit() { - final AbstractDOMTransactionFactory impl = IMPL_UPDATER.getAndSet(this, null); - checkRunning(impl); - - final Collection txns = getSubtransactions(); - final Collection cohorts = new ArrayList<>(txns.size()); - - FluentFuture ret; - try { - for (final T txn : txns) { - cohorts.add(txn.ready()); - } - - ret = impl.commit(this, cohorts); - } catch (RuntimeException e) { - ret = FluentFuture.from(Futures.immediateFailedFuture( - TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e))); - } - FUTURE_UPDATER.lazySet(this, ret); - return ret; - } - - private void checkRunning(final AbstractDOMTransactionFactory impl) { - checkState(impl != null, "Transaction %s is no longer running", getIdentifier()); - } - - @Override - protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) { - return super.addToStringAttributes(toStringHelper).add("running", commitImpl == null); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java deleted file mode 100644 index 91ca744f76..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.databroker; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.FluentFuture; -import java.util.Collection; -import java.util.EnumMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import org.opendaylight.mdsal.common.api.CommitInfo; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; - -public abstract class AbstractDOMTransactionFactory implements AutoCloseable { - @SuppressWarnings("rawtypes") - private static final AtomicIntegerFieldUpdater UPDATER = - AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed"); - private final Map storeTxFactories; - private volatile int closed = 0; - - protected AbstractDOMTransactionFactory(final Map txFactories) { - this.storeTxFactories = new EnumMap<>(txFactories); - } - - /** - * Implementations must return unique identifier for each and every call of - * this method. - * - * @return new Unique transaction identifier. - */ - protected abstract Object newTransactionIdentifier(); - - /** - * Submits a transaction asynchronously for commit. 
- * - * @param transaction the transaction to submit - * @param cohorts the associated cohorts - * @return a resulting Future - */ - protected abstract FluentFuture commit(DOMDataTreeWriteTransaction transaction, - Collection cohorts); - - /** - * Creates a new read-only transaction. - * - * @return the transaction instance - */ - public final DOMDataTreeReadTransaction newReadOnlyTransaction() { - checkNotClosed(); - - return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories); - } - - - /** - * Creates a new write-only transaction. - * - * @return the transaction instance - */ - public final DOMDataTreeWriteTransaction newWriteOnlyTransaction() { - checkNotClosed(); - - return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this); - } - - - /** - * Creates a new read-write transaction. - * - * @return the transaction instance - */ - public final DOMDataTreeReadWriteTransaction newReadWriteTransaction() { - checkNotClosed(); - - return new DOMBrokerReadWriteTransaction(newTransactionIdentifier(), storeTxFactories, this); - } - - /** - * Convenience accessor of backing factories intended to be used only by - * finalization of this class. - * - * Note: - * Finalization of this class may want to access other functionality of - * supplied Transaction factories. - * - * @return Map of backing transaction factories. - */ - public final Map getTxFactories() { - return storeTxFactories; - } - - /** - * Checks if instance is not closed. - * - * @throws IllegalStateException If instance of this class was closed. - * - */ - protected final void checkNotClosed() { - Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed."); - } - - @Override - public void close() { - final boolean success = UPDATER.compareAndSet(this, 0, 1); - Preconditions.checkState(success, "Transaction factory was already closed"); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java index dce32b6a74..d6da8487e0 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java @@ -9,21 +9,16 @@ package org.opendaylight.controller.cluster.databroker; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; -import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER; -import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER; -import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER; +import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER; +import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER; +import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER; import com.google.common.annotations.Beta; import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.FluentFuture; import 
com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.Executor; import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; @@ -32,11 +27,18 @@ import org.opendaylight.mdsal.common.api.CommitInfo; import org.opendaylight.mdsal.common.api.DataStoreUnavailableException; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.common.api.TransactionCommitFailedException; +import org.opendaylight.mdsal.dom.api.DOMDataBroker; import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper; +import org.opendaylight.mdsal.dom.spi.AbstractDOMDataBroker; +import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper; import org.opendaylight.mdsal.dom.spi.store.DOMStore; import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; import org.opendaylight.yangtools.util.DurationStatisticsTracker; +import org.opendaylight.yangtools.yang.common.Empty; +import org.osgi.service.component.annotations.Activate; +import org.osgi.service.component.annotations.Component; +import org.osgi.service.component.annotations.Deactivate; +import org.osgi.service.component.annotations.Reference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +50,8 @@ import org.slf4j.LoggerFactory; * @author Thomas Pantelis */ @Beta -public class ConcurrentDOMDataBroker extends AbstractDOMBroker { +@Component(service = DOMDataBroker.class, property = "type=default") +public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker { private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class); private static final String CAN_COMMIT = "CAN_COMMIT"; private static final String PRE_COMMIT = "PRE_COMMIT"; @@ -69,142 +72,97 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker { public ConcurrentDOMDataBroker(final Map datastores, final Executor listenableFutureExecutor, final DurationStatisticsTracker commitStatsTracker) { super(datastores); - this.clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor); + clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor); this.commitStatsTracker = requireNonNull(commitStatsTracker); } - public DurationStatisticsTracker getCommitStatsTracker() { - return commitStatsTracker; + @Activate + public ConcurrentDOMDataBroker(@Reference final DataBrokerCommitExecutor commitExecutor, + @Reference(target = "(type=distributed-config)") final DOMStore configDatastore, + @Reference(target = "(type=distributed-operational)") final DOMStore operDatastore) { + this(Map.of( + LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore), + commitExecutor.executor(), commitExecutor.commitStatsTracker()); + LOG.info("DOM Data Broker started"); + } + + @Override + @Deactivate + public void close() { + LOG.info("DOM Data Broker stopping"); + super.close(); + LOG.info("DOM Data Broker stopped"); } @Override protected FluentFuture commit(final DOMDataTreeWriteTransaction transaction, - final Collection cohorts) { + final DOMStoreThreePhaseCommitCohort cohort) { 
checkArgument(transaction != null, "Transaction must not be null."); - checkArgument(cohorts != null, "Cohorts must not be null."); + checkArgument(cohort != null, "Cohorts must not be null."); LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier()); - if (cohorts.isEmpty()) { - return CommitInfo.emptyFluentFuture(); - } - - final AsyncNotifyingSettableFuture clientSubmitFuture = - new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor); - - doCanCommit(clientSubmitFuture, transaction, cohorts); - - return FluentFuture.from(clientSubmitFuture).transform(ignored -> CommitInfo.empty(), - MoreExecutors.directExecutor()); + final var clientSubmitFuture = new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor); + doCanCommit(clientSubmitFuture, transaction, cohort); + return FluentFuture.from(clientSubmitFuture); } private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture, final DOMDataTreeWriteTransaction transaction, - final Collection cohorts) { - + final DOMStoreThreePhaseCommitCohort cohort) { final long startTime = System.nanoTime(); - final Iterator cohortIterator = cohorts.iterator(); - - // Not using Futures.allAsList here to avoid its internal overhead. - FutureCallback futureCallback = new FutureCallback() { + Futures.addCallback(cohort.canCommit(), new FutureCallback<>() { @Override public void onSuccess(final Boolean result) { if (result == null || !result) { - handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, - new TransactionCommitFailedException("Can Commit failed, no detailed cause available.")); - } else if (!cohortIterator.hasNext()) { - // All cohorts completed successfully - we can move on to the preCommit phase - doPreCommit(startTime, clientSubmitFuture, transaction, cohorts); + onFailure(new TransactionCommitFailedException("Can Commit failed, no detailed cause available.")); } else { - Futures.addCallback(cohortIterator.next().canCommit(), this, MoreExecutors.directExecutor()); + doPreCommit(startTime, clientSubmitFuture, transaction, cohort); } } @Override public void onFailure(final Throwable failure) { - handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure); + handleException(clientSubmitFuture, transaction, cohort, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure); } - }; - - Futures.addCallback(cohortIterator.next().canCommit(), futureCallback, MoreExecutors.directExecutor()); + }, MoreExecutors.directExecutor()); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture, - final DOMDataTreeWriteTransaction transaction, - final Collection cohorts) { - - final Iterator cohortIterator = cohorts.iterator(); - - // Not using Futures.allAsList here to avoid its internal overhead. 
- FutureCallback futureCallback = new FutureCallback() { + final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) { + Futures.addCallback(cohort.preCommit(), new FutureCallback<>() { @Override - public void onSuccess(final Void notUsed) { - if (!cohortIterator.hasNext()) { - // All cohorts completed successfully - we can move on to the commit phase - doCommit(startTime, clientSubmitFuture, transaction, cohorts); - } else { - ListenableFuture preCommitFuture = cohortIterator.next().preCommit(); - Futures.addCallback(preCommitFuture, this, MoreExecutors.directExecutor()); - } + public void onSuccess(final Empty result) { + doCommit(startTime, clientSubmitFuture, transaction, cohort); } @Override public void onFailure(final Throwable failure) { - handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT, PRE_COMMIT_MAPPER, failure); + handleException(clientSubmitFuture, transaction, cohort, PRE_COMMIT, PRE_COMMIT_MAPPER, failure); } - }; - - ListenableFuture preCommitFuture = cohortIterator.next().preCommit(); - Futures.addCallback(preCommitFuture, futureCallback, MoreExecutors.directExecutor()); + }, MoreExecutors.directExecutor()); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture, - final DOMDataTreeWriteTransaction transaction, - final Collection cohorts) { - - final Iterator cohortIterator = cohorts.iterator(); - - // Not using Futures.allAsList here to avoid its internal overhead. - FutureCallback futureCallback = new FutureCallback() { + final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) { + Futures.addCallback(cohort.commit(), new FutureCallback() { @Override - public void onSuccess(final Void notUsed) { - if (!cohortIterator.hasNext()) { - // All cohorts completed successfully - we're done. - commitStatsTracker.addDuration(System.nanoTime() - startTime); - - clientSubmitFuture.set(); - } else { - ListenableFuture commitFuture = cohortIterator.next().commit(); - Futures.addCallback(commitFuture, this, MoreExecutors.directExecutor()); - } + public void onSuccess(final CommitInfo result) { + commitStatsTracker.addDuration(System.nanoTime() - startTime); + clientSubmitFuture.set(); } @Override public void onFailure(final Throwable throwable) { - handleException(clientSubmitFuture, transaction, cohorts, COMMIT, COMMIT_ERROR_MAPPER, throwable); + handleException(clientSubmitFuture, transaction, cohort, COMMIT, COMMIT_ERROR_MAPPER, throwable); } - }; - - ListenableFuture commitFuture = cohortIterator.next().commit(); - Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor()); + }, MoreExecutors.directExecutor()); } - @SuppressFBWarnings(value = { "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE", "UPM_UNCALLED_PRIVATE_METHOD" }, - justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an " - + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is " - + "TransactionCommitFailedException and thus should be deemed as confirmed." 
- + "Also https://github.com/spotbugs/spotbugs/issues/811") private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture, - final DOMDataTreeWriteTransaction transaction, - final Collection cohorts, - final String phase, final TransactionCommitFailedExceptionMapper exMapper, - final Throwable throwable) { - + final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort, + final String phase, final TransactionCommitFailedExceptionMapper exMapper, final Throwable throwable) { if (clientSubmitFuture.isDone()) { // We must have had failures from multiple cohorts. return; @@ -213,29 +171,21 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker { // Use debug instead of warn level here because this exception gets propagate back to the caller via the Future LOG.debug("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, throwable); - // Transaction failed - tell all cohorts to abort. - @SuppressWarnings("unchecked") - ListenableFuture[] canCommitFutures = new ListenableFuture[cohorts.size()]; - int index = 0; - for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { - canCommitFutures[index++] = cohort.abort(); - } - // Propagate the original exception final Exception e; if (throwable instanceof NoShardLeaderException || throwable instanceof ShardLeaderNotRespondingException) { e = new DataStoreUnavailableException(throwable.getMessage(), throwable); - } else if (throwable instanceof Exception) { - e = (Exception)throwable; + } else if (throwable instanceof Exception ex) { + e = ex; } else { e = new RuntimeException("Unexpected error occurred", throwable); } clientSubmitFuture.setException(exMapper.apply(e)); - ListenableFuture> combinedFuture = Futures.allAsList(canCommitFutures); - Futures.addCallback(combinedFuture, new FutureCallback>() { + // abort + Futures.addCallback(cohort.abort(), new FutureCallback() { @Override - public void onSuccess(final List notUsed) { + public void onSuccess(final Empty result) { // Propagate the original exception to the client. LOG.debug("Tx: {} aborted successfully", transaction.getIdentifier()); } @@ -257,8 +207,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker { * FIXME: This class should probably be moved to yangtools common utils for re-usability and * unified with AsyncNotifyingListenableFutureTask. */ - private static class AsyncNotifyingSettableFuture extends AbstractFuture { - + private static class AsyncNotifyingSettableFuture extends AbstractFuture { /** * ThreadLocal used to detect if the task completion thread is running the future listener Runnables. */ @@ -287,7 +236,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker { boolean set() { ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE); try { - return super.set(null); + return super.set(CommitInfo.empty()); } finally { ON_TASK_COMPLETION_THREAD_TL.set(null); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java deleted file mode 100644 index c4d5e1d8a5..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.databroker; - -import com.google.common.util.concurrent.FluentFuture; -import java.util.Map; -import java.util.Optional; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; - -public class DOMBrokerReadOnlyTransaction - extends AbstractDOMBrokerTransaction implements DOMDataTreeReadTransaction { - - /** - * Creates new composite Transactions. - * - * @param identifier Identifier of transaction. - */ - protected DOMBrokerReadOnlyTransaction(final Object identifier, - final Map storeTxFactories) { - super(identifier, storeTxFactories); - } - - @Override - public FluentFuture> read(final LogicalDatastoreType store, - final YangInstanceIdentifier path) { - return getSubtransaction(store).read(path); - } - - @Override - public FluentFuture exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) { - return getSubtransaction(store).exists(path); - } - - @Override - public void close() { - closeSubtransactions(); - } - - @Override - protected DOMStoreReadTransaction createTransaction(final LogicalDatastoreType key) { - return getTxFactory(key).newReadOnlyTransaction(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java deleted file mode 100644 index d9031c2d62..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import com.google.common.util.concurrent.FluentFuture; -import java.util.Map; -import java.util.Optional; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; - -public class DOMBrokerReadWriteTransaction extends AbstractDOMBrokerWriteTransaction - implements DOMDataTreeReadWriteTransaction { - - /** - * Constructs an instance. - * - * @param identifier identifier of transaction. 
- * @param storeTxFactories the backing transaction store factories - */ - protected DOMBrokerReadWriteTransaction(final Object identifier, - final Map storeTxFactories, - final AbstractDOMTransactionFactory commitImpl) { - super(identifier, storeTxFactories, commitImpl); - } - - @Override - public FluentFuture> read(final LogicalDatastoreType store, - final YangInstanceIdentifier path) { - return getSubtransaction(store).read(path); - } - - @Override - public FluentFuture exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) { - return getSubtransaction(store).exists(path); - } - - @Override - protected DOMStoreReadWriteTransaction createTransaction(final LogicalDatastoreType key) { - return getTxFactory(key).newReadWriteTransaction(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java deleted file mode 100644 index 3364d23143..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import static com.google.common.base.Preconditions.checkState; -import static java.util.Objects.requireNonNull; - -import com.google.common.util.concurrent.FluentFuture; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.MoreExecutors; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.Collection; -import java.util.Map; -import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import org.opendaylight.mdsal.common.api.CommitInfo; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory - implements DOMTransactionChain { - private enum State { - RUNNING, - CLOSING, - CLOSED, - FAILED, - } - - private static final AtomicIntegerFieldUpdater COUNTER_UPDATER = - AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter"); - private static final AtomicReferenceFieldUpdater STATE_UPDATER = - AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state"); - private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class); - private final AtomicLong txNum = new AtomicLong(); - private final AbstractDOMBroker broker; - private final DOMTransactionChainListener listener; - private final long chainId; - - private 
volatile State state = State.RUNNING; - private volatile int counter = 0; - - /** - * Constructs an instance. - * - * @param chainId - * ID of transaction chain - * @param chains - * Backing {@link DOMStoreTransactionChain}s. - * @param listener - * Listener, which listens on transaction chain events. - * @throws NullPointerException - * If any of arguments is null. - */ - DOMBrokerTransactionChain(final long chainId, final Map chains, - final AbstractDOMBroker broker, final DOMTransactionChainListener listener) { - super(chains); - this.chainId = chainId; - this.broker = requireNonNull(broker); - this.listener = requireNonNull(listener); - } - - private void checkNotFailed() { - checkState(state != State.FAILED, "Transaction chain has failed"); - } - - @Override - protected Object newTransactionIdentifier() { - return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement(); - } - - @Override - public FluentFuture commit( - final DOMDataTreeWriteTransaction transaction, final Collection cohorts) { - checkNotFailed(); - checkNotClosed(); - - final FluentFuture ret = broker.commit(transaction, cohorts); - - COUNTER_UPDATER.incrementAndGet(this); - ret.addCallback(new FutureCallback() { - @Override - public void onSuccess(final CommitInfo result) { - transactionCompleted(); - } - - @Override - public void onFailure(final Throwable failure) { - transactionFailed(transaction, failure); - } - }, MoreExecutors.directExecutor()); - - return ret; - } - - @Override - public void close() { - final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING); - if (!success) { - LOG.debug("Chain {} is no longer running", this); - return; - } - - super.close(); - for (DOMStoreTransactionChain subChain : getTxFactories().values()) { - subChain.close(); - } - - if (counter == 0) { - finishClose(); - } - } - - private void finishClose() { - state = State.CLOSED; - listener.onTransactionChainSuccessful(this); - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private void transactionCompleted() { - if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) { - finishClose(); - } - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private void transactionFailed(final DOMDataTreeWriteTransaction tx, final Throwable cause) { - state = State.FAILED; - LOG.debug("Transaction chain {} failed.", this, cause); - listener.onTransactionChainFailed(this, tx, cause); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java deleted file mode 100644 index 2f0915dfce..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2015 Huawei Technologies Co. Ltd. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.databroker; - -import java.util.Map; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; - -public class DOMBrokerWriteOnlyTransaction extends AbstractDOMBrokerWriteTransaction { - - /** - * Constructs an instance. - * - * @param identifier identifier of transaction. - * @param storeTxFactories the backing transaction store factories - */ - public DOMBrokerWriteOnlyTransaction(Object identifier, - Map storeTxFactories, - AbstractDOMTransactionFactory commitImpl) { - super(identifier, storeTxFactories, commitImpl); - } - - @Override - protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) { - return getTxFactory(key).newWriteOnlyTransaction(); - } - -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java new file mode 100644 index 0000000000..bdea393fc8 --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.databroker; + +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl; +import org.opendaylight.yangtools.util.DurationStatisticsTracker; +import org.opendaylight.yangtools.util.concurrent.SpecialExecutors; +import org.osgi.service.component.annotations.Activate; +import org.osgi.service.component.annotations.Component; +import org.osgi.service.component.annotations.Deactivate; +import org.osgi.service.metatype.annotations.AttributeDefinition; +import org.osgi.service.metatype.annotations.Designate; +import org.osgi.service.metatype.annotations.ObjectClassDefinition; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Component( + service = DataBrokerCommitExecutor.class, + configurationPid = "org.opendaylight.controller.cluster.datastore.broker") +@Designate(ocd = DataBrokerCommitExecutor.Config.class) +public final class DataBrokerCommitExecutor { + @ObjectClassDefinition + public @interface Config { + @AttributeDefinition(name = "max-data-broker-future-callback-queue-size") + int callbackQueueSize() default 1000; + @AttributeDefinition(name = "max-data-broker-future-callback-pool-size") + int callbackPoolSize() default 20; + } + + private static final Logger LOG = LoggerFactory.getLogger(DataBrokerCommitExecutor.class); + + private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent(); + private final 
ThreadExecutorStatsMXBeanImpl threadStats; + private final CommitStatsMXBeanImpl commitStats; + private final ExecutorService executorService; + + @Activate + public DataBrokerCommitExecutor(final Config config) { + executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(), + config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class); + threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats", + "DOMDataBroker"); + commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker"); + commitStats.register(); + LOG.info("DOM Data Broker commit exector started"); + } + + @Deactivate + void deactivate() { + LOG.info("DOM Data Broker commit exector stopping"); + commitStats.unregister(); + threadStats.unregister(); + executorService.shutdown(); + try { + executorService.awaitTermination(1, TimeUnit.MINUTES); + } catch (InterruptedException e) { + LOG.warn("Future executor failed to finish in time, giving up", e); + } + LOG.info("DOM Data Broker commit exector stopped"); + } + + Executor executor() { + return executorService; + } + + DurationStatisticsTracker commitStatsTracker() { + return commitStatsTracker; + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java deleted file mode 100644 index 3395c7268f..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.databroker; - -import com.google.common.annotations.Beta; -import com.google.common.collect.ClassToInstanceMap; -import com.google.common.collect.ImmutableMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl; -import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.mdsal.dom.api.DOMDataBroker; -import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction; -import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction; -import org.opendaylight.mdsal.dom.api.DOMTransactionChain; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener; -import org.opendaylight.mdsal.dom.spi.store.DOMStore; -import org.opendaylight.yangtools.util.DurationStatisticsTracker; -import org.opendaylight.yangtools.util.concurrent.SpecialExecutors; -import org.osgi.service.component.annotations.Activate; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.Deactivate; -import org.osgi.service.component.annotations.Reference; -import org.osgi.service.metatype.annotations.AttributeDefinition; -import org.osgi.service.metatype.annotations.Designate; -import org.osgi.service.metatype.annotations.ObjectClassDefinition; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Beta -@Component(immediate = true, configurationPid = "org.opendaylight.controller.cluster.datastore.broker", - property = "type=default") -@Designate(ocd = OSGiDOMDataBroker.Config.class) -public final class OSGiDOMDataBroker implements DOMDataBroker { - @ObjectClassDefinition - public @interface Config { - @AttributeDefinition(name = "max-data-broker-future-callback-queue-size") - int callbackQueueSize() default 1000; - @AttributeDefinition(name = "max-data-broker-future-callback-pool-size") - int callbackPoolSize() default 20; - } - - private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMDataBroker.class); - - @Reference(target = "(type=distributed-config)") - DOMStore configDatastore = null; - @Reference(target = "(type=distributed-operational)") - DOMStore operDatastore = null; - - private ExecutorService executorService; - private ConcurrentDOMDataBroker delegate; - private CommitStatsMXBeanImpl commitStats; - private ThreadExecutorStatsMXBeanImpl threadStats; - - @Override - public DOMDataTreeReadTransaction newReadOnlyTransaction() { - return delegate.newReadOnlyTransaction(); - } - - @Override - public DOMDataTreeWriteTransaction newWriteOnlyTransaction() { - return delegate.newWriteOnlyTransaction(); - } - - @Override - public DOMDataTreeReadWriteTransaction newReadWriteTransaction() { - return delegate.newReadWriteTransaction(); - } - - @Override - public ClassToInstanceMap getExtensions() { - return delegate.getExtensions(); - } - - @Override - public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) { - return delegate.createTransactionChain(listener); - } - - @Override - public DOMTransactionChain createMergingTransactionChain(final 
DOMTransactionChainListener listener) { - return delegate.createMergingTransactionChain(listener); - } - - @Activate - void activate(final Config config) { - LOG.info("DOM Data Broker starting"); - final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent(); - - executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(), - config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class); - delegate = new ConcurrentDOMDataBroker(ImmutableMap.of( - LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore), - executorService, commitStatsTracker); - - commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker"); - commitStats.register(); - threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats", - "DOMDataBroker"); - - LOG.info("DOM Data Broker started"); - } - - @Deactivate - void deactivate() { - LOG.info("DOM Data Broker stopping"); - commitStats.unregister(); - if (threadStats != null) { - threadStats.unregister(); - } - - delegate.close(); - executorService.shutdown(); - try { - executorService.awaitTermination(1, TimeUnit.MINUTES); - } catch (InterruptedException e) { - LOG.warn("Future executor failed to finish in time, giving up", e); - } - LOG.info("DOM Data Broker stopped"); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java index 003c073de8..d10627dcf9 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java @@ -15,6 +15,7 @@ import com.google.common.base.MoreObjects; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import java.util.stream.Stream; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; @@ -107,10 +108,16 @@ public abstract class AbstractClientHandle e } final T ensureProxy(final YangInstanceIdentifier path) { - final State local = getState(); - final Long shard = parent.resolveShardForPath(path); + return ensureProxy(getState(), parent.resolveShardForPath(path)); + } + + private T ensureProxy(final State localState, final Long shard) { + return localState.computeIfAbsent(shard, this::createProxy); + } - return local.computeIfAbsent(shard, this::createProxy); + final Stream ensureAllProxies() { + final var local = getState(); + return parent.resolveAllShards().map(shard -> ensureProxy(local, shard)); } final AbstractClientHistory parent() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java index 95552b382e..796c23614e 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java @@ -20,6 +20,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import java.util.concurrent.locks.StampedLock; +import java.util.stream.Stream; import org.checkerframework.checker.lock.qual.GuardedBy; import org.checkerframework.checker.lock.qual.Holding; import org.eclipse.jdt.annotation.NonNull; @@ -31,6 +32,7 @@ import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryReq import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.Response; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException; import org.opendaylight.yangtools.concepts.Identifiable; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -117,6 +119,14 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id return client.resolveShardForPath(path); } + final Stream resolveAllShards() { + return client.resolveAllShards(); + } + + final ActorUtils actorUtils() { + return client.actorUtils(); + } + @Override final void localAbort(final Throwable cause) { final State oldState = STATE_UPDATER.getAndSet(this, State.CLOSED); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java index e5d8abcb62..507f065d49 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java @@ -12,6 +12,7 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; import akka.util.Timeout; +import com.google.common.base.Throwables; import java.util.concurrent.TimeUnit; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.client.AbstractClientActor; @@ -53,10 +54,9 @@ public abstract class AbstractDataStoreClientActor extends AbstractClientActor { try { return (DataStoreClient) Await.result(ExplicitAsk.ask(actor, GET_CLIENT_FACTORY, Timeout.apply(timeout, unit)), Duration.Inf()); - } catch (RuntimeException e) { - throw e; } catch (Exception e) { - throw new RuntimeException(e); + Throwables.throwIfUnchecked(e); + throw new IllegalStateException(e); } } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java index 4f91cb27fa..82567c40d9 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java @@ -17,13 +17,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.StampedLock; -import org.opendaylight.controller.cluster.access.client.BackendInfoResolver; +import java.util.stream.Stream; import org.opendaylight.controller.cluster.access.client.ClientActorBehavior; import org.opendaylight.controller.cluster.access.client.ClientActorContext; import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection; import org.opendaylight.controller.cluster.access.client.ConnectionEntry; import org.opendaylight.controller.cluster.access.client.ReconnectForwarder; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; +import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,7 +72,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior resolver) { + final AbstractShardBackendResolver resolver) { super(context, resolver); singleHistory = new SingleClientHistory(this, new LocalHistoryIdentifier(getIdentifier(), 0)); } @@ -194,7 +195,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior resolveAllShards(); + + final ActorUtils actorUtils() { + return ((AbstractShardBackendResolver) resolver()).actorUtils(); + } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java index 8fb042fba1..14ad546991 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java @@ -49,6 +49,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure; import org.opendaylight.controller.cluster.access.concepts.Response; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.yangtools.concepts.Identifiable; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.slf4j.Logger; @@ -64,10 +65,9 @@ import org.slf4j.LoggerFactory; *
    * This class interacts with the queueing mechanism in ClientActorBehavior, hence once we arrive at a decision * to use either a local or remote implementation, we are stuck with it. We can re-evaluate on the next transaction. - * - * @author Robert Varga */ -abstract class AbstractProxyTransaction implements Identifiable { +abstract sealed class AbstractProxyTransaction implements Identifiable + permits LocalProxyTransaction, RemoteProxyTransaction { /** * Marker object used instead of read-type of requests, which are satisfied only once. This has a lower footprint * and allows compressing multiple requests into a single entry. This class is not thread-safe. @@ -139,7 +139,7 @@ abstract class AbstractProxyTransaction implements Identifiable optState = flushState(); if (optState.isPresent()) { - forwardToSuccessor(successor, optState.get(), null); + forwardToSuccessor(successor, optState.orElseThrow(), null); } successor.predecessorSealed(); } @@ -448,7 +448,7 @@ abstract class AbstractProxyTransaction implements Identifiable ret) { + final void abort(final VotingFuture ret) { checkSealed(); sendDoAbort(t -> { @@ -733,7 +733,7 @@ abstract class AbstractProxyTransaction implements Identifiable optState = flushState(); if (optState.isPresent()) { - successor.handleReplayedRemoteRequest(optState.get(), null, enqueuedTicks); + successor.handleReplayedRemoteRequest(optState.orElseThrow(), null, enqueuedTicks); } if (successor.markSealed()) { successor.sealAndSend(OptionalLong.of(enqueuedTicks)); @@ -854,7 +854,11 @@ abstract class AbstractProxyTransaction implements Identifiable request, @Nullable Consumer> callback, long enqueuedTicks); - private static IllegalStateException unhandledResponseException(final Response resp) { + static final @NonNull IllegalArgumentException unhandledRequest(final TransactionRequest request) { + return new IllegalArgumentException("Unhandled request " + request); + } + + private static @NonNull IllegalStateException unhandledResponseException(final Response resp) { return new IllegalStateException("Unhandled response " + resp.getClass()); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java index ca784fed7a..5a436a53d3 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java @@ -74,7 +74,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver new ConnectClientRequest(clientId, t, ABIVersion.BORON, + connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.POTASSIUM, ABIVersion.current())); } @@ -107,7 +107,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver staleBackendInfoCallbacks.remove(callback); } - protected void notifyStaleBackendInfoCallbacks(Long cookie) { + protected void notifyStaleBackendInfoCallbacks(final Long cookie) { staleBackendInfoCallbacks.forEach(callback -> callback.accept(cookie)); } diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java index b2f66d5d31..77de1e45d8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java @@ -14,6 +14,7 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; +import org.opendaylight.yangtools.yang.common.Empty; /** * Base class for internal {@link DOMStoreThreePhaseCommitCohort} implementation. It contains utility constants for @@ -23,7 +24,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; */ abstract class AbstractTransactionCommitCohort implements DOMStoreThreePhaseCommitCohort { static final ListenableFuture TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE); - static final ListenableFuture VOID_FUTURE = Futures.immediateFuture(null); + static final ListenableFuture EMPTY_FUTURE = Futures.immediateFuture(Empty.value()); private final AbstractClientHistory parent; private final TransactionIdentifier txId; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java index d20a618c3d..5611a1044f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java @@ -11,6 +11,7 @@ import com.google.common.annotations.Beta; import com.google.common.util.concurrent.FluentFuture; import java.util.Optional; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; @@ -28,19 +29,20 @@ public class ClientSnapshot extends AbstractClientHandle exists(final YangInstanceIdentifier path) { - return ensureSnapshotProxy(path).exists(path); + return ensureProxy(path).exists(path); } public FluentFuture> read(final YangInstanceIdentifier path) { - return ensureSnapshotProxy(path).read(path); + return path.isEmpty() ? 
readRoot() : ensureProxy(path).read(path); + } + + private FluentFuture> readRoot() { + return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies() + .map(proxy -> proxy.read(YangInstanceIdentifier.of()))); } @Override final AbstractProxyTransaction createProxy(final Long shard) { return parent().createSnapshotProxy(getIdentifier(), shard); } - - private AbstractProxyTransaction ensureSnapshotProxy(final YangInstanceIdentifier path) { - return ensureProxy(path); - } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java index 7cdc04aba1..b2ff5d5184 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java @@ -14,9 +14,13 @@ import com.google.common.util.concurrent.FluentFuture; import java.util.Collection; import java.util.Map; import java.util.Optional; +import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather; import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; +import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; /** @@ -55,28 +59,57 @@ public class ClientTransaction extends AbstractClientHandle exists(final YangInstanceIdentifier path) { - return ensureTransactionProxy(path).exists(path); + return ensureProxy(path).exists(path); } public FluentFuture> read(final YangInstanceIdentifier path) { - return ensureTransactionProxy(path).read(path); + return path.isEmpty() ? 
readRoot() : ensureProxy(path).read(path); + } + + private FluentFuture> readRoot() { + return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies() + .map(proxy -> proxy.read(YangInstanceIdentifier.of()))); } public void delete(final YangInstanceIdentifier path) { - ensureTransactionProxy(path).delete(path); + if (path.isEmpty()) { + ensureAllProxies().forEach(proxy -> proxy.delete(YangInstanceIdentifier.of())); + } else { + ensureProxy(path).delete(path); + } } public void merge(final YangInstanceIdentifier path, final NormalizedNode data) { - ensureTransactionProxy(path).merge(path, data); + if (path.isEmpty()) { + mergeRoot(RootScatterGather.castRootNode(data)); + } else { + ensureProxy(path).merge(path, data); + } + } + + private void mergeRoot(final @NonNull ContainerNode rootData) { + if (!rootData.isEmpty()) { + RootScatterGather.scatterTouched(rootData, this::ensureProxy).forEach( + scattered -> scattered.shard().merge(YangInstanceIdentifier.of(), scattered.container())); + } } public void write(final YangInstanceIdentifier path, final NormalizedNode data) { - ensureTransactionProxy(path).write(path, data); + if (path.isEmpty()) { + writeRoot(RootScatterGather.castRootNode(data)); + } else { + ensureProxy(path).write(path, data); + } + } + + private void writeRoot(final @NonNull ContainerNode rootData) { + RootScatterGather.scatterAll(rootData, this::ensureProxy, ensureAllProxies()).forEach( + scattered -> scattered.shard().write(YangInstanceIdentifier.of(), scattered.container())); + } + + private AbstractProxyTransaction ensureProxy(final PathArgument childId) { + return ensureProxy(YangInstanceIdentifier.of(childId)); } public DOMStoreThreePhaseCommitCohort ready() { @@ -90,19 +123,11 @@ public class ClientTransaction extends AbstractClientHandle new EmptyTransactionCommitCohort(parent, txId); + case 1 -> new DirectTransactionCommitCohort(parent, txId, toReady.iterator().next()); + default -> new ClientTransactionCommitCohort(parent, txId, toReady); + }; return parent.onTransactionReady(this, cohort); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java index a4eb5e074f..7887577a93 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java @@ -12,6 +12,8 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; import java.util.Collection; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.mdsal.common.api.CommitInfo; +import org.opendaylight.yangtools.yang.common.Empty; final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohort { private final Collection proxies; @@ -35,14 +37,14 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor return ret; } - private ListenableFuture addComplete(final ListenableFuture future) { + private ListenableFuture addComplete(final ListenableFuture future) { future.addListener(this::complete, MoreExecutors.directExecutor()); return future; } 
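
The cohort hunks below fan a single commit step out to every shard proxy and complete only after each proxy has voted. As a minimal standalone sketch of that countdown-vote idea (plain JDK types; SimpleVotingFuture and its method names are illustrative stand-ins, not this package's actual VotingFuture API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

final class SimpleVotingFuture<T> {
    private final CompletableFuture<T> future = new CompletableFuture<>();
    private final AtomicInteger remainingVotes;
    private final T result;

    SimpleVotingFuture(final T result, final int requiredVotes) {
        if (requiredVotes <= 0) {
            throw new IllegalArgumentException("requiredVotes must be positive");
        }
        this.result = result;
        remainingVotes = new AtomicInteger(requiredVotes);
    }

    // Each participating shard proxy reports exactly one vote.
    void voteYes() {
        if (remainingVotes.decrementAndGet() == 0) {
            // The last yes-vote completes the future with the pre-agreed result.
            future.complete(result);
        }
    }

    void voteNo(final Throwable cause) {
        // Any single no-vote fails the whole step immediately.
        future.completeExceptionally(cause);
    }

    CompletableFuture<T> asFuture() {
        return future;
    }
}

A caller would create one such future per commit step with requiredVotes equal to the number of shard proxies, handing voteYes()/voteNo() to each proxy's completion callback.
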
@Override - public ListenableFuture preCommit() { - final VotingFuture ret = new VotingFuture<>(null, proxies.size()); + public ListenableFuture preCommit() { + final var ret = new VotingFuture<>(Empty.value(), proxies.size()); for (AbstractProxyTransaction proxy : proxies) { proxy.preCommit(ret); } @@ -51,8 +53,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor } @Override - public ListenableFuture commit() { - final VotingFuture ret = new VotingFuture<>(null, proxies.size()); + public ListenableFuture commit() { + final var ret = new VotingFuture<>(CommitInfo.empty(), proxies.size()); for (AbstractProxyTransaction proxy : proxies) { proxy.doCommit(ret); } @@ -61,8 +63,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor } @Override - public ListenableFuture abort() { - final VotingFuture ret = new VotingFuture<>(null, proxies.size()); + public ListenableFuture abort() { + final var ret = new VotingFuture<>(Empty.value(), proxies.size()); for (AbstractProxyTransaction proxy : proxies) { proxy.abort(ret); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java index 9b21b98682..5b5ff5864a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java @@ -11,6 +11,8 @@ import static java.util.Objects.requireNonNull; import com.google.common.util.concurrent.ListenableFuture; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.mdsal.common.api.CommitInfo; +import org.opendaylight.yangtools.yang.common.Empty; /** * An {@link AbstractTransactionCommitCohort} implementation for transactions which contain a single proxy. 
Since there @@ -33,19 +35,19 @@ final class DirectTransactionCommitCohort extends AbstractTransactionCommitCohor } @Override - public ListenableFuture preCommit() { - return VOID_FUTURE; + public ListenableFuture preCommit() { + return EMPTY_FUTURE; } @Override - public ListenableFuture abort() { + public ListenableFuture abort() { complete(); - return VOID_FUTURE; + return EMPTY_FUTURE; } @Override - public ListenableFuture commit() { + public ListenableFuture commit() { complete(); - return VOID_FUTURE; + return CommitInfo.emptyFluentFuture(); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java index e40da21d13..f8927c28c8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java @@ -7,7 +7,7 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; -import java.util.function.Function; +import java.util.stream.Stream; import org.opendaylight.controller.cluster.access.client.ClientActorContext; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -18,12 +18,12 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; * @author Robert Varga */ final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBehavior { - private final Function pathToShard; + private final ModuleShardBackendResolver resolver; private DistributedDataStoreClientBehavior(final ClientActorContext context, final ModuleShardBackendResolver resolver) { super(context, resolver); - pathToShard = resolver::resolveShardForPath; + this.resolver = resolver; } DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils) { @@ -32,7 +32,12 @@ final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBe @Override Long resolveShardForPath(final YangInstanceIdentifier path) { - return pathToShard.apply(path); + return resolver.resolveShardForPath(path); + } + + @Override + Stream resolveAllShards() { + return resolver.resolveAllShards(); } @Override diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java index 7193dd053f..5b11d8679e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java @@ -9,6 +9,8 @@ package org.opendaylight.controller.cluster.databroker.actors.dds; import com.google.common.util.concurrent.ListenableFuture; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.mdsal.common.api.CommitInfo; 
+import org.opendaylight.yangtools.yang.common.Empty; /** * An {@link AbstractTransactionCommitCohort} for use with empty transactions. This relies on the fact that no backends @@ -30,19 +32,19 @@ final class EmptyTransactionCommitCohort extends AbstractTransactionCommitCohort } @Override - public ListenableFuture preCommit() { - return VOID_FUTURE; + public ListenableFuture preCommit() { + return EMPTY_FUTURE; } @Override - public ListenableFuture abort() { + public ListenableFuture abort() { complete(); - return VOID_FUTURE; + return EMPTY_FUTURE; } @Override - public ListenableFuture commit() { + public ListenableFuture commit() { complete(); - return VOID_FUTURE; + return CommitInfo.emptyFluentFuture(); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java new file mode 100644 index 0000000000..63dbc92856 --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.databroker.actors.dds; + +import static java.util.Objects.requireNonNull; + +import java.util.Optional; +import org.eclipse.jdt.annotation.NonNull; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; + +/** + * A {@link CursorAwareDataTreeModification} which does not really do anything and throws an + * {@link FailedDataTreeModificationException} for most of its operations. Used in case we when + * {@link DataTreeSnapshot#newModification()} fails, see {@link LocalReadWriteProxyTransaction} for details. Surrounding + * code should guard against invocation of most of these methods. 
+ */ +record FailedDataTreeModification( + @NonNull EffectiveModelContext modelContext, + @NonNull Exception cause) implements CursorAwareDataTreeModification { + + FailedDataTreeModification { + requireNonNull(modelContext); + requireNonNull(cause); + } + + @Override + public void delete(final YangInstanceIdentifier path) { + throw ex(); + } + + @Override + public void merge(final YangInstanceIdentifier path, final NormalizedNode data) { + throw ex(); + } + + @Override + public void write(final YangInstanceIdentifier path, final NormalizedNode data) { + throw ex(); + } + + @Override + public void ready() { + // No-op + } + + @Override + public void applyToCursor(final DataTreeModificationCursor cursor) { + throw ex(); + } + + @Override + public Optional readNode(final YangInstanceIdentifier path) { + throw ex(); + } + + @Override + public CursorAwareDataTreeModification newModification() { + throw new UnsupportedOperationException(); + } + + @Override + public Optional openCursor(final YangInstanceIdentifier path) { + throw ex(); + } + + private @NonNull FailedDataTreeModificationException ex() { + return new FailedDataTreeModificationException(cause); + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java new file mode 100644 index 0000000000..5f860a00d3 --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.databroker.actors.dds; + +import static java.util.Objects.requireNonNull; + +/** + * A box {@link RuntimeException} thrown by {@link FailedDataTreeModification} from its user-facing methods. 
+ */ +final class FailedDataTreeModificationException extends RuntimeException { + private static final long serialVersionUID = 1L; + + FailedDataTreeModificationException(final Exception cause) { + super(null, requireNonNull(cause), false, false); + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java index 0a5ead9d9b..6c4006e93f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java @@ -26,14 +26,16 @@ import org.opendaylight.controller.cluster.access.commands.ReadTransactionSucces import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest; import org.opendaylight.controller.cluster.access.commands.TransactionRequest; import org.opendaylight.controller.cluster.access.concepts.Response; +import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor; +import org.opendaylight.mdsal.common.api.ReadFailedException; import org.opendaylight.yangtools.util.concurrent.FluentFutures; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,13 +51,12 @@ import org.slf4j.LoggerFactory; *
    * This class is not thread-safe as usual with transactions. Since it does not interact with the backend until the * transaction is submitted, at which point this class gets out of the picture, this is not a cause for concern. - * - * @author Robert Varga */ -abstract class LocalProxyTransaction extends AbstractProxyTransaction { +abstract sealed class LocalProxyTransaction extends AbstractProxyTransaction + permits LocalReadOnlyProxyTransaction, LocalReadWriteProxyTransaction { private static final Logger LOG = LoggerFactory.getLogger(LocalProxyTransaction.class); - private final TransactionIdentifier identifier; + private final @NonNull TransactionIdentifier identifier; LocalProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier, final boolean isDone) { super(parent, isDone); @@ -76,13 +77,25 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { @Nullable Consumer> callback, long enqueuedTicks); @Override - final FluentFuture doExists(final YangInstanceIdentifier path) { - return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path).isPresent()); + FluentFuture doExists(final YangInstanceIdentifier path) { + final boolean result; + try { + result = readOnlyView().readNode(path).isPresent(); + } catch (FailedDataTreeModificationException e) { + return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e)); + } + return FluentFutures.immediateBooleanFluentFuture(result); } @Override - final FluentFuture> doRead(final YangInstanceIdentifier path) { - return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path)); + FluentFuture> doRead(final YangInstanceIdentifier path) { + final Optional result; + try { + result = readOnlyView().readNode(path); + } catch (FailedDataTreeModificationException e) { + return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e)); + } + return FluentFutures.immediateFluentFuture(result); } @Override @@ -96,35 +109,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { if (request instanceof AbortLocalTransactionRequest) { enqueueAbort(request, callback, enqueuedTicks); } else { - throw new IllegalArgumentException("Unhandled request" + request); - } - } - - private boolean handleReadRequest(final TransactionRequest request, final Consumer> callback) { - // Note we delay completion of read requests to limit the scope at which the client can run, as they have - // listeners, which we do not want to execute while we are reconnecting. - if (request instanceof ReadTransactionRequest) { - final YangInstanceIdentifier path = ((ReadTransactionRequest) request).getPath(); - final Optional result = readOnlyView().readNode(path); - if (callback != null) { - // XXX: FB does not see that callback is final, on stack and has be check for non-null. - final Consumer> fbIsStupid = requireNonNull(callback); - executeInActor(() -> fbIsStupid.accept(new ReadTransactionSuccess(request.getTarget(), - request.getSequence(), result))); - } - return true; - } else if (request instanceof ExistsTransactionRequest) { - final YangInstanceIdentifier path = ((ExistsTransactionRequest) request).getPath(); - final boolean result = readOnlyView().readNode(path).isPresent(); - if (callback != null) { - // XXX: FB does not see that callback is final, on stack and has be check for non-null. 
- final Consumer> fbIsStupid = requireNonNull(callback); - executeInActor(() -> fbIsStupid.accept(new ExistsTransactionSuccess(request.getTarget(), - request.getSequence(), result))); - } - return true; - } else { - return false; + throw unhandledRequest(request); } } @@ -142,7 +127,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { // hence we can skip sequence increments. LOG.debug("Not replaying {}", request); } else { - throw new IllegalArgumentException("Unhandled request " + request); + throw unhandledRequest(request); } } @@ -162,15 +147,56 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { } else if (request instanceof TransactionPurgeRequest) { enqueuePurge(callback); } else { - throw new IllegalArgumentException("Unhandled request " + request); + throw unhandledRequest(request); + } + } + + @NonNull Response handleExistsRequest(final @NonNull DataTreeSnapshot snapshot, + final @NonNull ExistsTransactionRequest request) { + try { + return new ExistsTransactionSuccess(request.getTarget(), request.getSequence(), + snapshot.readNode(request.getPath()).isPresent()); + } catch (FailedDataTreeModificationException e) { + return request.toRequestFailure(new RuntimeRequestException("Failed to access data", + ReadFailedException.MAPPER.apply(e))); + } + } + + @NonNull Response handleReadRequest(final @NonNull DataTreeSnapshot snapshot, + final @NonNull ReadTransactionRequest request) { + try { + return new ReadTransactionSuccess(request.getTarget(), request.getSequence(), + snapshot.readNode(request.getPath())); + } catch (FailedDataTreeModificationException e) { + return request.toRequestFailure(new RuntimeRequestException("Failed to access data", + ReadFailedException.MAPPER.apply(e))); + } + } + + private boolean handleReadRequest(final TransactionRequest request, final Consumer> callback) { + // Note we delay completion of read requests to limit the scope at which the client can run, as they have + // listeners, which we do not want to execute while we are reconnecting. 
+ if (request instanceof ReadTransactionRequest) { + if (callback != null) { + final var response = handleReadRequest(readOnlyView(), (ReadTransactionRequest) request); + executeInActor(() -> callback.accept(response)); + } + return true; + } else if (request instanceof ExistsTransactionRequest) { + if (callback != null) { + final var response = handleExistsRequest(readOnlyView(), (ExistsTransactionRequest) request); + executeInActor(() -> callback.accept(response)); + } + return true; + } else { + return false; } } @Override final void forwardToRemote(final RemoteProxyTransaction successor, final TransactionRequest request, final Consumer> callback) { - if (request instanceof CommitLocalTransactionRequest) { - final CommitLocalTransactionRequest req = (CommitLocalTransactionRequest) request; + if (request instanceof final CommitLocalTransactionRequest req) { final DataTreeModification mod = req.getModification(); LOG.debug("Applying modification {} to successor {}", mod, successor); @@ -203,7 +229,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { } else if (request instanceof ModifyTransactionRequest) { successor.handleForwardedRequest(request, callback); } else { - throwUnhandledRequest(request); + throw unhandledRequest(request); } } @@ -215,16 +241,12 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction { } else if (request instanceof TransactionPurgeRequest) { successor.enqueuePurge(callback); } else { - throwUnhandledRequest(request); + throw unhandledRequest(request); } LOG.debug("Forwarded request {} to successor {}", request, successor); } - private static void throwUnhandledRequest(final TransactionRequest request) { - throw new IllegalArgumentException("Unhandled request " + request); - } - void sendAbort(final TransactionRequest request, final Consumer> callback) { sendRequest(request, callback); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java index 8cdbcf878c..6d019af1a1 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java @@ -7,8 +7,8 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; -import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Verify.verify; +import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; import java.util.Optional; @@ -20,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.Response; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; /** * A read-only specialization of {@link LocalProxyTransaction}. This class is NOT thread-safe. 
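
The doExists()/doRead() changes above, together with the LocalReadWriteProxyTransaction hunks further below, follow one idea: if instantiating the data tree modification failed, record that exception once and report it from every later read instead of throwing from inside the tree code. A minimal standalone sketch of that record-and-replay pattern follows; the class and method names here are hypothetical and not part of this patch:

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

final class RecordAndReplayReads {
    // Non-null when the underlying snapshot could not be instantiated.
    private final Exception recordedFailure;

    RecordAndReplayReads(final Exception failureAtConstruction) {
        recordedFailure = failureAtConstruction;
    }

    CompletableFuture<Optional<String>> read(final String path) {
        if (recordedFailure != null) {
            // Replay the original instantiation failure as an asynchronous read failure.
            return CompletableFuture.failedFuture(
                new IllegalStateException("Read of " + path + " failed", recordedFailure));
        }
        // Normal path: pretend we found some data.
        return CompletableFuture.completedFuture(Optional.of("data at " + path));
    }
}
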
@@ -39,7 +39,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction { LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) { super(parent, identifier, true); // It is an error to touch snapshot once we are DONE - this.snapshot = null; + snapshot = null; } @Override @@ -49,7 +49,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction { @Override DataTreeSnapshot readOnlyView() { - return checkNotNull(snapshot, "Transaction %s is DONE", getIdentifier()); + return verifyNotNull(snapshot, "Transaction %s is DONE", getIdentifier()); } @Override @@ -95,7 +95,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction { private static void commonModifyTransactionRequest(final ModifyTransactionRequest request) { verify(request.getModifications().isEmpty()); - final PersistenceProtocol protocol = request.getPersistenceProtocol().get(); + final PersistenceProtocol protocol = request.getPersistenceProtocol().orElseThrow(); verify(protocol == PersistenceProtocol.ABORT); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java index c58f834dd6..47ae6a2bc7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java @@ -7,8 +7,12 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; -import com.google.common.base.Preconditions; -import com.google.common.base.Verify; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.base.Verify.verify; +import static com.google.common.base.Verify.verifyNotNull; + +import com.google.common.util.concurrent.FluentFuture; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Optional; import java.util.OptionalLong; import java.util.function.BiConsumer; @@ -19,9 +23,11 @@ import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest; import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest; import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest; +import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest; import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest; import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder; import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol; +import org.opendaylight.controller.cluster.access.commands.ReadTransactionRequest; import org.opendaylight.controller.cluster.access.commands.TransactionAbortRequest; import org.opendaylight.controller.cluster.access.commands.TransactionDelete; import org.opendaylight.controller.cluster.access.commands.TransactionDoCommitRequest; @@ -31,16 +37,19 @@ import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitR import org.opendaylight.controller.cluster.access.commands.TransactionRequest; import 
org.opendaylight.controller.cluster.access.commands.TransactionWrite; import org.opendaylight.controller.cluster.access.concepts.Response; +import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor; +import org.opendaylight.mdsal.common.api.ReadFailedException; +import org.opendaylight.yangtools.util.concurrent.FluentFutures; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeSnapshot; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,10 +92,25 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { */ private Exception recordedFailure; + @SuppressWarnings("checkstyle:IllegalCatch") LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier, - final DataTreeSnapshot snapshot) { + final DataTreeSnapshot snapshot) { super(parent, identifier, false); - modification = (CursorAwareDataTreeModification) snapshot.newModification(); + + if (snapshot instanceof FailedDataTreeModification failed) { + recordedFailure = failed.cause(); + modification = failed; + } else { + CursorAwareDataTreeModification mod; + try { + mod = (CursorAwareDataTreeModification) snapshot.newModification(); + } catch (Exception e) { + LOG.debug("Failed to instantiate modification for {}", identifier, e); + recordedFailure = e; + mod = new FailedDataTreeModification(snapshot.modelContext(), e); + } + modification = mod; + } } LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) { @@ -105,6 +129,20 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { return getModification(); } + @Override + FluentFuture doExists(final YangInstanceIdentifier path) { + final var ex = recordedFailure; + return ex == null ? super.doExists(path) + : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex)); + } + + @Override + FluentFuture> doRead(final YangInstanceIdentifier path) { + final var ex = recordedFailure; + return ex == null ? 
super.doRead(path) + : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex)); + } + @Override @SuppressWarnings("checkstyle:IllegalCatch") void doDelete(final YangInstanceIdentifier path) { @@ -177,7 +215,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { } private void sealModification() { - Preconditions.checkState(sealedModification == null, "Transaction %s is already sealed", this); + checkState(sealedModification == null, "Transaction %s is already sealed", this); final CursorAwareDataTreeModification mod = getModification(); mod.ready(); sealedModification = mod; @@ -220,8 +258,8 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { return Optional.of(b.build()); } - DataTreeSnapshot getSnapshot() { - Preconditions.checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier()); + CursorAwareDataTreeSnapshot getSnapshot() { + checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier()); return sealedModification; } @@ -254,26 +292,26 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { final Optional maybeProtocol = request.getPersistenceProtocol(); if (maybeProtocol.isPresent()) { - Verify.verify(callback != null, "Request %s has null callback", request); + final var cb = verifyNotNull(callback, "Request %s has null callback", request); if (markSealed()) { sealOnly(); } - switch (maybeProtocol.get()) { + switch (maybeProtocol.orElseThrow()) { case ABORT: - sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), callback); + sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), cb); break; case READY: // No-op, as we have already issued a sealOnly() and we are not transmitting anything break; case SIMPLE: - sendMethod.accept(commitRequest(false), callback); + sendMethod.accept(commitRequest(false), cb); break; case THREE_PHASE: - sendMethod.accept(commitRequest(true), callback); + sendMethod.accept(commitRequest(true), cb); break; default: - throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.get()); + throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.orElseThrow()); } } } @@ -321,6 +359,22 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { } } + @Override + Response handleExistsRequest(final DataTreeSnapshot snapshot, final ExistsTransactionRequest request) { + final var ex = recordedFailure; + return ex == null ? super.handleExistsRequest(snapshot, request) + : request.toRequestFailure( + new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex))); + } + + @Override + Response handleReadRequest(final DataTreeSnapshot snapshot, final ReadTransactionRequest request) { + final var ex = recordedFailure; + return ex == null ? 
super.handleReadRequest(snapshot, request) + : request.toRequestFailure( + new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex))); + } + @Override void forwardToLocal(final LocalProxyTransaction successor, final TransactionRequest request, final Consumer> callback) { @@ -336,7 +390,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { } private static LocalReadWriteProxyTransaction verifyLocalReadWrite(final LocalProxyTransaction successor) { - Verify.verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor); + verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor); return (LocalReadWriteProxyTransaction) successor; } @@ -353,12 +407,12 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { closedException = this::abortedException; } + @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of recorded failure") private @NonNull CursorAwareDataTreeModification getModification() { if (closedException != null) { throw closedException.get(); } - - return Preconditions.checkNotNull(modification, "Transaction %s is DONE", getIdentifier()); + return verifyNotNull(modification, "Transaction %s is DONE", getIdentifier()); } private void sendRebased(final CommitLocalTransactionRequest request, final Consumer> callback) { @@ -369,8 +423,18 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction { // Rebase old modification on new data tree. final CursorAwareDataTreeModification mod = getModification(); - try (DataTreeModificationCursor cursor = mod.openCursor()) { - request.getModification().applyToCursor(cursor); + if (!(mod instanceof FailedDataTreeModification)) { + request.getDelayedFailure().ifPresentOrElse(failure -> { + if (recordedFailure == null) { + recordedFailure = failure; + } else { + recordedFailure.addSuppressed(failure); + } + }, () -> { + try (DataTreeModificationCursor cursor = mod.openCursor()) { + request.getModification().applyToCursor(cursor); + } + }); } if (markSealed()) { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java index 74aca03e86..6ab566e2fa 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java @@ -7,20 +7,20 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; -import static akka.pattern.Patterns.ask; import static com.google.common.base.Verify.verifyNotNull; import akka.dispatch.ExecutionContexts; import akka.dispatch.OnComplete; +import akka.pattern.Patterns; import akka.util.Timeout; -import com.google.common.collect.BiMap; import com.google.common.collect.ImmutableBiMap; -import com.google.common.collect.ImmutableBiMap.Builder; import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; import org.checkerframework.checker.lock.qual.GuardedBy; +import 
org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.client.BackendInfoResolver; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges; @@ -52,19 +52,20 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver { @GuardedBy("this") private long nextShard = 1; - private volatile BiMap shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L); + private volatile ImmutableBiMap shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L); // FIXME: we really need just ActorContext.findPrimaryShardAsync() ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) { super(clientId, actorUtils); - shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges( - this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES)) + shardAvailabilityChangesRegFuture = Patterns.ask(actorUtils.getShardManager(), + new RegisterForShardAvailabilityChanges(this::onShardAvailabilityChange), + Timeout.apply(60, TimeUnit.MINUTES)) .map(reply -> (Registration)reply, ExecutionContexts.global()); shardAvailabilityChangesRegFuture.onComplete(new OnComplete() { @Override - public void onComplete(Throwable failure, Registration reply) { + public void onComplete(final Throwable failure, final Registration reply) { if (failure != null) { LOG.error("RegisterForShardAvailabilityChanges failed", failure); } @@ -72,7 +73,7 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver { }, ExecutionContexts.global()); } - private void onShardAvailabilityChange(String shardName) { + private void onShardAvailabilityChange(final String shardName) { LOG.debug("onShardAvailabilityChange for {}", shardName); Long cookie = shards.get(shardName); @@ -85,22 +86,26 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver { } Long resolveShardForPath(final YangInstanceIdentifier path) { - final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path); + return resolveCookie(actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path)); + } + + Stream resolveAllShards() { + return actorUtils().getConfiguration().getAllShardNames().stream() + .sorted() + .map(this::resolveCookie); + } + + private @NonNull Long resolveCookie(final String shardName) { + final Long cookie = shards.get(shardName); + return cookie != null ? 
cookie : populateShard(shardName); + } + + private synchronized @NonNull Long populateShard(final String shardName) { Long cookie = shards.get(shardName); if (cookie == null) { - synchronized (this) { - cookie = shards.get(shardName); - if (cookie == null) { - cookie = nextShard++; - - Builder builder = ImmutableBiMap.builder(); - builder.putAll(shards); - builder.put(shardName, cookie); - shards = builder.build(); - } - } + cookie = nextShard++; + shards = ImmutableBiMap.builder().putAll(shards).put(shardName, cookie).build(); } - return cookie; } @@ -174,14 +179,14 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver { public void close() { shardAvailabilityChangesRegFuture.onComplete(new OnComplete() { @Override - public void onComplete(Throwable failure, Registration reply) { + public void onComplete(final Throwable failure, final Registration reply) { reply.close(); } }, ExecutionContexts.global()); } @Override - public String resolveCookieName(Long cookie) { + public String resolveCookieName(final Long cookie) { return verifyNotNull(shards.inverse().get(cookie), "Unexpected null cookie: %s", cookie); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java index 70b5960a05..437effe9ae 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java @@ -46,8 +46,8 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.Response; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.yangtools.concepts.Identifiable; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -373,7 +373,7 @@ abstract class ProxyHistory implements Identifiable { static ProxyHistory createClient(final AbstractClientHistory parent, final AbstractClientConnection connection, final LocalHistoryIdentifier identifier) { final Optional dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree); - return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.get()) + return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.orElseThrow()) : new Remote(parent, connection, identifier); } @@ -381,7 +381,7 @@ abstract class ProxyHistory implements Identifiable { final AbstractClientConnection connection, final LocalHistoryIdentifier identifier) { final Optional dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree); - return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.get()) + return dataTree.isPresent() ? 
new LocalSingle(parent, connection, identifier, dataTree.orElseThrow()) : new RemoteSingle(parent, connection, identifier); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java index 824a2f9b31..946e3341fd 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java @@ -49,7 +49,7 @@ import org.opendaylight.yangtools.util.concurrent.FluentFutures; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,18 +64,14 @@ import org.slf4j.LoggerFactory; *
    * This class is not safe to access from multiple application threads, as is usual for transactions. Its internal state * transitions based on backend responses are thread-safe. - * - * @author Robert Varga */ final class RemoteProxyTransaction extends AbstractProxyTransaction { private static final Logger LOG = LoggerFactory.getLogger(RemoteProxyTransaction.class); - // FIXME: make this tuneable - private static final int REQUEST_MAX_MODIFICATIONS = 1000; - private final ModifyTransactionRequestBuilder builder; private final boolean sendReadyOnSeal; private final boolean snapshotOnly; + private final int maxModifications; private boolean builderBusy; @@ -87,6 +83,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { this.snapshotOnly = snapshotOnly; this.sendReadyOnSeal = sendReadyOnSeal; builder = new ModifyTransactionRequestBuilder(identifier, localActor()); + maxModifications = parent.parent().actorUtils().getDatastoreContext().getShardBatchedModificationCount(); } @Override @@ -169,7 +166,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { private void sendModification(final TransactionRequest request, final OptionalLong enqueuedTicks) { if (enqueuedTicks.isPresent()) { - enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.getAsLong()); + enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.orElseThrow()); } else { sendRequest(request, response -> completeModify(request, response)); } @@ -184,7 +181,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { ensureInitializedBuilder(); builder.addModification(modification); - if (builder.size() >= REQUEST_MAX_MODIFICATIONS) { + if (builder.size() >= maxModifications) { flushBuilder(enqueuedTicks); } } else { @@ -205,8 +202,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { private Exception recordFailedResponse(final Response response) { final Exception failure; - if (response instanceof RequestFailure) { - final RequestException cause = ((RequestFailure) response).getCause(); + if (response instanceof RequestFailure requestFailure) { + final RequestException cause = requestFailure.getCause(); failure = cause instanceof RequestTimeoutException ? 
new DataStoreUnavailableException(cause.getMessage(), cause) : cause; } else { @@ -230,8 +227,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { final Response response) { LOG.debug("Exists request for {} completed with {}", path, response); - if (response instanceof ExistsTransactionSuccess) { - future.set(((ExistsTransactionSuccess) response).getExists()); + if (response instanceof ExistsTransactionSuccess success) { + future.set(success.getExists()); } else { failReadFuture(future, "Error executing exists request for path " + path, response); } @@ -243,8 +240,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { final Response response) { LOG.debug("Read request for {} completed with {}", path, response); - if (response instanceof ReadTransactionSuccess) { - future.set(((ReadTransactionSuccess) response).getData()); + if (response instanceof ReadTransactionSuccess success) { + future.set(success.getData()); } else { failReadFuture(future, "Error reading data for path " + path, response); } @@ -303,19 +300,19 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { } void handleForwardedRequest(final TransactionRequest request, final Consumer> callback) { - if (request instanceof ModifyTransactionRequest) { - handleForwardedModifyTransactionRequest(callback, (ModifyTransactionRequest) request); - } else if (request instanceof ReadTransactionRequest) { + if (request instanceof ModifyTransactionRequest modifyRequest) { + handleForwardedModifyTransactionRequest(callback, modifyRequest); + } else if (request instanceof ReadTransactionRequest readRequest) { ensureFlushedBuider(); sendRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(), - ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> { + readRequest.getPath(), isSnapshotOnly()), resp -> { recordFinishedRequest(resp); callback.accept(resp); }); - } else if (request instanceof ExistsTransactionRequest) { + } else if (request instanceof ExistsTransactionRequest existsRequest) { ensureFlushedBuider(); sendRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(), - ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> { + existsRequest.getPath(), isSnapshotOnly()), resp -> { recordFinishedRequest(resp); callback.accept(resp); }); @@ -336,7 +333,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { } else if (request instanceof TransactionPurgeRequest) { enqueuePurge(callback); } else { - throw new IllegalArgumentException("Unhandled request {}" + request); + throw unhandledRequest(request); } } @@ -355,7 +352,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { } final TransactionRequest tmp; - switch (maybeProto.get()) { + switch (maybeProto.orElseThrow()) { case ABORT: tmp = abortRequest(); sendRequest(tmp, resp -> { @@ -385,7 +382,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { }); break; default: - throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get()); + throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow()); } } } @@ -399,12 +396,12 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { @Override void handleReplayedLocalRequest(final AbstractLocalTransactionRequest request, final Consumer> callback, final long enqueuedTicks) { - if (request instanceof CommitLocalTransactionRequest) { - replayLocalCommitRequest((CommitLocalTransactionRequest) 
request, callback, enqueuedTicks); + if (request instanceof CommitLocalTransactionRequest commitRequest) { + replayLocalCommitRequest(commitRequest, callback, enqueuedTicks); } else if (request instanceof AbortLocalTransactionRequest) { enqueueRequest(abortRequest(), callback, enqueuedTicks); } else { - throw new IllegalStateException("Unhandled request " + request); + throw unhandledRequest(request); } } @@ -439,19 +436,19 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { final Consumer> cb = callback != null ? callback : resp -> { /* NOOP */ }; final OptionalLong optTicks = OptionalLong.of(enqueuedTicks); - if (request instanceof ModifyTransactionRequest) { - handleReplayedModifyTransactionRequest(enqueuedTicks, cb, (ModifyTransactionRequest) request); - } else if (request instanceof ReadTransactionRequest) { + if (request instanceof ModifyTransactionRequest modifyRequest) { + handleReplayedModifyTransactionRequest(enqueuedTicks, cb, modifyRequest); + } else if (request instanceof ReadTransactionRequest readRequest) { ensureFlushedBuider(optTicks); enqueueRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(), - ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> { + readRequest.getPath(), isSnapshotOnly()), resp -> { recordFinishedRequest(resp); cb.accept(resp); }, enqueuedTicks); - } else if (request instanceof ExistsTransactionRequest) { + } else if (request instanceof ExistsTransactionRequest existsRequest) { ensureFlushedBuider(optTicks); enqueueRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(), - ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> { + existsRequest.getPath(), isSnapshotOnly()), resp -> { recordFinishedRequest(resp); cb.accept(resp); }, enqueuedTicks); @@ -472,14 +469,13 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { enqueueDoAbort(callback, enqueuedTicks); } else if (request instanceof TransactionPurgeRequest) { enqueuePurge(callback, enqueuedTicks); - } else if (request instanceof IncrementTransactionSequenceRequest) { - final IncrementTransactionSequenceRequest req = (IncrementTransactionSequenceRequest) request; + } else if (request instanceof IncrementTransactionSequenceRequest req) { ensureFlushedBuider(optTicks); enqueueRequest(new IncrementTransactionSequenceRequest(getIdentifier(), nextSequence(), localActor(), snapshotOnly, req.getIncrement()), callback, enqueuedTicks); incrementSequence(req.getIncrement()); } else { - throw new IllegalArgumentException("Unhandled request {}" + request); + throw unhandledRequest(request); } } @@ -496,7 +492,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { } final TransactionRequest tmp; - switch (maybeProto.get()) { + switch (maybeProto.orElseThrow()) { case ABORT: tmp = abortRequest(); enqueueRequest(tmp, resp -> { @@ -526,7 +522,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction { }, enqueuedTicks); break; default: - throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get()); + throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow()); } } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java index 0958aade71..95ce87ca95 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java @@ -17,7 +17,7 @@ import java.util.Optional; import org.opendaylight.controller.cluster.access.ABIVersion; import org.opendaylight.controller.cluster.access.client.BackendInfo; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; /** * Combined backend tracking. Aside from usual {@link BackendInfo}, this object also tracks the cookie assigned diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java index aaaa88e8b6..984a4e4f0c 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java @@ -7,6 +7,7 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; +import java.util.stream.Stream; import org.opendaylight.controller.cluster.access.client.ClientActorContext; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -18,7 +19,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; */ final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavior { // Pre-boxed instance - private static final Long ZERO = Long.valueOf(0); + private static final Long ZERO = 0L; private SimpleDataStoreClientBehavior(final ClientActorContext context, final SimpleShardBackendResolver resolver) { @@ -34,4 +35,9 @@ final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavio Long resolveShardForPath(final YangInstanceIdentifier path) { return ZERO; } + + @Override + Stream resolveAllShards() { + return Stream.of(ZERO); + } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java index f9fffea025..6433b6b587 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java @@ -7,8 +7,10 @@ */ package org.opendaylight.controller.cluster.databroker.actors.dds; -import com.google.common.base.Preconditions; -import com.google.common.base.Verify; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Verify.verify; +import static java.util.Objects.requireNonNull; + import com.google.common.util.concurrent.AbstractFuture; import java.util.ArrayList; import java.util.Collection; @@ 
-44,11 +46,10 @@ class VotingFuture extends AbstractFuture { private volatile int neededVotes; VotingFuture(final T result, final int requiredVotes) { - Preconditions.checkArgument(requiredVotes > 0); + this.result = requireNonNull(result); + checkArgument(requiredVotes > 0); this.neededVotes = requiredVotes; - // null is okay to allow Void type - this.result = result; } void voteYes() { @@ -70,7 +71,7 @@ class VotingFuture extends AbstractFuture { private boolean castVote() { final int votes = VOTES_UPDATER.decrementAndGet(this); - Verify.verify(votes >= 0); + verify(votes >= 0); return votes == 0; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java index 70fef4197e..05edad2cf1 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java @@ -7,7 +7,6 @@ */ package org.opendaylight.controller.cluster.datastore; -import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; @@ -20,11 +19,12 @@ import com.google.common.base.Throwables; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import com.google.common.util.concurrent.Uninterruptibles; -import java.util.Collection; -import java.util.Set; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.common.actor.Dispatchers; import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient; @@ -36,18 +36,16 @@ import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardM import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache; -import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener; +import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry; import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import 
org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; -import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.duration.Duration; @@ -55,13 +53,12 @@ import scala.concurrent.duration.Duration; /** * Base implementation of a distributed DOMStore. */ -public abstract class AbstractDataStore implements DistributedDataStoreInterface, EffectiveModelContextListener, - DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher, - DOMDataTreeCommitCohortRegistry, AutoCloseable { - +public abstract class AbstractDataStore implements DistributedDataStoreInterface, + DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher, CommitCohortExtension, + AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class); - private final SettableFuture readinessFuture = SettableFuture.create(); + private final SettableFuture readinessFuture = SettableFuture.create(); private final ClientIdentifier identifier; private final DataStoreClient client; private final ActorUtils actorUtils; @@ -71,6 +68,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface private DatastoreInfoMXBeanImpl datastoreInfoMXBean; @SuppressWarnings("checkstyle:IllegalCatch") + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Testing overrides") protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster, final Configuration configuration, final DatastoreContextFactory datastoreContextFactory, final DatastoreSnapshot restoreFromSnapshot) { @@ -109,7 +107,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface LOG.error("Failed to get actor for {}", clientProps, e); clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender()); Throwables.throwIfUnchecked(e); - throw new RuntimeException(e); + throw new IllegalStateException(e); } identifier = client.getIdentifier(); @@ -125,21 +123,15 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface datastoreInfoMXBean.registerMBean(); } - @VisibleForTesting - protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) { - this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null"); - this.client = null; - this.identifier = requireNonNull(identifier); - } - @VisibleForTesting protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier, final DataStoreClient clientActor) { this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null"); - this.client = clientActor; + client = clientActor; this.identifier = requireNonNull(identifier); } + @VisibleForTesting protected AbstractShardManagerCreator getShardManagerCreator() { return new ShardManagerCreator(); } @@ -148,17 +140,18 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface return client; } - final ClientIdentifier getIdentifier() { - return identifier; - } - public void setCloseable(final AutoCloseable closeable) { this.closeable = closeable; } @Override - public ListenerRegistration registerTreeChangeListener( - final YangInstanceIdentifier treeId, final L listener) { + public final Registration registerTreeChangeListener(final YangInstanceIdentifier treeId, + final DOMDataTreeChangeListener listener) { + return registerTreeChangeListener(treeId, listener, true); + } + + private @NonNull 
Registration registerTreeChangeListener(final YangInstanceIdentifier treeId, + final DOMDataTreeChangeListener listener, final boolean clustered) { requireNonNull(treeId, "treeId should not be null"); requireNonNull(listener, "listener should not be null"); @@ -169,47 +162,51 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface if (treeId.isEmpty()) { // User is targeting root of the datastore. If there is more than one shard, we have to register with them // all and perform data composition. - final Set shardNames = actorUtils.getConfiguration().getAllShardNames(); + final var shardNames = actorUtils.getConfiguration().getAllShardNames(); if (shardNames.size() > 1) { - checkArgument(listener instanceof ClusteredDOMDataTreeChangeListener, - "Cannot listen on root without non-clustered listener %s", listener); + if (!clustered) { + throw new IllegalArgumentException( + "Cannot listen on root without non-clustered listener " + listener); + } return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames); } } - final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId); + final var shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId); LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName); - final DataTreeChangeListenerProxy listenerRegistrationProxy = - new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId); - listenerRegistrationProxy.init(shardName); + return DataTreeChangeListenerProxy.of(actorUtils, listener, treeId, clustered, shardName); + } - return listenerRegistrationProxy; + @Override + @Deprecated(since = "9.0.0", forRemoval = true) + public final Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId, + final DOMDataTreeChangeListener listener) { + return registerTreeChangeListener(treeId, listener, false); } @Override - public DOMDataTreeCommitCohortRegistration registerCommitCohort( - final DOMDataTreeIdentifier subtree, final C cohort) { - YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier(); + // Non-final for testing + public Registration registerCommitCohort(final DOMDataTreeIdentifier subtree, + final DOMDataTreeCommitCohort cohort) { + YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").path(); requireNonNull(cohort, "listener should not be null"); final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId); LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName); - DataTreeCohortRegistrationProxy cohortProxy = - new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort); + final var cohortProxy = new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort); cohortProxy.init(shardName); return cohortProxy; } - @Override public void onModelContextUpdated(final EffectiveModelContext newModelContext) { actorUtils.setSchemaContext(newModelContext); } @Override - public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) { + public final void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) { LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName()); actorUtils.setDatastoreContext(contextFactory); @@ -218,7 +215,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface @Override 
@SuppressWarnings("checkstyle:IllegalCatch") - public void close() { + public final void close() { LOG.info("Closing data store {}", identifier); if (datastoreConfigMXBean != null) { @@ -244,13 +241,13 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface } @Override - public ActorUtils getActorUtils() { + public final ActorUtils getActorUtils() { return actorUtils; } // TODO: consider removing this in favor of awaitReadiness() @Deprecated - public void waitTillReady() { + public final void waitTillReady() { LOG.info("Beginning to wait for data store to become ready : {}", identifier); final Duration toWait = initialSettleTime(); @@ -269,13 +266,13 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface @Beta @Deprecated - public boolean awaitReadiness() throws InterruptedException { + public final boolean awaitReadiness() throws InterruptedException { return awaitReadiness(initialSettleTime()); } @Beta @Deprecated - public boolean awaitReadiness(final Duration toWait) throws InterruptedException { + public final boolean awaitReadiness(final Duration toWait) throws InterruptedException { try { if (toWait.isFinite()) { try { @@ -296,7 +293,8 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface @Beta @Deprecated - public void awaitReadiness(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException { + public final void awaitReadiness(final long timeout, final TimeUnit unit) + throws InterruptedException, TimeoutException { if (!awaitReadiness(Duration.create(timeout, unit))) { throw new TimeoutException("Shard leaders failed to settle"); } @@ -332,41 +330,32 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface } @VisibleForTesting - SettableFuture readinessFuture() { + public final SettableFuture readinessFuture() { return readinessFuture; } @Override - @SuppressWarnings("unchecked") - public ListenerRegistration registerProxyListener( - final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard, - final DOMDataTreeChangeListener delegate) { - + public final Registration registerProxyListener(final YangInstanceIdentifier shardLookup, + final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) { requireNonNull(shardLookup, "shardLookup should not be null"); requireNonNull(insideShard, "insideShard should not be null"); requireNonNull(delegate, "delegate should not be null"); - final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup); - LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}", - delegate,shardLookup, shardName, insideShard); - - // wrap this in the ClusteredDOMDataTreeChangeLister interface - // since we always want clustered registration - final DataTreeChangeListenerProxy listenerRegistrationProxy = - new DataTreeChangeListenerProxy<>(actorUtils, new ClusteredDOMDataTreeChangeListener() { - @Override - public void onDataTreeChanged(final Collection changes) { - delegate.onDataTreeChanged(changes); - } - - @Override - public void onInitialData() { - delegate.onInitialData(); - } - }, insideShard); - listenerRegistrationProxy.init(shardName); - - return (ListenerRegistration) listenerRegistrationProxy; + final var shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup); + LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}", 
delegate, shardLookup, + shardName, insideShard); + + return DataTreeChangeListenerProxy.of(actorUtils, new DOMDataTreeChangeListener() { + @Override + public void onDataTreeChanged(final List changes) { + delegate.onDataTreeChanged(changes); + } + + @Override + public void onInitialData() { + delegate.onInitialData(); + } + }, insideShard, true, shardName); } private Duration initialSettleTime() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java index 4aa075f3d3..b44bf38432 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java @@ -12,9 +12,10 @@ import java.util.Map; import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; +import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes; abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreContextIntrospectorFactory { @Override @@ -35,11 +36,12 @@ abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreC @VisibleForTesting final @NonNull DatastoreContextIntrospector newInstance(final DatastoreContext context) { - final DataStorePropertiesContainer defaultPropsContainer = (DataStorePropertiesContainer) - serializer().fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME), - ImmutableNodes.containerNode(DataStorePropertiesContainer.QNAME)).getValue(); - - return new DatastoreContextIntrospector(context, defaultPropsContainer); + return new DatastoreContextIntrospector(context, (DataStorePropertiesContainer) serializer() + .fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME), + ImmutableNodes.newContainerBuilder() + .withNodeIdentifier(new NodeIdentifier(DataStorePropertiesContainer.QNAME)) + .build()) + .getValue()); } abstract BindingNormalizedNodeSerializer serializer(); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java index 022bb7aa07..d00db5757e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java @@ -37,7 +37,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet; import org.opendaylight.yangtools.concepts.Identifiable; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,10 +79,10 @@ abstract class AbstractFrontendHistory implements Identifiable handleTransactionRequest(final TransactionRequest request, final RequestEnvelope envelope, final long now) throws RequestException { - if (request instanceof TransactionPurgeRequest) { - return handleTransactionPurgeRequest((TransactionPurgeRequest) request, envelope, now); - } else if (request instanceof SkipTransactionsRequest) { - return handleSkipTransactionsRequest((SkipTransactionsRequest) request, envelope, now); + if (request instanceof TransactionPurgeRequest purgeRequest) { + return handleTransactionPurgeRequest(purgeRequest, envelope, now); + } else if (request instanceof SkipTransactionsRequest skipRequest) { + return handleSkipTransactionsRequest(skipRequest, envelope, now); } final TransactionIdentifier id = request.getTarget(); @@ -113,7 +113,7 @@ abstract class AbstractFrontendHistory implements Identifiable> maybeReplay = tx.replaySequence(request.getSequence()); if (maybeReplay.isPresent()) { - final TransactionSuccess replay = maybeReplay.get(); + final TransactionSuccess replay = maybeReplay.orElseThrow(); LOG.debug("{}: envelope {} replaying response {}", persistenceId(), envelope, replay); return replay; } @@ -170,7 +170,7 @@ abstract class AbstractFrontendHistory implements Identifiable(others.size() + 1); @@ -224,13 +224,12 @@ abstract class AbstractFrontendHistory implements Identifiable request, final TransactionIdentifier id) { - if (request instanceof CommitLocalTransactionRequest) { + if (request instanceof CommitLocalTransactionRequest commitLocalRequest) { LOG.debug("{}: allocating new ready transaction {}", persistenceId(), id); tree.getStats().incrementReadWriteTransactionCount(); - return createReadyTransaction(id, ((CommitLocalTransactionRequest) request).getModification()); + return createReadyTransaction(id, commitLocalRequest.getModification()); } - if (request instanceof AbstractReadTransactionRequest - && ((AbstractReadTransactionRequest) request).isSnapshotOnly()) { + if (request instanceof AbstractReadTransactionRequest readTxRequest && readTxRequest.isSnapshotOnly()) { LOG.debug("{}: allocating new open snapshot {}", persistenceId(), id); tree.getStats().incrementReadOnlyTransactionCount(); return createOpenSnapshot(id); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java index 3d1bebeb92..b4c65a80c0 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java @@ -12,7 +12,7 @@ import akka.actor.ActorRef; import akka.actor.Props; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.opendaylight.controller.cluster.common.actor.Dispatchers; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java index bec1a38f24..e559ff1249 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java @@ -14,7 +14,7 @@ import com.google.common.base.MoreObjects; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload; import org.opendaylight.yangtools.concepts.Identifiable; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; /** * Abstract base for transactions running on SharrdDataTree. This class is NOT thread-safe. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java deleted file mode 100644 index 7ef1cd4985..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import java.util.List; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import scala.concurrent.Future; - -/** - * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this - * implementation. In addition to the usual set of methods it also contains the list of actor - * futures. 
- */ -public abstract class AbstractThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort { - protected static final ListenableFuture IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null); - protected static final ListenableFuture IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE); - - abstract List> getCohortFutures(); -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java deleted file mode 100644 index 6941d1904c..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import java.util.Collection; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLongFieldUpdater; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.controller.cluster.access.concepts.MemberName; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; -import scala.util.Try; - -/** - * Factory for creating local and remote TransactionContext instances. Maintains a cache of known local - * transaction factories. 
- */ -abstract class AbstractTransactionContextFactory implements AutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContextFactory.class); - @SuppressWarnings("rawtypes") - private static final AtomicLongFieldUpdater TX_COUNTER_UPDATER = - AtomicLongFieldUpdater.newUpdater(AbstractTransactionContextFactory.class, "nextTx"); - - private final ConcurrentMap knownLocal = new ConcurrentHashMap<>(); - private final @NonNull LocalHistoryIdentifier historyId; - private final @NonNull ActorUtils actorUtils; - - // Used via TX_COUNTER_UPDATER - @SuppressWarnings("unused") - private volatile long nextTx; - - protected AbstractTransactionContextFactory(final ActorUtils actorUtils, final LocalHistoryIdentifier historyId) { - this.actorUtils = requireNonNull(actorUtils); - this.historyId = requireNonNull(historyId); - } - - final ActorUtils getActorUtils() { - return actorUtils; - } - - final LocalHistoryIdentifier getHistoryId() { - return historyId; - } - - @SuppressWarnings("checkstyle:IllegalCatch") - private TransactionContext maybeCreateLocalTransactionContext(final TransactionProxy parent, - final String shardName) { - final LocalTransactionFactory local = knownLocal.get(shardName); - if (local != null) { - LOG.debug("Tx {} - Creating local component for shard {} using factory {}", parent.getIdentifier(), - shardName, local); - - try { - return createLocalTransactionContext(local, parent); - } catch (Exception e) { - return new NoOpTransactionContext(e, parent.getIdentifier()); - } - } - - return null; - } - - private AbstractTransactionContextWrapper maybeCreateDirectTransactionContextWrapper( - final PrimaryShardInfo primaryShardInfo, final TransactionProxy parent, - final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) { - LOG.debug("Tx {}: Found primary {} for shard {}, trying to use DirectTransactionContextWrapper", - parent.getIdentifier(), primaryShardInfo.getPrimaryShardActor(), shardName); - - updateShardInfo(shardName, primaryShardInfo); - - final TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName); - try { - if (localContext != null) { - LOG.debug("Tx {}: Local transaction context created successfully, using DirectTransactionWrapper", - parent.getIdentifier()); - return new DirectTransactionContextWrapper(parent.getIdentifier(), actorUtils, shardName, - localContext); - } - - LOG.debug("Tx {}: Local transaction context creation failed, using DelayedTransactionWrapper", - parent.getIdentifier()); - final RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport( - transactionContextWrapper, parent, shardName); - remote.setPrimaryShard(primaryShardInfo); - return transactionContextWrapper; - } finally { - onTransactionContextCreated(parent.getIdentifier()); - } - } - - private void onFindPrimaryShardSuccess(final PrimaryShardInfo primaryShardInfo, final TransactionProxy parent, - final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) { - LOG.debug("Tx {}: Found primary {} for shard {}", parent.getIdentifier(), - primaryShardInfo.getPrimaryShardActor(), shardName); - - updateShardInfo(shardName, primaryShardInfo); - - final TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName); - try { - if (localContext != null) { - transactionContextWrapper.executePriorTransactionOperations(localContext); - } else { - final RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport( 
- transactionContextWrapper, parent, shardName); - remote.setPrimaryShard(primaryShardInfo); - } - } finally { - onTransactionContextCreated(parent.getIdentifier()); - } - } - - private void onFindPrimaryShardFailure(final Throwable failure, final TransactionProxy parent, - final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) { - LOG.debug("Tx {}: Find primary for shard {} failed", parent.getIdentifier(), shardName, failure); - - try { - transactionContextWrapper.executePriorTransactionOperations( - new NoOpTransactionContext(failure, parent.getIdentifier())); - } finally { - onTransactionContextCreated(parent.getIdentifier()); - } - } - - final AbstractTransactionContextWrapper newTransactionContextWrapper(final TransactionProxy parent, - final String shardName) { - final DelayedTransactionContextWrapper contextWrapper = new DelayedTransactionContextWrapper( - parent.getIdentifier(), actorUtils, shardName); - final Future findPrimaryFuture = findPrimaryShard(shardName, parent.getIdentifier()); - if (findPrimaryFuture.isCompleted()) { - final Try maybe = findPrimaryFuture.value().get(); - if (maybe.isSuccess()) { - return maybeCreateDirectTransactionContextWrapper(maybe.get(), parent, shardName, contextWrapper); - } - - onFindPrimaryShardFailure(maybe.failed().get(), parent, shardName, contextWrapper); - } else { - findPrimaryFuture.onComplete(result -> { - if (result.isSuccess()) { - onFindPrimaryShardSuccess(result.get(), parent, shardName, contextWrapper); - } else { - onFindPrimaryShardFailure(result.failed().get(), parent, shardName, contextWrapper); - } - return null; - }, actorUtils.getClientDispatcher()); - } - return contextWrapper; - } - - private void updateShardInfo(final String shardName, final PrimaryShardInfo primaryShardInfo) { - final Optional maybeDataTree = primaryShardInfo.getLocalShardDataTree(); - if (maybeDataTree.isPresent()) { - if (!knownLocal.containsKey(shardName)) { - LOG.debug("Shard {} resolved to local data tree - adding local factory", shardName); - - F factory = factoryForShard(shardName, primaryShardInfo.getPrimaryShardActor(), maybeDataTree.get()); - knownLocal.putIfAbsent(shardName, factory); - } - } else if (knownLocal.containsKey(shardName)) { - LOG.debug("Shard {} invalidating local data tree", shardName); - - knownLocal.remove(shardName); - } - } - - protected final MemberName getMemberName() { - return historyId.getClientId().getFrontendId().getMemberName(); - } - - /** - * Create an identifier for the next TransactionProxy attached to this component - * factory. - * @return Transaction identifier, may not be null. - */ - protected final TransactionIdentifier nextIdentifier() { - return new TransactionIdentifier(historyId, TX_COUNTER_UPDATER.getAndIncrement(this)); - } - - /** - * Find the primary shard actor. - * - * @param shardName Shard name - * @return Future containing shard information. - */ - protected abstract Future findPrimaryShard(@NonNull String shardName, - @NonNull TransactionIdentifier txId); - - /** - * Create local transaction factory for specified shard, backed by specified shard leader - * and data tree instance. - * - * @param shardName the shard name - * @param shardLeader the shard leader - * @param dataTree Backing data tree instance. The data tree may only be accessed in - * read-only manner. - * @return Transaction factory for local use. 
- */ - protected abstract F factoryForShard(String shardName, ActorSelection shardLeader, ReadOnlyDataTree dataTree); - - /** - * Callback invoked from child transactions to push any futures, which need to - * be waited for before the next transaction is allocated. - * @param cohortFutures Collection of futures - */ - protected abstract void onTransactionReady(@NonNull TransactionIdentifier transaction, - @NonNull Collection> cohortFutures); - - /** - * Callback invoked when the internal TransactionContext has been created for a transaction. - * - * @param transactionId the ID of the transaction. - */ - protected abstract void onTransactionContextCreated(@NonNull TransactionIdentifier transactionId); - - private static TransactionContext createLocalTransactionContext(final LocalTransactionFactory factory, - final TransactionProxy parent) { - - switch (parent.getType()) { - case READ_ONLY: - final DOMStoreReadTransaction readOnly = factory.newReadOnlyTransaction(parent.getIdentifier()); - return new LocalTransactionContext(readOnly, parent.getIdentifier(), factory) { - @Override - DOMStoreWriteTransaction getWriteDelegate() { - throw new UnsupportedOperationException(); - } - - @Override - DOMStoreReadTransaction getReadDelegate() { - return readOnly; - } - }; - case READ_WRITE: - final DOMStoreReadWriteTransaction readWrite = factory.newReadWriteTransaction(parent.getIdentifier()); - return new LocalTransactionContext(readWrite, parent.getIdentifier(), factory) { - @Override - DOMStoreWriteTransaction getWriteDelegate() { - return readWrite; - } - - @Override - DOMStoreReadTransaction getReadDelegate() { - return readWrite; - } - }; - case WRITE_ONLY: - final DOMStoreWriteTransaction writeOnly = factory.newWriteOnlyTransaction(parent.getIdentifier()); - return new LocalTransactionContext(writeOnly, parent.getIdentifier(), factory) { - @Override - DOMStoreWriteTransaction getWriteDelegate() { - return writeOnly; - } - - @Override - DOMStoreReadTransaction getReadDelegate() { - throw new UnsupportedOperationException(); - } - }; - default: - throw new IllegalArgumentException("Invalid transaction type: " + parent.getType()); - } - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java deleted file mode 100644 index 49dac87cc9..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import java.util.Optional; -import java.util.SortedSet; -import java.util.concurrent.TimeUnit; -import org.eclipse.jdt.annotation.NonNull; -import org.eclipse.jdt.annotation.Nullable; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import scala.concurrent.Future; - -/** - * A helper class that wraps an eventual TransactionContext instance. We have two specializations: - *
- * <ul>
- *   <li>{@link DelayedTransactionContextWrapper}, which enqueues operations towards the backend</li>
- *   <li>{@link DirectTransactionContextWrapper}, which sends operations to the backend</li>
- * </ul>
    - */ -abstract class AbstractTransactionContextWrapper { - private final TransactionIdentifier identifier; - private final OperationLimiter limiter; - private final String shardName; - - AbstractTransactionContextWrapper(@NonNull final TransactionIdentifier identifier, - @NonNull final ActorUtils actorUtils, @NonNull final String shardName) { - this.identifier = requireNonNull(identifier); - this.shardName = requireNonNull(shardName); - limiter = new OperationLimiter(identifier, - // 1 extra permit for the ready operation - actorUtils.getDatastoreContext().getShardBatchedModificationCount() + 1, - TimeUnit.MILLISECONDS.toSeconds(actorUtils.getDatastoreContext().getOperationTimeoutInMillis())); - } - - final TransactionIdentifier getIdentifier() { - return identifier; - } - - final OperationLimiter getLimiter() { - return limiter; - } - - final String getShardName() { - return shardName; - } - - abstract @Nullable TransactionContext getTransactionContext(); - - /** - * Either enqueue or execute specified operation. - * - * @param op Operation to (eventually) execute - */ - abstract void maybeExecuteTransactionOperation(TransactionOperation op); - - /** - * Mark the transaction as ready. - * - * @param participatingShardNames Shards which participate on the transaction - * @return Future indicating the transaction has been readied on the backend - */ - abstract @NonNull Future readyTransaction(Optional> participatingShardNames); -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java index 1f87fd4259..e3bb074bdc 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java @@ -14,9 +14,10 @@ import com.google.common.util.concurrent.FutureCallback; import java.util.Optional; import java.util.SortedSet; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,12 +53,12 @@ final class ChainedCommitCohort extends ShardDataTreeCohort { } @Override - public TransactionIdentifier getIdentifier() { - return delegate.getIdentifier(); + TransactionIdentifier transactionId() { + return delegate.transactionId(); } @Override - public void canCommit(final FutureCallback callback) { + public void canCommit(final FutureCallback callback) { delegate.canCommit(callback); } @@ -67,7 +68,7 @@ final class ChainedCommitCohort extends ShardDataTreeCohort { } @Override - public void abort(final FutureCallback callback) { + public void abort(final FutureCallback callback) { delegate.abort(callback); } diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java index 6c0c13b3ab..120b004a6e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java @@ -13,15 +13,18 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; import com.google.common.primitives.UnsignedLong; import com.google.common.util.concurrent.FutureCallback; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.List; import java.util.Optional; import java.util.SortedSet; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortDecorator; import org.opendaylight.controller.cluster.datastore.modification.Modification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; +@Deprecated(since = "9.0.0", forRemoval = true) final class CohortEntry { private final ReadWriteShardDataTreeTransaction transaction; private final TransactionIdentifier transactionId; @@ -36,16 +39,16 @@ final class CohortEntry { private Shard shard; private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) { - this.cohort = null; + cohort = null; this.transaction = requireNonNull(transaction); - this.transactionId = transaction.getIdentifier(); + transactionId = transaction.getIdentifier(); this.clientVersion = clientVersion; } private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) { this.cohort = requireNonNull(cohort); - this.transactionId = cohort.getIdentifier(); - this.transaction = null; + transactionId = cohort.transactionId(); + transaction = null; this.clientVersion = clientVersion; } @@ -90,6 +93,7 @@ final class CohortEntry { } @SuppressWarnings("checkstyle:IllegalCatch") + @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Re-thrown") void applyModifications(final List modifications) { totalBatchedModificationsReceived++; if (lastBatchedModificationsException == null) { @@ -105,7 +109,7 @@ final class CohortEntry { } } - void canCommit(final FutureCallback callback) { + void canCommit(final FutureCallback callback) { cohort.canCommit(callback); } @@ -117,7 +121,7 @@ final class CohortEntry { cohort.commit(callback); } - void abort(final FutureCallback callback) { + void abort(final FutureCallback callback) { cohort.abort(callback); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java index bca00ebc4a..078b45f68f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java @@ -20,11 +20,9 @@ import akka.dispatch.Recover; import akka.pattern.Patterns; import akka.util.Timeout; import com.google.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; @@ -36,8 +34,9 @@ import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.CanCommit; import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.Success; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.compat.java8.FutureConverters; @@ -48,7 +47,6 @@ import scala.concurrent.Future; *
    * It tracks current operation and list of cohorts which successfuly finished previous phase in * case, if abort is necessary to invoke it only on cohort steps which are still active. - * */ class CompositeDataTreeCohort { private static final Logger LOG = LoggerFactory.getLogger(CompositeDataTreeCohort.class); @@ -92,7 +90,7 @@ class CompositeDataTreeCohort { ABORTED } - static final Recover EXCEPTION_TO_MESSAGE = new Recover() { + static final Recover EXCEPTION_TO_MESSAGE = new Recover<>() { @Override public Failure recover(final Throwable error) { return new Failure(error); @@ -101,17 +99,17 @@ class CompositeDataTreeCohort { private final DataTreeCohortActorRegistry registry; private final TransactionIdentifier txId; - private final SchemaContext schema; + private final EffectiveModelContext schema; private final Executor callbackExecutor; private final Timeout timeout; - private @NonNull List successfulFromPrevious = Collections.emptyList(); + private @NonNull List successfulFromPrevious = List.of(); private State state = State.IDLE; CompositeDataTreeCohort(final DataTreeCohortActorRegistry registry, final TransactionIdentifier transactionID, - final SchemaContext schema, final Executor callbackExecutor, final Timeout timeout) { + final EffectiveModelContext schema, final Executor callbackExecutor, final Timeout timeout) { this.registry = requireNonNull(registry); - this.txId = requireNonNull(transactionID); + txId = requireNonNull(transactionID); this.schema = requireNonNull(schema); this.callbackExecutor = requireNonNull(callbackExecutor); this.timeout = requireNonNull(timeout); @@ -135,11 +133,11 @@ class CompositeDataTreeCohort { throw new IllegalStateException("Unhandled state " + state); } - successfulFromPrevious = Collections.emptyList(); + successfulFromPrevious = List.of(); state = State.IDLE; } - Optional> canCommit(final DataTreeCandidate tip) { + Optional> canCommit(final DataTreeCandidate tip) { if (LOG.isTraceEnabled()) { LOG.trace("{}: canCommit - candidate: {}", txId, tip); } else { @@ -149,7 +147,7 @@ class CompositeDataTreeCohort { final List messages = registry.createCanCommitMessages(txId, tip, schema); LOG.debug("{}: canCommit - messages: {}", txId, messages); if (messages.isEmpty()) { - successfulFromPrevious = Collections.emptyList(); + successfulFromPrevious = List.of(); changeStateFrom(State.IDLE, State.CAN_COMMIT_SUCCESSFUL); return Optional.empty(); } @@ -167,7 +165,7 @@ class CompositeDataTreeCohort { return Optional.of(processResponses(futures, State.CAN_COMMIT_SENT, State.CAN_COMMIT_SUCCESSFUL)); } - Optional> preCommit() { + Optional> preCommit() { LOG.debug("{}: preCommit - successfulFromPrevious: {}", txId, successfulFromPrevious); if (successfulFromPrevious.isEmpty()) { @@ -181,7 +179,7 @@ class CompositeDataTreeCohort { return Optional.of(processResponses(futures, State.PRE_COMMIT_SENT, State.PRE_COMMIT_SUCCESSFUL)); } - Optional> commit() { + Optional> commit() { LOG.debug("{}: commit - successfulFromPrevious: {}", txId, successfulFromPrevious); if (successfulFromPrevious.isEmpty()) { changeStateFrom(State.PRE_COMMIT_SUCCESSFUL, State.COMMITED); @@ -222,10 +220,10 @@ class CompositeDataTreeCohort { return ret; } - private @NonNull CompletionStage processResponses(final List>> futures, + private @NonNull CompletionStage processResponses(final List>> futures, final State currentState, final State afterState) { LOG.debug("{}: processResponses - currentState: {}, afterState: {}", txId, currentState, afterState); - final CompletableFuture 
returnFuture = new CompletableFuture<>(); + final CompletableFuture returnFuture = new CompletableFuture<>(); Future> aggregateFuture = Futures.sequence(Lists.transform(futures, Entry::getValue), ExecutionContexts.global()); @@ -240,14 +238,10 @@ class CompositeDataTreeCohort { return returnFuture; } - // FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the - // generic type is Void. - @SuppressFBWarnings(value = { "NP_NONNULL_PARAM_VIOLATION", "UPM_UNCALLED_PRIVATE_METHOD" }, - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void processResponses(final Throwable failure, final Iterable results, - final State currentState, final State afterState, final CompletableFuture resultFuture) { + final State currentState, final State afterState, final CompletableFuture resultFuture) { if (failure != null) { - successfulFromPrevious = Collections.emptyList(); + successfulFromPrevious = List.of(); resultFuture.completeExceptionally(failure); return; } @@ -274,12 +268,12 @@ class CompositeDataTreeCohort { firstEx.addSuppressed(it.next().cause()); } - successfulFromPrevious = Collections.emptyList(); + successfulFromPrevious = List.of(); resultFuture.completeExceptionally(firstEx); } else { successfulFromPrevious = successful; changeStateFrom(currentState, afterState); - resultFuture.complete(null); + resultFuture.complete(Empty.value()); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java index a8a0124fcd..88877bc36a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java @@ -12,7 +12,7 @@ import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate; import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode; final class DOMDataTreeCandidateTO implements DOMDataTreeCandidate { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java index eb444f2d8f..996fe1023d 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java @@ -27,9 +27,12 @@ public final class DataStoreVersions { public static final short FLUORINE_VERSION = 9; @Deprecated public static final short NEON_SR2_VERSION = 10; + @Deprecated public static final short SODIUM_SR1_VERSION = 11; + @Deprecated public static final short PHOSPHORUS_VERSION = 12; - public static final short CURRENT_VERSION = PHOSPHORUS_VERSION; + public static final short POTASSIUM_VERSION = 13; + public static final short CURRENT_VERSION = POTASSIUM_VERSION; 
private DataStoreVersions() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java index 9384a84e38..6f88d3ea98 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java @@ -60,9 +60,9 @@ class DataTreeChangeListenerActor extends AbstractUntypedActor { LOG.debug("{}: Notifying onInitialData to listener {}", logContext, listener); try { - this.listener.onInitialData(); + listener.onInitialData(); } catch (Exception e) { - LOG.error("{}: Error notifying listener {}", logContext, this.listener, e); + LOG.error("{}: Error notifying listener {}", logContext, listener, e); } } @@ -75,15 +75,21 @@ class DataTreeChangeListenerActor extends AbstractUntypedActor { return; } - LOG.debug("{}: Sending {} change notification(s) {} to listener {}", logContext, message.getChanges().size(), - message.getChanges(), listener); + final var changes = message.getChanges(); + LOG.debug("{}: Sending {} change notification(s) to listener {}", logContext, changes.size(), listener); + if (LOG.isTraceEnabled() && !changes.isEmpty()) { + LOG.trace("{}: detailed change follow", logContext); + for (int i = 0, size = changes.size(); i < size; ++i) { + LOG.trace("{}: change {}: {}", logContext, i, changes.get(i)); + } + } notificationCount++; try { - this.listener.onDataTreeChanged(message.getChanges()); + listener.onDataTreeChanged(changes); } catch (Exception e) { - LOG.error("{}: Error notifying listener {}", logContext, this.listener, e); + LOG.error("{}: Error notifying listener {}", logContext, listener, e); } // TODO: do we really need this? 
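The DataTreeChangeListenerActor hunk above keeps the per-notification summary at debug level and only renders the individual changes when trace logging is enabled, so the per-element formatting cost is not paid in normal operation. A minimal standalone sketch of that guard pattern follows; the ChangeLogger class name and the List<String> payload are illustrative placeholders, not types from this change set:

// Illustrative sketch only: mirrors the "guard expensive per-element logging
// behind isTraceEnabled()" pattern used in the DataTreeChangeListenerActor hunk.
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class ChangeLogger {
    private static final Logger LOG = LoggerFactory.getLogger(ChangeLogger.class);

    void logChanges(final String logContext, final List<String> changes) {
        // Summary is always emitted at debug level.
        LOG.debug("{}: Sending {} change notification(s)", logContext, changes.size());

        // Individual changes are only expanded when trace is enabled.
        if (LOG.isTraceEnabled() && !changes.isEmpty()) {
            LOG.trace("{}: detailed changes follow", logContext);
            for (int i = 0, size = changes.size(); i < size; ++i) {
                LOG.trace("{}: change {}: {}", logContext, i, changes.get(i));
            }
        }
    }
}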
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java index 0268b8e36c..be849284e7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java @@ -14,20 +14,20 @@ import akka.actor.ActorSelection; import akka.actor.PoisonPill; import akka.dispatch.OnComplete; import com.google.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.Executor; import org.checkerframework.checker.lock.qual.GuardedBy; +import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException; import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration; import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener; import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.AbstractListenerRegistration; +import org.opendaylight.yangtools.concepts.AbstractObjectRegistration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import scala.concurrent.Future; /** * Proxy class for holding required state to lazily instantiate a listener registration with an @@ -35,28 +35,61 @@ import scala.concurrent.Future; * * @param listener type */ -final class DataTreeChangeListenerProxy extends AbstractListenerRegistration { +final class DataTreeChangeListenerProxy extends AbstractObjectRegistration { private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class); private final ActorRef dataChangeListenerActor; private final ActorUtils actorUtils; private final YangInstanceIdentifier registeredPath; + private final boolean clustered; @GuardedBy("this") private ActorSelection listenerRegistrationActor; - DataTreeChangeListenerProxy(final ActorUtils actorUtils, final T listener, - final YangInstanceIdentifier registeredPath) { + @VisibleForTesting + private DataTreeChangeListenerProxy(final ActorUtils actorUtils, final DOMDataTreeChangeListener listener, + final YangInstanceIdentifier registeredPath, final boolean clustered, final String shardName) { super(listener); this.actorUtils = requireNonNull(actorUtils); this.registeredPath = requireNonNull(registeredPath); - this.dataChangeListenerActor = actorUtils.getActorSystem().actorOf( + this.clustered = clustered; + dataChangeListenerActor = actorUtils.getActorSystem().actorOf( DataTreeChangeListenerActor.props(getInstance(), registeredPath) .withDispatcher(actorUtils.getNotificationDispatcherPath())); - LOG.debug("{}: Created actor {} for DTCL {}", actorUtils.getDatastoreContext().getLogicalStoreType(), dataChangeListenerActor, listener); } + 
static @NonNull DataTreeChangeListenerProxy of(final ActorUtils actorUtils, + final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath, + final boolean clustered, final String shardName) { + return ofTesting(actorUtils, listener, registeredPath, clustered, shardName, MoreExecutors.directExecutor()); + } + + @VisibleForTesting + static @NonNull DataTreeChangeListenerProxy ofTesting(final ActorUtils actorUtils, + final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath, + final boolean clustered, final String shardName, final Executor executor) { + final var ret = new DataTreeChangeListenerProxy(actorUtils, listener, registeredPath, clustered, shardName); + executor.execute(() -> { + LOG.debug("{}: Starting discovery of shard {}", ret.logContext(), shardName); + actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<>() { + @Override + public void onComplete(final Throwable failure, final ActorRef shard) { + if (failure instanceof LocalShardNotFoundException) { + LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} cannot be " + + "registered", ret.logContext(), shardName, listener, registeredPath); + } else if (failure != null) { + LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} cannot be " + + "registered", ret.logContext(), shardName, listener, registeredPath, failure); + } else { + ret.doRegistration(shard); + } + } + }, actorUtils.getClientDispatcher()); + }); + return ret; + } + @Override protected synchronized void removeRegistration() { if (listenerRegistrationActor != null) { @@ -68,27 +101,6 @@ final class DataTreeChangeListenerProxy ext dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender()); } - void init(final String shardName) { - Future findFuture = actorUtils.findLocalShardAsync(shardName); - findFuture.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final ActorRef shard) { - if (failure instanceof LocalShardNotFoundException) { - LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} " - + "cannot be registered", logContext(), shardName, getInstance(), registeredPath); - } else if (failure != null) { - LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} " - + "cannot be registered", logContext(), shardName, getInstance(), registeredPath, - failure); - } else { - doRegistration(shard); - } - } - }, actorUtils.getClientDispatcher()); - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void setListenerRegistrationActor(final ActorSelection actor) { if (actor == null) { LOG.debug("{}: Ignoring null actor on {}", logContext(), this); @@ -97,7 +109,7 @@ final class DataTreeChangeListenerProxy ext synchronized (this) { if (!isClosed()) { - this.listenerRegistrationActor = actor; + listenerRegistrationActor = actor; return; } } @@ -106,28 +118,21 @@ final class DataTreeChangeListenerProxy ext actor.tell(CloseDataTreeNotificationListenerRegistration.getInstance(), null); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void doRegistration(final ActorRef shard) { - - Future future = actorUtils.executeOperationAsync(shard, - new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor, - getInstance() instanceof 
ClusteredDOMDataTreeChangeListener), - actorUtils.getDatastoreContext().getShardInitializationTimeout()); - - future.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final Object result) { - if (failure != null) { - LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(), + actorUtils.executeOperationAsync(shard, + new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor, clustered), + actorUtils.getDatastoreContext().getShardInitializationTimeout()).onComplete(new OnComplete<>() { + @Override + public void onComplete(final Throwable failure, final Object result) { + if (failure != null) { + LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(), getInstance(), registeredPath, failure); - } else { - RegisterDataTreeNotificationListenerReply reply = (RegisterDataTreeNotificationListenerReply)result; - setListenerRegistrationActor(actorUtils.actorSelection( - reply.getListenerRegistrationPath())); + } else { + setListenerRegistrationActor(actorUtils.actorSelection( + ((RegisterDataTreeNotificationListenerReply) result).getListenerRegistrationPath())); + } } - } - }, actorUtils.getClientDispatcher()); + }, actorUtils.getClientDispatcher()); } @VisibleForTesting diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java index 09586b270b..f5e1d1374b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java @@ -23,9 +23,9 @@ import org.slf4j.LoggerFactory; final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory { private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class); - private final Collection> + private final Collection delayedDataTreeChangeListenerRegistrations = ConcurrentHashMap.newKeySet(); - private final Collection> + private final Collection delayedListenerOnAllRegistrations = ConcurrentHashMap.newKeySet(); private final Collection leaderOnlyListenerActors = ConcurrentHashMap.newKeySet(); private final Collection allListenerActors = ConcurrentHashMap.newKeySet(); @@ -62,8 +62,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory reg : - delayedListenerOnAllRegistrations) { + for (var reg : delayedListenerOnAllRegistrations) { reg.doRegistration(this); } @@ -71,8 +70,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory reg : - delayedDataTreeChangeListenerRegistrations) { + for (var reg : delayedDataTreeChangeListenerRegistrations) { reg.doRegistration(this); } @@ -91,9 +89,8 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory delayedReg = - new DelayedDataTreeChangeListenerRegistration<>(message, registrationActor); - final Collection> delayedRegList; + final var delayedReg = new DelayedDataTreeChangeListenerRegistration(message, registrationActor); + final Collection delayedRegList; if (message.isRegisterOnAllInstances()) { delayedRegList = delayedListenerOnAllRegistrations; } else { diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java index 9c0d1ca569..9efca64937 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java @@ -14,7 +14,6 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -30,7 +29,7 @@ import org.opendaylight.mdsal.common.api.ThreePhaseCommitStep; import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate; import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; +import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; /** * Proxy actor which acts as a facade to the user-provided commit cohort. Responsible for @@ -90,10 +89,10 @@ final class DataTreeCohortActor extends AbstractUntypedActor { private final Collection candidates; private final ActorRef cohort; - private final SchemaContext schema; + private final EffectiveModelContext schema; CanCommit(final TransactionIdentifier txId, final Collection candidates, - final SchemaContext schema, final ActorRef cohort) { + final EffectiveModelContext schema, final ActorRef cohort) { super(txId); this.cohort = Objects.requireNonNull(cohort); this.candidates = Objects.requireNonNull(candidates); @@ -104,7 +103,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor { return candidates; } - SchemaContext getSchema() { + EffectiveModelContext getSchema() { return schema; } @@ -208,15 +207,11 @@ final class DataTreeCohortActor extends AbstractUntypedActor { }, callbackExecutor); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void failed(final TransactionIdentifier txId, final ActorRef sender, final Throwable failure) { currentStateMap.remove(txId); sender.tell(new Status.Failure(failure), getSelf()); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private void success(final TransactionIdentifier txId, final ActorRef sender, final S nextStep) { currentStateMap.computeIfPresent(txId, (key, behaviour) -> nextBehaviour(txId, nextStep)); sender.tell(new Success(getSelf(), txId), getSelf()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java index 3ff6a9f0e6..fa10f947db 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java @@ -26,27 +26,24 @@ import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate; import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree; -import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode; -import org.opendaylight.mdsal.dom.spi.RegistrationTreeSnapshot; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode; +import org.opendaylight.yangtools.yang.data.tree.api.ModificationType; +import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Registry of user commit cohorts, which is responsible for handling registration and calculation * of affected cohorts based on {@link DataTreeCandidate}. This class is NOT thread-safe. - * */ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortActorRegistry.class); - private final Map> cohortToNode = new HashMap<>(); + private final Map> cohortToNode = new HashMap<>(); Collection getCohortActors() { return new ArrayList<>(cohortToNode.keySet()); @@ -57,8 +54,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { takeLock(); try { final ActorRef cohortRef = cohort.getCohort(); - final RegistrationTreeNode node = - findNodeFor(cohort.getPath().getRootIdentifier().getPathArguments()); + final Node node = findNodeFor(cohort.getPath().path().getPathArguments()); addRegistration(node, cohort.getCohort()); cohortToNode.put(cohortRef, node); } catch (final Exception e) { @@ -72,7 +68,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { void removeCommitCohort(final ActorRef sender, final RemoveCohort message) { final ActorRef cohort = message.getCohort(); - final RegistrationTreeNode node = cohortToNode.get(cohort); + final Node node = cohortToNode.get(cohort); if (node != null) { removeRegistration(node, cohort); cohortToNode.remove(cohort); @@ -82,8 +78,8 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { } List createCanCommitMessages(final TransactionIdentifier txId, - final DataTreeCandidate candidate, final SchemaContext schema) { - try (RegistrationTreeSnapshot cohorts = takeSnapshot()) { + final DataTreeCandidate candidate, final EffectiveModelContext schema) { + try (var cohorts = takeSnapshot()) { return new CanCommitMessageBuilder(txId, candidate, schema).perform(cohorts.getRootNode()); } } @@ -131,54 +127,52 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { private final Multimap actorToCandidates = ArrayListMultimap.create(); private final TransactionIdentifier txId; private final DataTreeCandidate candidate; - private final SchemaContext schema; + private final EffectiveModelContext 
schema; CanCommitMessageBuilder(final TransactionIdentifier txId, final DataTreeCandidate candidate, - final SchemaContext schema) { + final EffectiveModelContext schema) { this.txId = requireNonNull(txId); this.candidate = requireNonNull(candidate); this.schema = schema; } private void lookupAndCreateCanCommits(final List args, final int offset, - final RegistrationTreeNode node) { + final Node node) { if (args.size() != offset) { final PathArgument arg = args.get(offset); - final RegistrationTreeNode exactChild = node.getExactChild(arg); + final var exactChild = node.getExactChild(arg); if (exactChild != null) { lookupAndCreateCanCommits(args, offset + 1, exactChild); } - for (final RegistrationTreeNode c : node.getInexactChildren(arg)) { - lookupAndCreateCanCommits(args, offset + 1, c); + for (var inexact : node.getInexactChildren(arg)) { + lookupAndCreateCanCommits(args, offset + 1, inexact); } } else { lookupAndCreateCanCommits(candidate.getRootPath(), node, candidate.getRootNode()); } } - private void lookupAndCreateCanCommits(final YangInstanceIdentifier path, - final RegistrationTreeNode regNode, final DataTreeCandidateNode candNode) { - if (candNode.getModificationType() == ModificationType.UNMODIFIED) { + private void lookupAndCreateCanCommits(final YangInstanceIdentifier path, final Node regNode, + final DataTreeCandidateNode candNode) { + if (candNode.modificationType() == ModificationType.UNMODIFIED) { LOG.debug("Skipping unmodified candidate {}", path); return; } - final Collection regs = regNode.getRegistrations(); + final var regs = regNode.getRegistrations(); if (!regs.isEmpty()) { createCanCommits(regs, path, candNode); } - for (final DataTreeCandidateNode candChild : candNode.getChildNodes()) { - if (candChild.getModificationType() != ModificationType.UNMODIFIED) { - final RegistrationTreeNode regChild = - regNode.getExactChild(candChild.getIdentifier()); + for (var candChild : candNode.childNodes()) { + if (candChild.modificationType() != ModificationType.UNMODIFIED) { + final var regChild = regNode.getExactChild(candChild.name()); if (regChild != null) { - lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), regChild, candChild); + lookupAndCreateCanCommits(path.node(candChild.name()), regChild, candChild); } - for (final RegistrationTreeNode rc : regNode - .getInexactChildren(candChild.getIdentifier())) { - lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), rc, candChild); + for (var rc : regNode.getInexactChildren(candChild.name())) { + lookupAndCreateCanCommits(path.node(candChild.name()), rc, candChild); } } } @@ -193,11 +187,11 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { } private static DOMDataTreeIdentifier treeIdentifier(final YangInstanceIdentifier path) { - return new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, path); + return DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path); } - List perform(final RegistrationTreeNode rootNode) { - final List toLookup = candidate.getRootPath().getPathArguments(); + List perform(final Node rootNode) { + final var toLookup = candidate.getRootPath().getPathArguments(); lookupAndCreateCanCommits(toLookup, 0, rootNode); final Map> mapView = actorToCandidates.asMap(); @@ -210,7 +204,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree { } } - CompositeDataTreeCohort createCohort(final SchemaContext schemaContext, final TransactionIdentifier txId, + CompositeDataTreeCohort createCohort(final EffectiveModelContext schemaContext, 
final TransactionIdentifier txId, final Executor callbackExecutor, final Timeout commitStepTimeout) { return new CompositeDataTreeCohort(this, txId, schemaContext, callbackExecutor, commitStepTimeout); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java index e5f4ceaa7e..4e3c6cb8d7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java @@ -13,13 +13,11 @@ import akka.actor.ActorRef; import akka.dispatch.OnComplete; import akka.pattern.Patterns; import akka.util.Timeout; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.concurrent.TimeUnit; import org.checkerframework.checker.lock.qual.GuardedBy; import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration; import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; import org.opendaylight.yangtools.concepts.AbstractObjectRegistration; import org.slf4j.Logger; @@ -27,11 +25,10 @@ import org.slf4j.LoggerFactory; import scala.concurrent.Future; import scala.concurrent.duration.FiniteDuration; -public class DataTreeCohortRegistrationProxy extends AbstractObjectRegistration - implements DOMDataTreeCommitCohortRegistration { - +public class DataTreeCohortRegistrationProxy extends AbstractObjectRegistration { private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortRegistrationProxy.class); private static final Timeout TIMEOUT = new Timeout(new FiniteDuration(5, TimeUnit.SECONDS)); + private final DOMDataTreeIdentifier subtree; private final ActorRef actor; private final ActorUtils actorUtils; @@ -43,8 +40,8 @@ public class DataTreeCohortRegistrationProxy super(cohort); this.subtree = requireNonNull(subtree); this.actorUtils = requireNonNull(actorUtils); - this.actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(), - subtree.getRootIdentifier()).withDispatcher(actorUtils.getNotificationDispatcherPath())); + actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(), + subtree.path()).withDispatcher(actorUtils.getNotificationDispatcherPath())); } public void init(final String shardName) { @@ -66,8 +63,6 @@ public class DataTreeCohortRegistrationProxy }, actorUtils.getClientDispatcher()); } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private synchronized void performRegistration(final ActorRef shard) { if (isClosed()) { return; @@ -75,7 +70,7 @@ public class DataTreeCohortRegistrationProxy cohortRegistry = shard; Future future = Patterns.ask(shard, new DataTreeCohortActorRegistry.RegisterCohort(subtree, actor), TIMEOUT); - future.onComplete(new OnComplete() { + future.onComplete(new OnComplete<>() { @Override public void onComplete(final Throwable failure, final Object val) { diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java index 28fb89d2be..24b3775127 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java @@ -24,7 +24,7 @@ import org.opendaylight.controller.cluster.raft.ConfigParams; import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl; import org.opendaylight.controller.cluster.raft.PeerAddressResolver; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +65,7 @@ public class DatastoreContext implements ClientActorConfig { public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 1000; public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS = TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES); - public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 2048 * 1000; // 2MB + public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB public static final int DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY = 512; public static final ExportOnRecovery DEFAULT_EXPORT_ON_RECOVERY = ExportOnRecovery.Off; public static final String DEFAULT_RECOVERY_EXPORT_BASE_DIR = "persistence-export"; @@ -92,11 +92,10 @@ public class DatastoreContext implements ClientActorConfig { private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT; private String dataStoreName = UNKNOWN_DATA_STORE_TYPE; private LogicalDatastoreType logicalStoreType = LogicalDatastoreType.OPERATIONAL; - private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.empty(); + private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.of(); private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT; private boolean writeOnlyTransactionOptimizationsEnabled = true; private long shardCommitQueueExpiryTimeoutInMillis = DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS; - private boolean useTellBasedProtocol = false; private boolean transactionDebugContextEnabled = false; private String shardManagerPersistenceId; private int maximumMessageSliceSize = DEFAULT_MAX_MESSAGE_SLICE_SIZE; @@ -127,34 +126,33 @@ public class DatastoreContext implements ClientActorConfig { } private DatastoreContext(final DatastoreContext other) { - this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout; - this.operationTimeoutInMillis = other.operationTimeoutInMillis; - this.dataStoreMXBeanType = other.dataStoreMXBeanType; - this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds; - this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity; - this.shardInitializationTimeout = other.shardInitializationTimeout; - 
this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout; - this.initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier; - this.persistent = other.persistent; - this.snapshotOnRootOverwrite = other.snapshotOnRootOverwrite; - this.configurationReader = other.configurationReader; - this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit; - this.dataStoreName = other.dataStoreName; - this.logicalStoreType = other.logicalStoreType; - this.storeRoot = other.storeRoot; - this.shardBatchedModificationCount = other.shardBatchedModificationCount; - this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled; - this.shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis; - this.transactionDebugContextEnabled = other.transactionDebugContextEnabled; - this.shardManagerPersistenceId = other.shardManagerPersistenceId; - this.useTellBasedProtocol = other.useTellBasedProtocol; - this.backendAlivenessTimerInterval = other.backendAlivenessTimerInterval; - this.requestTimeout = other.requestTimeout; - this.noProgressTimeout = other.noProgressTimeout; - this.initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity; - this.useLz4Compression = other.useLz4Compression; - this.exportOnRecovery = other.exportOnRecovery; - this.recoveryExportBaseDir = other.recoveryExportBaseDir; + shardTransactionIdleTimeout = other.shardTransactionIdleTimeout; + operationTimeoutInMillis = other.operationTimeoutInMillis; + dataStoreMXBeanType = other.dataStoreMXBeanType; + shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds; + shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity; + shardInitializationTimeout = other.shardInitializationTimeout; + shardLeaderElectionTimeout = other.shardLeaderElectionTimeout; + initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier; + persistent = other.persistent; + snapshotOnRootOverwrite = other.snapshotOnRootOverwrite; + configurationReader = other.configurationReader; + transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit; + dataStoreName = other.dataStoreName; + logicalStoreType = other.logicalStoreType; + storeRoot = other.storeRoot; + shardBatchedModificationCount = other.shardBatchedModificationCount; + writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled; + shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis; + transactionDebugContextEnabled = other.transactionDebugContextEnabled; + shardManagerPersistenceId = other.shardManagerPersistenceId; + backendAlivenessTimerInterval = other.backendAlivenessTimerInterval; + requestTimeout = other.requestTimeout; + noProgressTimeout = other.noProgressTimeout; + initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity; + useLz4Compression = other.useLz4Compression; + exportOnRecovery = other.exportOnRecovery; + recoveryExportBaseDir = other.recoveryExportBaseDir; setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize()); setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount()); @@ -167,7 +165,6 @@ public class DatastoreContext implements ClientActorConfig { setCandidateElectionTimeoutDivisor(other.raftConfig.getCandidateElectionTimeoutDivisor()); setCustomRaftPolicyImplementation(other.raftConfig.getCustomRaftPolicyImplementationClass()); 
setMaximumMessageSliceSize(other.getMaximumMessageSliceSize()); - setShardSnapshotChunkSize(other.raftConfig.getSnapshotChunkSize()); setPeerAddressResolver(other.raftConfig.getPeerAddressResolver()); setTempFileDirectory(other.getTempFileDirectory()); setFileBackedStreamingThreshold(other.getFileBackedStreamingThreshold()); @@ -229,7 +226,7 @@ public class DatastoreContext implements ClientActorConfig { } public boolean isSnapshotOnRootOverwrite() { - return this.snapshotOnRootOverwrite; + return snapshotOnRootOverwrite; } public AkkaConfigurationReader getConfigurationReader() { @@ -331,17 +328,8 @@ public class DatastoreContext implements ClientActorConfig { raftConfig.setRecoverySnapshotIntervalSeconds(recoverySnapshotInterval); } - @Deprecated - private void setShardSnapshotChunkSize(final int shardSnapshotChunkSize) { - // We'll honor the shardSnapshotChunkSize setting for backwards compatibility but only if it doesn't exceed - // maximumMessageSliceSize. - if (shardSnapshotChunkSize < maximumMessageSliceSize) { - raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize); - } - } - private void setMaximumMessageSliceSize(final int maximumMessageSliceSize) { - raftConfig.setSnapshotChunkSize(maximumMessageSliceSize); + raftConfig.setMaximumMessageSliceSize(maximumMessageSliceSize); this.maximumMessageSliceSize = maximumMessageSliceSize; } @@ -365,10 +353,6 @@ public class DatastoreContext implements ClientActorConfig { return transactionDebugContextEnabled; } - public boolean isUseTellBasedProtocol() { - return useTellBasedProtocol; - } - public boolean isUseLz4Compression() { return useLz4Compression; } @@ -405,7 +389,7 @@ public class DatastoreContext implements ClientActorConfig { return initialPayloadSerializedBufferCapacity; } - public static class Builder implements org.opendaylight.yangtools.concepts.Builder { + public static class Builder { private final DatastoreContext datastoreContext; Builder(final DatastoreContext datastoreContext) { @@ -602,11 +586,6 @@ public class DatastoreContext implements ClientActorConfig { return this; } - public Builder useTellBasedProtocol(final boolean value) { - datastoreContext.useTellBasedProtocol = value; - return this; - } - public Builder useLz4Compression(final boolean value) { datastoreContext.useLz4Compression = value; return this; @@ -636,14 +615,6 @@ public class DatastoreContext implements ClientActorConfig { return this; } - @Deprecated - public Builder shardSnapshotChunkSize(final int shardSnapshotChunkSize) { - LOG.warn("The shard-snapshot-chunk-size configuration parameter is deprecated - " - + "use maximum-message-slice-size instead"); - datastoreContext.setShardSnapshotChunkSize(shardSnapshotChunkSize); - return this; - } - public Builder maximumMessageSliceSize(final int maximumMessageSliceSize) { datastoreContext.setMaximumMessageSliceSize(maximumMessageSliceSize); return this; @@ -689,7 +660,6 @@ public class DatastoreContext implements ClientActorConfig { return this; } - @Override public DatastoreContext build() { if (datastoreContext.dataStoreName != null) { GLOBAL_DATASTORE_NAMES.add(datastoreContext.dataStoreName); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java index 0ee005a708..ac50ff30a2 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.Set; import java.util.function.Function; import javax.management.ConstructorParameters; @@ -31,8 +30,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.text.WordUtils; import org.checkerframework.checker.lock.qual.GuardedBy; import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer; import org.opendaylight.yangtools.yang.common.Uint16; import org.opendaylight.yangtools.yang.common.Uint32; import org.opendaylight.yangtools.yang.common.Uint64; @@ -84,7 +83,7 @@ public class DatastoreContextIntrospector { private static void introspectPrimitiveTypes() { final Set> primitives = ImmutableSet.>builder().addAll( Primitives.allWrapperTypes()).add(String.class).build(); - for (final Class primitive: primitives) { + for (final Class primitive : primitives) { try { processPropertyType(primitive); } catch (final NoSuchMethodException e) { @@ -175,7 +174,7 @@ public class DatastoreContextIntrospector { // This must be a yang-defined type. We need to find the constructor that takes a // primitive as the only argument. This will be used to construct instances to perform // validation (eg range checking). The yang-generated types have a couple single-argument - // constructors but the one we want has the bean ConstructorProperties annotation. + // constructors but the one we want has the ConstructorParameters annotation. 
for (final Constructor ctor: propertyType.getConstructors()) { final ConstructorParameters ctorParAnnotation = ctor.getAnnotation(ConstructorParameters.class); if (ctor.getParameterCount() == 1 && ctorParAnnotation != null) { @@ -382,9 +381,8 @@ public class DatastoreContextIntrospector { if (propertyType.isEnum()) { try { final Method enumConstructor = propertyType.getDeclaredMethod("forName", String.class); - final Object optional = enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT)); - if (optional instanceof Optional) { - return ((Optional)optional).orElseThrow(); + if (enumConstructor.getReturnType().equals(propertyType)) { + return enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT)); } } catch (NoSuchMethodException e) { LOG.error("Error constructing value ({}) for enum {}", from, propertyType); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java deleted file mode 100644 index afb5773f43..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.List; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -/** - * An AbstractThreePhaseCommitCohort implementation used for debugging. If a failure occurs, the transaction - * call site is printed. 
- * - * @author Thomas Pantelis - */ -class DebugThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort { - private static final Logger LOG = LoggerFactory.getLogger(DebugThreePhaseCommitCohort.class); - - private final AbstractThreePhaseCommitCohort delegate; - private final Throwable debugContext; - private final TransactionIdentifier transactionId; - - @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL") - private Logger log = LOG; - - DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId, - final AbstractThreePhaseCommitCohort delegate, final Throwable debugContext) { - this.delegate = requireNonNull(delegate); - this.debugContext = requireNonNull(debugContext); - this.transactionId = requireNonNull(transactionId); - } - - private ListenableFuture addFutureCallback(final ListenableFuture future) { - Futures.addCallback(future, new FutureCallback() { - @Override - public void onSuccess(final V result) { - // no-op - } - - @Override - public void onFailure(final Throwable failure) { - log.warn("Transaction {} failed with error \"{}\" - was allocated in the following context", - transactionId, failure, debugContext); - } - }, MoreExecutors.directExecutor()); - - return future; - } - - @Override - public ListenableFuture canCommit() { - return addFutureCallback(delegate.canCommit()); - } - - @Override - public ListenableFuture preCommit() { - return addFutureCallback(delegate.preCommit()); - } - - @Override - public ListenableFuture commit() { - return addFutureCallback(delegate.commit()); - } - - @Override - public ListenableFuture abort() { - return delegate.abort(); - } - - @SuppressWarnings({ "rawtypes", "unchecked" }) - @Override - List> getCohortFutures() { - return ((AbstractThreePhaseCommitCohort)delegate).getCohortFutures(); - } - - @VisibleForTesting - void setLogger(final Logger logger) { - this.log = logger; - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java index 720aadb175..d7d90474f3 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java @@ -7,15 +7,14 @@ */ package org.opendaylight.controller.cluster.datastore; -import java.util.Collection; +import java.util.List; import java.util.Optional; import java.util.function.Consumer; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration; import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTreeChangePublisher; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,38 +45,34 @@ final class DefaultShardDataTreeChangeListenerPublisher extends AbstractDOMStore } @Override - protected void notifyListener(final 
AbstractDOMDataTreeChangeListenerRegistration registration, - final Collection changes) { - LOG.debug("{}: notifyListener: listener: {}", logContext, registration.getInstance()); - registration.getInstance().onDataTreeChanged(changes); + protected void notifyListener(final Reg registration, final List changes) { + final var listener = registration.listener(); + LOG.debug("{}: notifyListener: listener: {}", logContext, listener); + listener.onDataTreeChanged(changes); } @Override - protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration registration) { + protected void registrationRemoved(final Reg registration) { LOG.debug("Registration {} removed", registration); } @Override public void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener, final Optional initialState, - final Consumer> onRegistration) { + final Consumer onRegistration) { registerTreeChangeListener(treeId, listener, onRegistration); if (initialState.isPresent()) { - notifySingleListener(treeId, listener, initialState.get(), logContext); + notifySingleListener(treeId, listener, initialState.orElseThrow(), logContext); } else { listener.onInitialData(); } } void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener, - final Consumer> onRegistration) { + final Consumer onRegistration) { LOG.debug("{}: registerTreeChangeListener: path: {}, listener: {}", logContext, treeId, listener); - - AbstractDOMDataTreeChangeListenerRegistration registration = - super.registerTreeChangeListener(treeId, listener); - - onRegistration.accept(registration); + onRegistration.accept(super.registerTreeChangeListener(treeId, listener)); } static void notifySingleListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener, diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java index ef26e94ee9..740aef92b8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java @@ -8,12 +8,11 @@ package org.opendaylight.controller.cluster.datastore; import akka.actor.ActorRef; -import java.util.EventListener; import org.checkerframework.checker.lock.qual.GuardedBy; import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; -class DelayedDataTreeChangeListenerRegistration implements ListenerRegistration { +class DelayedDataTreeChangeListenerRegistration implements Registration { private final RegisterDataTreeChangeListener registrationMessage; private final ActorRef registrationActor; @@ -32,17 +31,6 @@ class DelayedDataTreeChangeListenerRegistration impleme } } - @Override - public L getInstance() { - // ObjectRegistration annotates this method as @Nonnull but we could return null if the delegate is not set yet. 
- // In reality, we do not and should not ever call this method on DelayedDataTreeChangeListenerRegistration - // instances anyway but, since we have to provide an implementation to satisfy the interface, we throw - // UnsupportedOperationException to honor the API contract of not returning null and to avoid a FindBugs error - // for possibly returning null. - throw new UnsupportedOperationException( - "getInstance should not be called on this instance since it could be null"); - } - @Override public synchronized void close() { closed = true; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java deleted file mode 100644 index 17df235363..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkState; - -import akka.actor.ActorSelection; -import akka.dispatch.Futures; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.SortedSet; -import org.checkerframework.checker.lock.qual.GuardedBy; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; -import scala.concurrent.Promise; - -/** - * Delayed implementation of TransactionContextWrapper. Operations destined for the target - * TransactionContext instance are cached until the TransactionContext instance becomes - * available at which time they are executed. - * - * @author Thomas Pantelis - */ -final class DelayedTransactionContextWrapper extends AbstractTransactionContextWrapper { - private static final Logger LOG = LoggerFactory.getLogger(DelayedTransactionContextWrapper.class); - - /** - * The list of transaction operations to execute once the TransactionContext becomes available. - */ - @GuardedBy("queuedTxOperations") - private final List> queuedTxOperations = new ArrayList<>(); - - /** - * The resulting TransactionContext. 
- */ - private volatile TransactionContext transactionContext; - @GuardedBy("queuedTxOperations") - private TransactionContext deferredTransactionContext; - @GuardedBy("queuedTxOperations") - private boolean pendingEnqueue; - - DelayedTransactionContextWrapper(@NonNull final TransactionIdentifier identifier, - @NonNull final ActorUtils actorUtils, @NonNull final String shardName) { - super(identifier, actorUtils, shardName); - } - - @Override - TransactionContext getTransactionContext() { - return transactionContext; - } - - @Override - void maybeExecuteTransactionOperation(final TransactionOperation op) { - final TransactionContext localContext = transactionContext; - if (localContext != null) { - op.invoke(localContext, null); - } else { - // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future - // callback to be executed after the Tx is created. - enqueueTransactionOperation(op); - } - } - - @Override - Future readyTransaction(final Optional> participatingShardNames) { - // avoid the creation of a promise and a TransactionOperation - final TransactionContext localContext = transactionContext; - if (localContext != null) { - return localContext.readyTransaction(null, participatingShardNames); - } - - final Promise promise = Futures.promise(); - enqueueTransactionOperation(new TransactionOperation() { - @Override - public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) { - promise.completeWith(newTransactionContext.readyTransaction(havePermit, participatingShardNames)); - } - }); - - return promise.future(); - } - - /** - * Adds a TransactionOperation to be executed once the TransactionContext becomes available. This method is called - * only after the caller has checked (without synchronizing with executePriorTransactionOperations()) that the - * context is not available. - */ - private void enqueueTransactionOperation(final TransactionOperation operation) { - // We have three things to do here: - // - synchronize with executePriorTransactionOperations() so that logical operation ordering is maintained - // - acquire a permit for the operation if we still need to enqueue it - // - enqueue the operation - // - // Since each operation needs to acquire a permit exactly once and the limiter is shared between us and the - // TransactionContext, we need to know whether an operation has a permit before we enqueue it. Further - // complications are: - // - this method may be called from the thread invoking executePriorTransactionOperations() - // - user may be violating API contract of using the transaction from a single thread - - // As a first step, we will synchronize on the queue and check if the handoff has completed. While we have - // the lock, we will assert that we will be enqueing another operation. 
- final TransactionContext contextOnEntry; - synchronized (queuedTxOperations) { - contextOnEntry = transactionContext; - if (contextOnEntry == null) { - checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected", getIdentifier()); - pendingEnqueue = true; - } - } - - // Short-circuit if there is a context - if (contextOnEntry != null) { - operation.invoke(transactionContext, null); - return; - } - - boolean cleanupEnqueue = true; - TransactionContext finishHandoff = null; - try { - // Acquire the permit, - final boolean havePermit = getLimiter().acquire(); - if (!havePermit) { - LOG.warn("Failed to acquire enqueue operation permit for transaction {} on shard {}", getIdentifier(), - getShardName()); - } - - // Ready to enqueue, take the lock again and append the operation - synchronized (queuedTxOperations) { - LOG.debug("Tx {} Queuing TransactionOperation", getIdentifier()); - queuedTxOperations.add(new SimpleImmutableEntry<>(operation, havePermit)); - pendingEnqueue = false; - cleanupEnqueue = false; - finishHandoff = deferredTransactionContext; - deferredTransactionContext = null; - } - } finally { - if (cleanupEnqueue) { - synchronized (queuedTxOperations) { - pendingEnqueue = false; - finishHandoff = deferredTransactionContext; - deferredTransactionContext = null; - } - } - if (finishHandoff != null) { - executePriorTransactionOperations(finishHandoff); - } - } - } - - void executePriorTransactionOperations(final TransactionContext localTransactionContext) { - while (true) { - // Access to queuedTxOperations and transactionContext must be protected and atomic - // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing - // issues and ensure no TransactionOperation is missed and that they are processed - // in the order they occurred. - - // We'll make a local copy of the queuedTxOperations list to handle re-entrancy - // in case a TransactionOperation results in another transaction operation being - // queued (eg a put operation from a client read Future callback that is notified - // synchronously). - final Collection> operationsBatch; - synchronized (queuedTxOperations) { - if (queuedTxOperations.isEmpty()) { - if (!pendingEnqueue) { - // We're done invoking the TransactionOperations so we can now publish the TransactionContext. - localTransactionContext.operationHandOffComplete(); - - // This is null-to-non-null transition after which we are releasing the lock and not doing - // any further processing. - transactionContext = localTransactionContext; - } else { - deferredTransactionContext = localTransactionContext; - } - return; - } - - operationsBatch = new ArrayList<>(queuedTxOperations); - queuedTxOperations.clear(); - } - - // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking. A slight down-side is - // that we need to re-acquire the lock below but this should be negligible. - for (Entry oper : operationsBatch) { - final Boolean permit = oper.getValue(); - if (permit.booleanValue() && !localTransactionContext.usesOperationLimiting()) { - // If the context is not using limiting we need to release operations as we are queueing them, so - // user threads are not charged for them. 
- getLimiter().release(); - } - oper.getKey().invoke(localTransactionContext, permit); - } - } - } - -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java deleted file mode 100644 index f004088134..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import java.util.Optional; -import java.util.SortedSet; -import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import scala.concurrent.Future; - -/** - * Direct implementation of TransactionContextWrapper. Operation are executed directly on TransactionContext. Always - * has completed context and executes on local shard. - */ -final class DirectTransactionContextWrapper extends AbstractTransactionContextWrapper { - private final TransactionContext transactionContext; - - DirectTransactionContextWrapper(@NonNull final TransactionIdentifier identifier, - @NonNull final ActorUtils actorUtils, - @NonNull final String shardName, - @NonNull final TransactionContext transactionContext) { - super(identifier, actorUtils, shardName); - this.transactionContext = requireNonNull(transactionContext); - } - - @Override - TransactionContext getTransactionContext() { - return transactionContext; - } - - @Override - void maybeExecuteTransactionOperation(final TransactionOperation op) { - op.invoke(transactionContext, null); - } - - @Override - Future readyTransaction(final Optional> participatingShardNames) { - return transactionContext.readyTransaction(null, participatingShardNames); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java deleted file mode 100644 index 16198ff544..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ - -package org.opendaylight.controller.cluster.datastore; - -import akka.actor.ActorSystem; -import com.google.common.annotations.VisibleForTesting; -import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; -import org.opendaylight.controller.cluster.datastore.config.Configuration; -import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; - -/** - * Implements a distributed DOMStore using Akka Patterns.ask(). - */ -public class DistributedDataStore extends AbstractDataStore { - - private final TransactionContextFactory txContextFactory; - - public DistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster, - final Configuration configuration, final DatastoreContextFactory datastoreContextFactory, - final DatastoreSnapshot restoreFromSnapshot) { - super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot); - this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier()); - } - - @VisibleForTesting - DistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) { - super(actorUtils, identifier); - this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier()); - } - - - @Override - public DOMStoreTransactionChain createTransactionChain() { - return txContextFactory.createTransactionChain(); - } - - @Override - public DOMStoreReadTransaction newReadOnlyTransaction() { - return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY); - } - - @Override - public DOMStoreWriteTransaction newWriteOnlyTransaction() { - getActorUtils().acquireTxCreationPermit(); - return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY); - } - - @Override - public DOMStoreReadWriteTransaction newReadWriteTransaction() { - getActorUtils().acquireTxCreationPermit(); - return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE); - } - - @Override - public void close() { - txContextFactory.close(); - super.close(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java index 221db2faea..350b915b57 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java @@ -7,12 +7,10 @@ */ package org.opendaylight.controller.cluster.datastore; -import akka.actor.ActorSystem; import org.opendaylight.controller.cluster.ActorSystemProvider; import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore; import 
org.opendaylight.controller.cluster.datastore.config.Configuration; import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl; -import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot; import org.opendaylight.mdsal.dom.api.DOMSchemaService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +43,7 @@ public final class DistributedDataStoreFactory { updater.setListener(dataStore); - schemaService.registerSchemaContextListener(dataStore); + schemaService.registerSchemaContextListener(dataStore::onModelContextUpdated); dataStore.setCloseable(updater); dataStore.waitTillReady(); @@ -60,8 +58,8 @@ public final class DistributedDataStoreFactory { final String datastoreName = initialDatastoreContext.getDataStoreName(); LOG.info("Create data store instance of type : {}", datastoreName); - final ActorSystem actorSystem = actorSystemProvider.getActorSystem(); - final DatastoreSnapshot restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null); + final var actorSystem = actorSystemProvider.getActorSystem(); + final var restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null); final Configuration config; if (orgConfig == null) { @@ -69,23 +67,12 @@ public final class DistributedDataStoreFactory { } else { config = orgConfig; } - final ClusterWrapper clusterWrapper = new ClusterWrapperImpl(actorSystem); - final DatastoreContextFactory contextFactory = introspector.newContextFactory(); + final var clusterWrapper = new ClusterWrapperImpl(actorSystem); + final var contextFactory = introspector.newContextFactory(); - // This is the potentially-updated datastore context, distinct from the initial one - final DatastoreContext datastoreContext = contextFactory.getBaseDatastoreContext(); - - final AbstractDataStore dataStore; - if (datastoreContext.isUseTellBasedProtocol()) { - dataStore = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory, - restoreFromSnapshot); - LOG.info("Data store {} is using tell-based protocol", datastoreName); - } else { - dataStore = new DistributedDataStore(actorSystem, clusterWrapper, config, contextFactory, - restoreFromSnapshot); - LOG.info("Data store {} is using ask-based protocol", datastoreName); - } - - return dataStore; + final var ret = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory, + restoreFromSnapshot); + LOG.info("Data store {} is using tell-based protocol", datastoreName); + return ret; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java index deae01960b..5f4e30978e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java @@ -11,7 +11,7 @@ import com.google.common.annotations.Beta; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; import org.opendaylight.mdsal.dom.spi.store.DOMStore; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import 
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** @@ -24,7 +24,6 @@ public interface DistributedDataStoreInterface extends DOMStore { ActorUtils getActorUtils(); @Beta - ListenerRegistration registerProxyListener( - YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard, + Registration registerProxyListener(YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard, DOMDataTreeChangeListener delegate); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java index 82a30b6c40..dca9c0773e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java @@ -11,12 +11,12 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorRef; import akka.actor.ActorSelection; -import java.util.Collection; +import java.util.List; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged; import org.opendaylight.controller.cluster.datastore.messages.OnInitialData; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +37,7 @@ final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListene } @Override - public void onDataTreeChanged(final Collection changes) { + public void onDataTreeChanged(final List changes) { LOG.debug("Sending DataTreeChanged to {}", actor); actor.tell(new DataTreeChanged(changes), sendingActor); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java index 7f281ab0f3..c89627800f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java @@ -7,11 +7,11 @@ */ package org.opendaylight.controller.cluster.datastore; -import static com.google.common.base.Verify.verify; import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; import com.google.common.base.MoreObjects.ToStringHelper; +import com.google.common.base.VerifyException; import com.google.common.collect.Collections2; import com.google.common.collect.ImmutableList; import java.util.HashMap; @@ -21,27 +21,23 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata; -import 
org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata; import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet; import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet; -import org.opendaylight.yangtools.concepts.Builder; -import org.opendaylight.yangtools.concepts.Identifiable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class is NOT thread-safe. */ -abstract class FrontendClientMetadataBuilder implements Builder, - Identifiable { +abstract sealed class FrontendClientMetadataBuilder { static final class Disabled extends FrontendClientMetadataBuilder { - Disabled(final String shardName, final ClientIdentifier identifier) { - super(shardName, identifier); + Disabled(final String shardName, final ClientIdentifier clientId) { + super(shardName, clientId); } @Override - public FrontendClientMetadata build() { - return new FrontendClientMetadata(getIdentifier(), ImmutableUnsignedLongSet.of(), ImmutableList.of()); + FrontendClientMetadata build() { + return new FrontendClientMetadata(clientId(), ImmutableUnsignedLongSet.of(), ImmutableList.of()); } @Override @@ -81,7 +77,7 @@ abstract class FrontendClientMetadataBuilder implements Builder histories = new HashMap<>(); - for (FrontendHistoryMetadataBuilder e : currentHistories.values()) { - if (e.getIdentifier().getHistoryId() != 0) { - final AbstractFrontendHistory state = e.toLeaderState(shard); - verify(state instanceof LocalFrontendHistory, "Unexpected state %s", state); - histories.put(e.getIdentifier(), (LocalFrontendHistory) state); + final var histories = new HashMap(); + for (var historyMetaBuilder : currentHistories.values()) { + final var historyId = historyMetaBuilder.getIdentifier(); + if (historyId.getHistoryId() != 0) { + final var state = historyMetaBuilder.toLeaderState(shard); + if (state instanceof LocalFrontendHistory localState) { + histories.put(historyId, localState); + } else { + throw new VerifyException("Unexpected state " + state); + } } } final AbstractFrontendHistory singleHistory; - final FrontendHistoryMetadataBuilder singleHistoryMeta = currentHistories.get( - new LocalHistoryIdentifier(getIdentifier(), 0)); + final var singleHistoryMeta = currentHistories.get(new LocalHistoryIdentifier(clientId(), 0)); if (singleHistoryMeta == null) { - final ShardDataTree tree = shard.getDataStore(); - singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), getIdentifier(), tree); + final var tree = shard.getDataStore(); + singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), clientId(), tree); } else { singleHistory = singleHistoryMeta.toLeaderState(shard); } - return new LeaderFrontendState.Enabled(shard.persistenceId(), getIdentifier(), shard.getDataStore(), + return new LeaderFrontendState.Enabled(shard.persistenceId(), clientId(), shard.getDataStore(), purgedHistories.mutableCopy(), singleHistory, histories); } @@ -259,36 +258,37 @@ abstract class FrontendClientMetadataBuilder implements Builder, - Identifiable { - +final class FrontendHistoryMetadataBuilder implements Identifiable { private final @NonNull Map closedTransactions; private final @NonNull MutableUnsignedLongSet purgedTransactions; private final @NonNull LocalHistoryIdentifier identifier; @@ -51,7 +48,6 @@ final class FrontendHistoryMetadataBuilder implements Builder getClients() { return clients.values().stream() - .map(FrontendClientMetadataBuilder::getIdentifier) - .collect(ImmutableSet.toImmutableSet()); + 
.map(FrontendClientMetadataBuilder::clientId) + .collect(ImmutableSet.toImmutableSet()); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java index e8b99e60a5..e1b8a3fb94 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java @@ -40,7 +40,7 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction { private FrontendReadOnlyTransaction(final AbstractFrontendHistory history, final ReadOnlyShardDataTreeTransaction transaction) { super(history, transaction.getIdentifier()); - this.openTransaction = requireNonNull(transaction); + openTransaction = requireNonNull(transaction); } static FrontendReadOnlyTransaction create(final AbstractFrontendHistory history, @@ -75,7 +75,8 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction { // The only valid request here is with abort protocol final Optional optProto = request.getPersistenceProtocol(); checkArgument(optProto.isPresent(), "Commit protocol is missing in %s", request); - checkArgument(optProto.get() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s", request); + checkArgument(optProto.orElseThrow() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s", + request); openTransaction.abort(() -> recordAndSendSuccess(envelope, now, new ModifyTransactionSuccess(request.getTarget(), request.getSequence()))); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java index 4bd2a57257..c626791547 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java @@ -42,9 +42,10 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,7 +101,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { Ready(final ShardDataTreeCohort readyCohort) { this.readyCohort = requireNonNull(readyCohort); - this.stage = CommitStage.READY; + stage = CommitStage.READY; } @Override @@ -163,13 
+164,13 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id, final ReadWriteShardDataTreeTransaction transaction) { super(history, id); - this.state = new Open(transaction); + state = new Open(transaction); } private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id, final DataTreeModification mod) { super(history, id); - this.state = new Sealed(mod); + state = new Sealed(mod); } static FrontendReadWriteTransaction createOpen(final AbstractFrontendHistory history, @@ -186,20 +187,20 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { @Override TransactionSuccess doHandleRequest(final TransactionRequest request, final RequestEnvelope envelope, final long now) throws RequestException { - if (request instanceof ModifyTransactionRequest) { - return handleModifyTransaction((ModifyTransactionRequest) request, envelope, now); - } else if (request instanceof CommitLocalTransactionRequest) { - handleCommitLocalTransaction((CommitLocalTransactionRequest) request, envelope, now); + if (request instanceof ModifyTransactionRequest modifyRequest) { + return handleModifyTransaction(modifyRequest, envelope, now); + } else if (request instanceof CommitLocalTransactionRequest commitLocalRequest) { + handleCommitLocalTransaction(commitLocalRequest, envelope, now); return null; - } else if (request instanceof ExistsTransactionRequest) { - return handleExistsTransaction((ExistsTransactionRequest) request); - } else if (request instanceof ReadTransactionRequest) { - return handleReadTransaction((ReadTransactionRequest) request); - } else if (request instanceof TransactionPreCommitRequest) { - handleTransactionPreCommit((TransactionPreCommitRequest) request, envelope, now); + } else if (request instanceof ExistsTransactionRequest existsRequest) { + return handleExistsTransaction(existsRequest); + } else if (request instanceof ReadTransactionRequest readRequest) { + return handleReadTransaction(readRequest); + } else if (request instanceof TransactionPreCommitRequest preCommitRequest) { + handleTransactionPreCommit(preCommitRequest, envelope, now); return null; - } else if (request instanceof TransactionDoCommitRequest) { - handleTransactionDoCommit((TransactionDoCommitRequest) request, envelope, now); + } else if (request instanceof TransactionDoCommitRequest doCommitRequest) { + handleTransactionDoCommit(doCommitRequest, envelope, now); return null; } else if (request instanceof TransactionAbortRequest) { return handleTransactionAbort(request.getSequence(), envelope, now); @@ -349,9 +350,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { final Ready ready = checkReady(); startAbort(); - ready.readyCohort.abort(new FutureCallback() { + ready.readyCohort.abort(new FutureCallback<>() { @Override - public void onSuccess(final Void result) { + public void onSuccess(final Empty result) { recordAndSendSuccess(envelope, now, new TransactionAbortSuccess(getIdentifier(), sequence)); finishAbort(); } @@ -377,9 +378,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { case READY: ready.stage = CommitStage.CAN_COMMIT_PENDING; LOG.debug("{}: Transaction {} initiating canCommit", persistenceId(), getIdentifier()); - checkReady().readyCohort.canCommit(new FutureCallback() { + checkReady().readyCohort.canCommit(new FutureCallback<>() { @Override - public void onSuccess(final Void 
result) { + public void onSuccess(final Empty result) { successfulCanCommit(envelope, now); } @@ -429,9 +430,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { case READY: ready.stage = CommitStage.CAN_COMMIT_PENDING; LOG.debug("{}: Transaction {} initiating direct canCommit", persistenceId(), getIdentifier()); - ready.readyCohort.canCommit(new FutureCallback() { + ready.readyCohort.canCommit(new FutureCallback<>() { @Override - public void onSuccess(final Void result) { + public void onSuccess(final Empty result) { successfulDirectCanCommit(envelope, now); } @@ -511,7 +512,8 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { final Optional optFailure = request.getDelayedFailure(); if (optFailure.isPresent()) { - state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification, optFailure.get())); + state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification, + optFailure.orElseThrow())); } else { state = new Ready(history().createReadyCohort(getIdentifier(), sealedModification, Optional.empty())); } @@ -545,10 +547,10 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { for (TransactionModification m : modifications) { if (m instanceof TransactionDelete) { modification.delete(m.getPath()); - } else if (m instanceof TransactionWrite) { - modification.write(m.getPath(), ((TransactionWrite) m).getData()); - } else if (m instanceof TransactionMerge) { - modification.merge(m.getPath(), ((TransactionMerge) m).getData()); + } else if (m instanceof TransactionWrite write) { + modification.write(m.getPath(), write.getData()); + } else if (m instanceof TransactionMerge merge) { + modification.merge(m.getPath(), merge.getData()); } else { LOG.warn("{}: ignoring unhandled modification {}", persistenceId(), m); } @@ -566,7 +568,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { return replyModifySuccess(request.getSequence()); } - switch (maybeProto.get()) { + switch (maybeProto.orElseThrow()) { case ABORT: if (ABORTING.equals(state)) { LOG.debug("{}: Transaction {} already aborting", persistenceId(), getIdentifier()); @@ -592,7 +594,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction { coordinatedCommit(envelope, now); return null; default: - LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.get()); + LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.orElseThrow()); throw new UnsupportedRequestException(request); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java index e086e51a66..916cb75f5a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java @@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects; import com.google.common.base.MoreObjects.ToStringHelper; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; @@ -40,10 +39,8 @@ import org.slf4j.LoggerFactory; /** * Frontend state as 
observed by the shard leader. This class is responsible for tracking generations and sequencing * in the frontend/backend conversation. This class is NOT thread-safe. - * - * @author Robert Varga */ -abstract class LeaderFrontendState implements Identifiable { +abstract sealed class LeaderFrontendState implements Identifiable { static final class Disabled extends LeaderFrontendState { Disabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) { super(persistenceId, clientId, tree); @@ -95,12 +92,12 @@ abstract class LeaderFrontendState implements Identifiable { checkRequestSequence(envelope); try { - if (request instanceof CreateLocalHistoryRequest) { - return handleCreateHistory((CreateLocalHistoryRequest) request, envelope, now); - } else if (request instanceof DestroyLocalHistoryRequest) { - return handleDestroyHistory((DestroyLocalHistoryRequest) request, envelope, now); - } else if (request instanceof PurgeLocalHistoryRequest) { - return handlePurgeHistory((PurgeLocalHistoryRequest) request, envelope, now); + if (request instanceof CreateLocalHistoryRequest req) { + return handleCreateHistory(req, envelope, now); + } else if (request instanceof DestroyLocalHistoryRequest req) { + return handleDestroyHistory(req, envelope, now); + } else if (request instanceof PurgeLocalHistoryRequest req) { + return handlePurgeHistory(req, envelope, now); } else { LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request); throw new UnsupportedRequestException(request); @@ -116,7 +113,7 @@ abstract class LeaderFrontendState implements Identifiable { checkRequestSequence(envelope); try { - final LocalHistoryIdentifier lhId = request.getTarget().getHistoryId(); + final var lhId = request.getTarget().getHistoryId(); final AbstractFrontendHistory history; if (lhId.getHistoryId() != 0) { @@ -163,8 +160,8 @@ abstract class LeaderFrontendState implements Identifiable { private LocalHistorySuccess handleCreateHistory(final CreateLocalHistoryRequest request, final RequestEnvelope envelope, final long now) throws RequestException { - final LocalHistoryIdentifier historyId = request.getTarget(); - final AbstractFrontendHistory existing = localHistories.get(historyId); + final var historyId = request.getTarget(); + final var existing = localHistories.get(historyId); if (existing != null) { // History already exists: report success LOG.debug("{}: history {} already exists", persistenceId(), historyId); @@ -184,7 +181,7 @@ abstract class LeaderFrontendState implements Identifiable { } // We have to send the response only after persistence has completed - final ShardDataTreeTransactionChain chain = tree().ensureTransactionChain(historyId, () -> { + final var chain = tree().ensureTransactionChain(historyId, () -> { LOG.debug("{}: persisted history {}", persistenceId(), historyId); envelope.sendSuccess(new LocalHistorySuccess(historyId, request.getSequence()), tree().readTime() - now); @@ -197,8 +194,8 @@ abstract class LeaderFrontendState implements Identifiable { private LocalHistorySuccess handleDestroyHistory(final DestroyLocalHistoryRequest request, final RequestEnvelope envelope, final long now) { - final LocalHistoryIdentifier id = request.getTarget(); - final LocalFrontendHistory existing = localHistories.get(id); + final var id = request.getTarget(); + final var existing = localHistories.get(id); if (existing == null) { // History does not exist: report success LOG.debug("{}: history {} does not exist, nothing to destroy", persistenceId(), id); @@ -211,8 
+208,8 @@ abstract class LeaderFrontendState implements Identifiable { private LocalHistorySuccess handlePurgeHistory(final PurgeLocalHistoryRequest request, final RequestEnvelope envelope, final long now) { - final LocalHistoryIdentifier id = request.getTarget(); - final LocalFrontendHistory existing = localHistories.remove(id); + final var id = request.getTarget(); + final var existing = localHistories.remove(id); if (existing == null) { LOG.debug("{}: history {} has already been purged", persistenceId(), id); return new LocalHistorySuccess(id, request.getSequence()); @@ -297,16 +294,16 @@ abstract class LeaderFrontendState implements Identifiable { void retire() { // Hunt down any transactions associated with this frontend - final Iterator it = tree.cohortIterator(); + final var it = tree.cohortIterator(); while (it.hasNext()) { - final SimpleShardDataTreeCohort cohort = it.next(); - if (clientId.equals(cohort.getIdentifier().getHistoryId().getClientId())) { + final var cohort = it.next(); + final var transactionId = cohort.transactionId(); + if (clientId.equals(transactionId.getHistoryId().getClientId())) { if (cohort.getState() != State.COMMIT_PENDING) { - LOG.debug("{}: Retiring transaction {}", persistenceId, cohort.getIdentifier()); + LOG.debug("{}: Retiring transaction {}", persistenceId, transactionId); it.remove(); } else { - LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId, - cohort.getIdentifier()); + LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId, transactionId); } } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java index 3125ed651a..8226ac3c75 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java @@ -18,7 +18,7 @@ import java.util.SortedSet; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * Chained transaction specialization of {@link AbstractFrontendHistory}. It prevents concurrent open transactions. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java deleted file mode 100644 index ac279b7e99..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import akka.dispatch.Futures; -import akka.dispatch.OnComplete; -import com.google.common.util.concurrent.ListenableFuture; -import java.util.Optional; -import java.util.SortedSet; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply; -import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -/** - * Fake {@link DOMStoreThreePhaseCommitCohort} instantiated for local transactions to conform with the DOM - * transaction APIs. It is only used to hold the data from a local DOM transaction ready operation and to - * initiate direct or coordinated commits from the front-end by sending the ReadyLocalTransaction message. - * It is not actually called by the front-end to perform 3PC thus the canCommit/preCommit/commit methods - * are no-ops. - */ -class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort { - private static final Logger LOG = LoggerFactory.getLogger(LocalThreePhaseCommitCohort.class); - - private final SnapshotBackedWriteTransaction transaction; - private final DataTreeModification modification; - private final ActorUtils actorUtils; - private final ActorSelection leader; - private final Exception operationError; - - protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader, - final SnapshotBackedWriteTransaction transaction, - final DataTreeModification modification, - final Exception operationError) { - this.actorUtils = requireNonNull(actorUtils); - this.leader = requireNonNull(leader); - this.transaction = requireNonNull(transaction); - this.modification = requireNonNull(modification); - this.operationError = operationError; - } - - protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader, - final SnapshotBackedWriteTransaction transaction, final Exception operationError) { - this.actorUtils = requireNonNull(actorUtils); - this.leader = requireNonNull(leader); - this.transaction = requireNonNull(transaction); - this.operationError = requireNonNull(operationError); - this.modification = null; - } - - private Future initiateCommit(final boolean immediate, - final Optional> participatingShardNames) { - if (operationError != null) { - return Futures.failed(operationError); - } - - final ReadyLocalTransaction message = new ReadyLocalTransaction(transaction.getIdentifier(), - modification, immediate, participatingShardNames); - return actorUtils.executeOperationAsync(leader, message, actorUtils.getTransactionCommitOperationTimeout()); - } - - Future initiateCoordinatedCommit(final Optional> participatingShardNames) { - final Future messageFuture = initiateCommit(false, 
participatingShardNames); - final Future ret = TransactionReadyReplyMapper.transform(messageFuture, actorUtils, - transaction.getIdentifier()); - ret.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final ActorSelection success) { - if (failure != null) { - LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure); - transactionAborted(transaction); - return; - } - - LOG.debug("Transaction {} resolved to actor {}", transaction.getIdentifier(), success); - } - }, actorUtils.getClientDispatcher()); - - return ret; - } - - Future initiateDirectCommit() { - final Future messageFuture = initiateCommit(true, Optional.empty()); - messageFuture.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final Object message) { - if (failure != null) { - LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure); - transactionAborted(transaction); - } else if (CommitTransactionReply.isSerializedType(message)) { - LOG.debug("Transaction {} committed successfully", transaction.getIdentifier()); - transactionCommitted(transaction); - } else { - LOG.error("Transaction {} resulted in unhandled message type {}, aborting", - transaction.getIdentifier(), message.getClass()); - transactionAborted(transaction); - } - } - }, actorUtils.getClientDispatcher()); - - return messageFuture; - } - - @Override - public final ListenableFuture canCommit() { - // Intended no-op - throw new UnsupportedOperationException(); - } - - @Override - public final ListenableFuture preCommit() { - // Intended no-op - throw new UnsupportedOperationException(); - } - - @Override - public final ListenableFuture abort() { - // Intended no-op - throw new UnsupportedOperationException(); - } - - @Override - public final ListenableFuture commit() { - // Intended no-op - throw new UnsupportedOperationException(); - } - - protected void transactionAborted(final SnapshotBackedWriteTransaction aborted) { - } - - protected void transactionCommitted(final SnapshotBackedWriteTransaction comitted) { - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java deleted file mode 100644 index c995e1150d..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.mdsal.dom.spi.store.AbstractSnapshotBackedTransactionChain; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; - -/** - * Transaction chain instantiated on top of a locally-available DataTree. It does not instantiate - * a transaction in the leader and rather chains transactions on top of themselves. - */ -final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain - implements LocalTransactionFactory { - private static final Throwable ABORTED = new Throwable("Transaction aborted"); - private final TransactionChainProxy parent; - private final ActorSelection leader; - private final ReadOnlyDataTree tree; - - LocalTransactionChain(final TransactionChainProxy parent, final ActorSelection leader, - final ReadOnlyDataTree tree) { - this.parent = requireNonNull(parent); - this.leader = requireNonNull(leader); - this.tree = requireNonNull(tree); - } - - ReadOnlyDataTree getDataTree() { - return tree; - } - - @Override - protected TransactionIdentifier nextTransactionIdentifier() { - throw new UnsupportedOperationException(); - } - - @Override - protected boolean getDebugTransactions() { - return false; - } - - @Override - protected DataTreeSnapshot takeSnapshot() { - return tree.takeSnapshot(); - } - - @Override - protected DOMStoreThreePhaseCommitCohort createCohort( - final SnapshotBackedWriteTransaction transaction, - final DataTreeModification modification, - final Exception operationError) { - return new LocalChainThreePhaseCommitCohort(transaction, modification, operationError); - } - - @Override - public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) { - return super.newReadOnlyTransaction(identifier); - } - - @Override - public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) { - return super.newReadWriteTransaction(identifier); - } - - @Override - public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) { - return super.newWriteOnlyTransaction(identifier); - } - - @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"}) - @Override - public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) { - checkArgument(tx instanceof SnapshotBackedWriteTransaction); - if (operationError != null) { - return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction)tx, - operationError); - } - - try { - return 
(LocalThreePhaseCommitCohort) tx.ready(); - } catch (Exception e) { - // Unfortunately we need to cast to SnapshotBackedWriteTransaction here as it's required by - // LocalThreePhaseCommitCohort and the base class. - return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction)tx, e); - } - } - - private class LocalChainThreePhaseCommitCohort extends LocalThreePhaseCommitCohort { - - protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction transaction, - DataTreeModification modification, Exception operationError) { - super(parent.getActorUtils(), leader, transaction, modification, operationError); - } - - protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction transaction, - Exception operationError) { - super(parent.getActorUtils(), leader, transaction, operationError); - } - - @Override - protected void transactionAborted(SnapshotBackedWriteTransaction transaction) { - onTransactionFailed(transaction, ABORTED); - } - - @Override - protected void transactionCommitted(SnapshotBackedWriteTransaction transaction) { - onTransactionCommited(transaction); - } - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java deleted file mode 100644 index 6b30069418..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; -import java.util.Optional; -import java.util.SortedSet; -import java.util.function.Consumer; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.AbstractRead; -import org.opendaylight.mdsal.common.api.ReadFailedException; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import scala.concurrent.Future; - -/** - * Processes front-end transaction operations locally before being committed to the destination shard. - * Instances of this class are used when the destination shard is local to the caller. 
- * - * @author Thomas Pantelis - */ -abstract class LocalTransactionContext extends TransactionContext { - private final DOMStoreTransaction txDelegate; - private final LocalTransactionReadySupport readySupport; - private Exception operationError; - - LocalTransactionContext(final DOMStoreTransaction txDelegate, final TransactionIdentifier identifier, - final LocalTransactionReadySupport readySupport) { - super(identifier); - this.txDelegate = requireNonNull(txDelegate); - this.readySupport = readySupport; - } - - abstract DOMStoreWriteTransaction getWriteDelegate(); - - abstract DOMStoreReadTransaction getReadDelegate(); - - @SuppressWarnings("checkstyle:IllegalCatch") - private void executeModification(final Consumer consumer) { - incrementModificationCount(); - if (operationError == null) { - try { - consumer.accept(getWriteDelegate()); - } catch (Exception e) { - operationError = e; - } - } - } - - @Override - void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) { - executeModification(transaction -> transaction.delete(path)); - } - - @Override - void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - executeModification(transaction -> transaction.merge(path, data)); - } - - @Override - void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - executeModification(transaction -> transaction.write(path, data)); - } - - @Override - void executeRead(final AbstractRead readCmd, final SettableFuture proxyFuture, - final Boolean havePermit) { - Futures.addCallback(readCmd.apply(getReadDelegate()), new FutureCallback() { - @Override - public void onSuccess(final T result) { - proxyFuture.set(result); - } - - @Override - public void onFailure(final Throwable failure) { - proxyFuture.setException(failure instanceof Exception - ? ReadFailedException.MAPPER.apply((Exception) failure) : failure); - } - }, MoreExecutors.directExecutor()); - } - - @Override - Future readyTransaction(final Boolean havePermit, - final Optional> participatingShardNames) { - final LocalThreePhaseCommitCohort cohort = ready(); - return cohort.initiateCoordinatedCommit(participatingShardNames); - } - - @Override - Future directCommit(final Boolean havePermit) { - final LocalThreePhaseCommitCohort cohort = ready(); - return cohort.initiateDirectCommit(); - } - - @Override - void closeTransaction() { - txDelegate.close(); - } - - private LocalThreePhaseCommitCohort ready() { - logModificationCount(); - return readySupport.onTransactionReady(getWriteDelegate(), operationError); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java deleted file mode 100644 index e6be3a0c38..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; - -/** - * A factory for creating local transactions used by {@link AbstractTransactionContextFactory} to instantiate - * transactions on shards which are co-located with the shard leader. - * - * @author Thomas Pantelis - */ -interface LocalTransactionFactory extends LocalTransactionReadySupport { - DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier); - - DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier); - - DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier); -} \ No newline at end of file diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java deleted file mode 100644 index 8c84449025..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedTransactions; -import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction.TransactionReadyPrototype; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; - -/** - * {@link LocalTransactionFactory} for instantiating backing transactions which are - * disconnected from each other, ie not chained. These are used by {@link AbstractTransactionContextFactory} - * to instantiate transactions on shards which are co-located with the shard leader. 
- */ -final class LocalTransactionFactoryImpl extends TransactionReadyPrototype - implements LocalTransactionFactory { - - private final ActorSelection leader; - private final ReadOnlyDataTree dataTree; - private final ActorUtils actorUtils; - - LocalTransactionFactoryImpl(final ActorUtils actorUtils, final ActorSelection leader, - final ReadOnlyDataTree dataTree) { - this.leader = requireNonNull(leader); - this.dataTree = requireNonNull(dataTree); - this.actorUtils = actorUtils; - } - - ReadOnlyDataTree getDataTree() { - return dataTree; - } - - @Override - public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) { - return SnapshotBackedTransactions.newReadTransaction(identifier, false, dataTree.takeSnapshot()); - } - - @Override - public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) { - return SnapshotBackedTransactions.newReadWriteTransaction(identifier, false, dataTree.takeSnapshot(), this); - } - - @Override - public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) { - return SnapshotBackedTransactions.newWriteTransaction(identifier, false, dataTree.takeSnapshot(), this); - } - - @Override - protected void transactionAborted(final SnapshotBackedWriteTransaction tx) { - // No-op - } - - @Override - protected DOMStoreThreePhaseCommitCohort transactionReady( - final SnapshotBackedWriteTransaction tx, - final DataTreeModification tree, - final Exception readyError) { - return new LocalThreePhaseCommitCohort(actorUtils, leader, tx, tree, readyError); - } - - @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"}) - @Override - public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) { - checkArgument(tx instanceof SnapshotBackedWriteTransaction); - if (operationError != null) { - return new LocalThreePhaseCommitCohort(actorUtils, leader, - (SnapshotBackedWriteTransaction)tx, operationError); - } - - return (LocalThreePhaseCommitCohort) tx.ready(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java deleted file mode 100644 index 103af19dd3..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import org.eclipse.jdt.annotation.NonNull; -import org.eclipse.jdt.annotation.Nullable; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; - -/** - * Interface for a class that can "ready" a transaction. 
- * - * @author Thomas Pantelis - */ -interface LocalTransactionReadySupport { - LocalThreePhaseCommitCohort onTransactionReady(@NonNull DOMStoreWriteTransaction tx, - @Nullable Exception operationError); -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java deleted file mode 100644 index 1f5f5bcf79..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import com.google.common.util.concurrent.ListenableFuture; -import java.util.Collections; -import java.util.List; -import scala.concurrent.Future; - -/** - * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} - * instance given out for empty transactions. - */ -final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort { - static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort(); - - private NoOpDOMStoreThreePhaseCommitCohort() { - // Hidden to prevent instantiation - } - - @Override - public ListenableFuture canCommit() { - return IMMEDIATE_BOOLEAN_SUCCESS; - } - - @Override - public ListenableFuture preCommit() { - return IMMEDIATE_VOID_SUCCESS; - } - - @Override - public ListenableFuture abort() { - return IMMEDIATE_VOID_SUCCESS; - } - - @Override - public ListenableFuture commit() { - return IMMEDIATE_VOID_SUCCESS; - } - - @Override - List> getCohortFutures() { - return Collections.emptyList(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java deleted file mode 100644 index bfb0046ba0..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import akka.actor.ActorSelection; -import com.google.common.util.concurrent.SettableFuture; -import java.util.Optional; -import java.util.SortedSet; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; -import org.opendaylight.controller.cluster.datastore.messages.AbstractRead; -import org.opendaylight.mdsal.common.api.DataStoreUnavailableException; -import org.opendaylight.mdsal.common.api.ReadFailedException; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -final class NoOpTransactionContext extends TransactionContext { - private static final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class); - - private final Throwable failure; - - NoOpTransactionContext(final Throwable failure, final TransactionIdentifier identifier) { - super(identifier); - this.failure = failure; - } - - @Override - void closeTransaction() { - LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier()); - } - - @Override - Future directCommit(final Boolean havePermit) { - LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure); - return akka.dispatch.Futures.failed(failure); - } - - @Override - Future readyTransaction(final Boolean havePermit, - final Optional> participatingShardNamess) { - LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure); - return akka.dispatch.Futures.failed(failure); - } - - @Override - void executeRead(final AbstractRead readCmd, final SettableFuture proxyFuture, final Boolean havePermit) { - LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(), - readCmd.getPath()); - - final Throwable t; - if (failure instanceof NoShardLeaderException) { - t = new DataStoreUnavailableException(failure.getMessage(), failure); - } else { - t = failure; - } - proxyFuture.setException(new ReadFailedException("Error executeRead " + readCmd.getClass().getSimpleName() - + " for path " + readCmd.getPath(), t)); - } - - @Override - void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) { - LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path); - } - - @Override - void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path); - } - - @Override - void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java index d53a8bb468..43e9c3e6fd 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java @@ -13,10 +13,9 @@ import com.google.common.annotations.Beta; import java.util.Map; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; +import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration; -import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry; import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier; import org.opendaylight.mdsal.dom.spi.store.DOMStore; import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; @@ -24,7 +23,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher; import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.osgi.service.component.annotations.Activate; import org.osgi.service.component.annotations.Component; @@ -39,7 +38,7 @@ import org.slf4j.LoggerFactory; @Beta @Component(factory = OSGiDOMStore.FACTORY_NAME, service = { DOMStore.class, DistributedDataStoreInterface.class }) public final class OSGiDOMStore - implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, DOMDataTreeCommitCohortRegistry { + implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, CommitCohortExtension { // OSGi DS Component Factory name static final String FACTORY_NAME = "org.opendaylight.controller.cluster.datastore.OSGiDOMStore"; static final String DATASTORE_INST_PROP = ".datastore.instance"; @@ -47,30 +46,41 @@ public final class OSGiDOMStore private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMStore.class); - private LogicalDatastoreType datastoreType; + private final LogicalDatastoreType datastoreType; private AbstractDataStore datastore; + @Activate + public OSGiDOMStore(final Map properties) { + datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP)); + datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP)); + LOG.info("Datastore service type {} activated", datastoreType); + } + + @Deactivate + void deactivate() { + datastore = null; + LOG.info("Datastore service type {} deactivated", datastoreType); + } + @Override public ActorUtils getActorUtils() { return datastore.getActorUtils(); } @Override - public ListenerRegistration registerProxyListener( - final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard, - final DOMDataTreeChangeListener delegate) { + public Registration registerProxyListener(final YangInstanceIdentifier shardLookup, + final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) { return datastore.registerProxyListener(shardLookup, insideShard, delegate); } @Override - public ListenerRegistration registerTreeChangeListener( - final 
YangInstanceIdentifier treeId, final L listener) { + public Registration registerTreeChangeListener(final YangInstanceIdentifier treeId, + final DOMDataTreeChangeListener listener) { return datastore.registerTreeChangeListener(treeId, listener); } @Override - public DOMDataTreeCommitCohortRegistration registerCommitCohort( - final DOMDataTreeIdentifier path, final T cohort) { + public Registration registerCommitCohort(final DOMDataTreeIdentifier path, final DOMDataTreeCommitCohort cohort) { return datastore.registerCommitCohort(path, cohort); } @@ -94,16 +104,9 @@ public final class OSGiDOMStore return datastore.newReadWriteTransaction(); } - @Activate - void activate(final Map properties) { - datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP)); - datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP)); - LOG.info("Datastore service type {} activated", datastoreType); - } - - @Deactivate - void deactivate() { - datastore = null; - LOG.info("Datastore service type {} deactivated", datastoreType); + @Override + public Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId, + final DOMDataTreeChangeListener listener) { + return datastore.registerLegacyTreeChangeListener(treeId, listener); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java index 1480643e0b..3e2db7dfed 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java @@ -12,17 +12,15 @@ import static java.util.Objects.requireNonNull; import com.google.common.annotations.Beta; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; -import java.util.Dictionary; -import java.util.Hashtable; import java.util.Map; import org.checkerframework.checker.lock.qual.GuardedBy; -import org.gaul.modernizer_maven_annotations.SuppressModernizer; import org.opendaylight.controller.cluster.ActorSystemProvider; import org.opendaylight.controller.cluster.datastore.config.Configuration; import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl; import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfigProvider; import org.opendaylight.mdsal.common.api.LogicalDatastoreType; import org.opendaylight.mdsal.dom.api.DOMSchemaService; +import org.osgi.framework.FrameworkUtil; import org.osgi.service.component.ComponentFactory; import org.osgi.service.component.ComponentInstance; import org.osgi.service.component.annotations.Activate; @@ -53,7 +51,7 @@ public final class OSGiDistributedDataStore { private final String serviceType; @GuardedBy("this") - private ComponentInstance component; + private ComponentInstance component; @GuardedBy("this") private boolean stopped; @@ -87,17 +85,15 @@ public final class OSGiDistributedDataStore { } @Override - @SuppressModernizer public void onSuccess(final Object result) { LOG.debug("Distributed Datastore type {} reached initial settle", datastoreType); synchronized (this) { if (!stopped) { - final Dictionary dict = new Hashtable<>(); - dict.put(OSGiDOMStore.DATASTORE_TYPE_PROP, 
datastoreType); - dict.put(OSGiDOMStore.DATASTORE_INST_PROP, datastore); - dict.put("type", serviceType); - component = datastoreFactory.newInstance(dict); + component = datastoreFactory.newInstance(FrameworkUtil.asDictionary(Map.of( + OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType, + OSGiDOMStore.DATASTORE_INST_PROP, datastore, + "type", serviceType))); LOG.info("Distributed Datastore type {} started", datastoreType); } } @@ -111,26 +107,23 @@ public final class OSGiDistributedDataStore { private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedDataStore.class); - @Reference - DOMSchemaService schemaService = null; - @Reference - ActorSystemProvider actorSystemProvider = null; - @Reference - DatastoreContextIntrospectorFactory introspectorFactory = null; - @Reference - DatastoreSnapshotRestore snapshotRestore = null; - @Reference - ModuleShardConfigProvider configProvider = null; - @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")") - ComponentFactory datastoreFactory = null; - + private final ComponentFactory datastoreFactory; private DatastoreState configDatastore; private DatastoreState operDatastore; @Activate - void activate(final Map properties) { - configDatastore = createDatastore(LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null); - operDatastore = createDatastore(LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties, + public OSGiDistributedDataStore(@Reference final DOMSchemaService schemaService, + @Reference final ActorSystemProvider actorSystemProvider, + @Reference final DatastoreContextIntrospectorFactory introspectorFactory, + @Reference final DatastoreSnapshotRestore snapshotRestore, + @Reference final ModuleShardConfigProvider configProvider, + @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")") + final ComponentFactory datastoreFactory, final Map properties) { + this.datastoreFactory = requireNonNull(datastoreFactory); + configDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory, + LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null); + operDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory, + LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties, new ConfigurationImpl(configProvider)); } @@ -149,14 +142,16 @@ public final class OSGiDistributedDataStore { configDatastore = null; } - private DatastoreState createDatastore(final LogicalDatastoreType datastoreType, final String serviceType, - final Map properties, final Configuration config) { + private DatastoreState createDatastore(final DOMSchemaService schemaService, + final ActorSystemProvider actorSystemProvider, final DatastoreSnapshotRestore snapshotRestore, + final DatastoreContextIntrospectorFactory introspectorFactory, final LogicalDatastoreType datastoreType, + final String serviceType, final Map properties,final Configuration config) { LOG.info("Distributed Datastore type {} starting", datastoreType); - final DatastoreContextIntrospector introspector = introspectorFactory.newInstance(datastoreType, properties); - final AbstractDataStore datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider, + final var introspector = introspectorFactory.newInstance(datastoreType, properties); + final var datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider, introspector.getContext(), introspector, snapshotRestore, config); - 
datastore.setCloseable(schemaService.registerSchemaContextListener(datastore)); - final DatastoreState state = new DatastoreState(introspector, datastoreType, datastore, serviceType); + datastore.setCloseable(schemaService.registerSchemaContextListener(datastore::onModelContextUpdated)); + final var state = new DatastoreState(introspector, datastoreType, datastore, serviceType); Futures.addCallback(datastore.initialSettleFuture(), state, // Note we are invoked from shard manager and therefore could block it, hence the round-trip to executor diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java deleted file mode 100644 index 3f0c98c47f..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Objects.requireNonNull; - -import com.google.common.annotations.VisibleForTesting; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class for limiting operations. - */ -public class OperationLimiter { - private static final Logger LOG = LoggerFactory.getLogger(OperationLimiter.class); - private final TransactionIdentifier identifier; - private final long acquireTimeout; - private final Semaphore semaphore; - private final int maxPermits; - - OperationLimiter(final TransactionIdentifier identifier, final int maxPermits, final long acquireTimeoutSeconds) { - this.identifier = requireNonNull(identifier); - - checkArgument(acquireTimeoutSeconds >= 0); - this.acquireTimeout = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds); - - checkArgument(maxPermits >= 0); - this.maxPermits = maxPermits; - this.semaphore = new Semaphore(maxPermits); - } - - boolean acquire() { - return acquire(1); - } - - boolean acquire(final int acquirePermits) { - try { - if (semaphore.tryAcquire(acquirePermits, acquireTimeout, TimeUnit.NANOSECONDS)) { - return true; - } - } catch (InterruptedException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Interrupted when trying to acquire operation permit for transaction {}", identifier, e); - } else { - LOG.warn("Interrupted when trying to acquire operation permit for transaction {}", identifier); - } - } - - return false; - } - - void release() { - release(1); - } - - void release(final int permits) { - this.semaphore.release(permits); - } - - @VisibleForTesting - TransactionIdentifier getIdentifier() { - return identifier; - } - - @VisibleForTesting - int availablePermits() { - return semaphore.availablePermits(); - } - - /** - * Release all the permits. 
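The OperationLimiter removed above throttles the number of outstanding operations with a timed Semaphore: callers acquire a permit before sending and release it when the reply arrives. A minimal usage sketch of that throttling pattern, with hypothetical names (Throttle, sendAsync) that are not part of the controller code:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    // Sketch of semaphore-based operation throttling: acquire a permit (with a
    // timeout) before sending, release it once the reply arrives. Throttle and
    // sendAsync are hypothetical names, not controller classes.
    final class Throttle {
        private final Semaphore permits = new Semaphore(100);

        CompletableFuture<String> send(final String request) throws InterruptedException {
            // Block the caller for at most 30 seconds when too many operations are in flight.
            if (!permits.tryAcquire(30, TimeUnit.SECONDS)) {
                return CompletableFuture.failedFuture(new IllegalStateException("operation limit reached"));
            }
            // Release the permit no matter how the operation completes.
            return sendAsync(request).whenComplete((result, failure) -> permits.release());
        }

        private CompletableFuture<String> sendAsync(final String request) {
            // Placeholder for the real asynchronous send.
            return CompletableFuture.completedFuture("reply to " + request);
        }
    }

On top of this pattern the removed class also offers releaseAll(), so a transaction being torn down can return every permit it still holds in one step.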
- */ - public void releaseAll() { - this.semaphore.release(maxPermits - availablePermits()); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java index 4df1352b19..28042ecc3d 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java @@ -8,11 +8,11 @@ package org.opendaylight.controller.cluster.datastore; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction { ReadOnlyShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id, - final DataTreeSnapshot snapshot) { + final DataTreeSnapshot snapshot) { super(parent, id, snapshot); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java index f28d0d08b3..b55d24ac8b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java @@ -7,21 +7,21 @@ */ package org.opendaylight.controller.cluster.datastore; -import com.google.common.base.Preconditions; +import static com.google.common.base.Preconditions.checkState; + import java.util.Optional; import java.util.SortedSet; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; public final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction { - ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id, - final DataTreeModification modification) { + final DataTreeModification modification) { super(parent, id, modification); } - ShardDataTreeCohort ready(Optional> participatingShardNames) { - Preconditions.checkState(close(), "Transaction is already closed"); + ShardDataTreeCohort ready(final Optional> participatingShardNames) { + checkState(close(), "Transaction is already closed"); return getParent().finishTransaction(this, participatingShardNames); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java deleted file mode 100644 index ade9c375e5..0000000000 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkState; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import akka.dispatch.Futures; -import akka.dispatch.OnComplete; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.SettableFuture; -import java.util.Optional; -import java.util.SortedSet; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.AbstractRead; -import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications; -import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction; -import org.opendaylight.controller.cluster.datastore.modification.AbstractModification; -import org.opendaylight.controller.cluster.datastore.modification.DeleteModification; -import org.opendaylight.controller.cluster.datastore.modification.MergeModification; -import org.opendaylight.controller.cluster.datastore.modification.Modification; -import org.opendaylight.controller.cluster.datastore.modification.WriteModification; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.common.api.ReadFailedException; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -/** - * Redirects front-end transaction operations to a shard for processing. Instances of this class are used - * when the destination shard is remote to the caller. - * - * @author Thomas Pantelis - */ -final class RemoteTransactionContext extends TransactionContext { - private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContext.class); - - private final ActorUtils actorUtils; - private final ActorSelection actor; - private final OperationLimiter limiter; - - private BatchedModifications batchedModifications; - private int totalBatchedModificationsSent; - private int batchPermits; - - /** - * We have observed a failed modification batch. This transaction context is effectively doomed, as the backend - * does not have a correct view of the world. If this happens, we do not limit operations but rather short-cut them - * to a either a no-op (modifications) or a failure (reads). Once the transaction is ready, though, we send the - * message to resynchronize with the backend, sharing a 'lost message' failure path. 
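The comment above describes a fail-fast contract: once one batched modification fails, further writes on the doomed transaction are skipped and reads are failed immediately, leaving the final ready/commit message to resynchronize with the backend. A compact sketch of that contract, using invented names (DoomedContext, recordFailure) rather than the removed class:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicReference;

    // Sketch of the fail-fast contract: after the first observed failure, writes
    // become no-ops and reads fail immediately. DoomedContext and its methods are
    // invented names, not the removed class.
    final class DoomedContext {
        private final AtomicReference<Throwable> failure = new AtomicReference<>();

        void recordFailure(final Throwable cause) {
            failure.compareAndSet(null, cause);
        }

        void write(final String path, final Object data) {
            if (failure.get() != null) {
                // The backend no longer has a consistent view; skip the write and
                // let the eventual ready/commit report the failure.
                return;
            }
            // ... send the modification to the backend ...
        }

        CompletableFuture<Object> read(final String path) {
            final Throwable cause = failure.get();
            if (cause != null) {
                // Fail the read right away instead of risking stale or partial data.
                return CompletableFuture.failedFuture(cause);
            }
            // ... issue the read to the backend and complete this future from the reply ...
            return new CompletableFuture<>();
        }
    }

The short-cut is purely about latency: the transaction is going to fail anyway, so the application should not have to sit through per-operation timeouts and the operation limiter to find that out.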
- */ - private volatile Throwable failedModification; - - RemoteTransactionContext(final TransactionIdentifier identifier, final ActorSelection actor, - final ActorUtils actorUtils, final short remoteTransactionVersion, final OperationLimiter limiter) { - super(identifier, remoteTransactionVersion); - this.limiter = requireNonNull(limiter); - this.actor = actor; - this.actorUtils = actorUtils; - } - - private ActorSelection getActor() { - return actor; - } - - protected ActorUtils getActorUtils() { - return actorUtils; - } - - @Override - void closeTransaction() { - LOG.debug("Tx {} closeTransaction called", getIdentifier()); - TransactionContextCleanup.untrack(this); - - actorUtils.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable()); - } - - @Override - Future directCommit(final Boolean havePermit) { - LOG.debug("Tx {} directCommit called", getIdentifier()); - - // Send the remaining batched modifications, if any, with the ready flag set. - bumpPermits(havePermit); - return sendBatchedModifications(true, true, Optional.empty()); - } - - @Override - Future readyTransaction(final Boolean havePermit, - final Optional> participatingShardNames) { - logModificationCount(); - - LOG.debug("Tx {} readyTransaction called", getIdentifier()); - - // Send the remaining batched modifications, if any, with the ready flag set. - - bumpPermits(havePermit); - Future lastModificationsFuture = sendBatchedModifications(true, false, participatingShardNames); - - // Transform the last reply Future into a Future that returns the cohort actor path from - // the last reply message. That's the end result of the ready operation. - return TransactionReadyReplyMapper.transform(lastModificationsFuture, actorUtils, getIdentifier()); - } - - private void bumpPermits(final Boolean havePermit) { - if (Boolean.TRUE.equals(havePermit)) { - ++batchPermits; - } - } - - private BatchedModifications newBatchedModifications() { - return new BatchedModifications(getIdentifier(), getTransactionVersion()); - } - - private void batchModification(final Modification modification, final boolean havePermit) { - incrementModificationCount(); - if (havePermit) { - ++batchPermits; - } - - if (batchedModifications == null) { - batchedModifications = newBatchedModifications(); - } - - batchedModifications.addModification(modification); - - if (batchedModifications.getModifications().size() - >= actorUtils.getDatastoreContext().getShardBatchedModificationCount()) { - sendBatchedModifications(); - } - } - - @VisibleForTesting - Future sendBatchedModifications() { - return sendBatchedModifications(false, false, Optional.empty()); - } - - private Future sendBatchedModifications(final boolean ready, final boolean doCommitOnReady, - final Optional> participatingShardNames) { - Future sent = null; - if (ready || batchedModifications != null && !batchedModifications.getModifications().isEmpty()) { - if (batchedModifications == null) { - batchedModifications = newBatchedModifications(); - } - - LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(), - batchedModifications.getModifications().size(), ready); - - batchedModifications.setDoCommitOnReady(doCommitOnReady); - batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent); - - final BatchedModifications toSend = batchedModifications; - final int permitsToRelease = batchPermits; - batchPermits = 0; - - if (ready) { - batchedModifications.setReady(participatingShardNames); - 
batchedModifications.setDoCommitOnReady(doCommitOnReady); - batchedModifications = null; - } else { - batchedModifications = newBatchedModifications(); - - final Throwable failure = failedModification; - if (failure != null) { - // We have observed a modification failure, it does not make sense to send this batch. This speeds - // up the time when the application could be blocked due to messages timing out and operation - // limiter kicking in. - LOG.debug("Tx {} modifications previously failed, not sending a non-ready batch", getIdentifier()); - limiter.release(permitsToRelease); - return Futures.failed(failure); - } - } - - sent = actorUtils.executeOperationAsync(getActor(), toSend.toSerializable(), - actorUtils.getTransactionCommitOperationTimeout()); - sent.onComplete(new OnComplete<>() { - @Override - public void onComplete(final Throwable failure, final Object success) { - if (failure != null) { - LOG.debug("Tx {} modifications failed", getIdentifier(), failure); - failedModification = failure; - } else { - LOG.debug("Tx {} modifications completed with {}", getIdentifier(), success); - } - limiter.release(permitsToRelease); - } - }, actorUtils.getClientDispatcher()); - } - - return sent; - } - - @Override - void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) { - LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path); - executeModification(new DeleteModification(path), havePermit); - } - - @Override - void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path); - executeModification(new MergeModification(path, data), havePermit); - } - - @Override - void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) { - LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path); - executeModification(new WriteModification(path, data), havePermit); - } - - private void executeModification(final AbstractModification modification, final Boolean havePermit) { - final boolean permitToRelease; - if (havePermit == null) { - permitToRelease = failedModification == null && acquireOperation(); - } else { - permitToRelease = havePermit; - } - - batchModification(modification, permitToRelease); - } - - @Override - void executeRead(final AbstractRead readCmd, final SettableFuture returnFuture, - final Boolean havePermit) { - LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(), - readCmd.getPath()); - - final Throwable failure = failedModification; - if (failure != null) { - // If we know there was a previous modification failure, we must not send a read request, as it risks - // returning incorrect data. We check this before acquiring an operation simply because we want the app - // to complete this transaction as soon as possible. - returnFuture.setException(new ReadFailedException("Previous modification failed, cannot " - + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure)); - return; - } - - // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the - // public API contract. - - final boolean permitToRelease = havePermit == null ? 
acquireOperation() : havePermit; - sendBatchedModifications(); - - OnComplete onComplete = new OnComplete<>() { - @Override - public void onComplete(final Throwable failure, final Object response) { - // We have previously acquired an operation, now release it, no matter what happened - if (permitToRelease) { - limiter.release(); - } - - if (failure != null) { - LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(), - failure); - - returnFuture.setException(new ReadFailedException("Error checking " - + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure)); - } else { - LOG.debug("Tx {} {} operation succeeded", getIdentifier(), readCmd.getClass().getSimpleName()); - readCmd.processResponse(response, returnFuture); - } - } - }; - - final Future future = actorUtils.executeOperationAsync(getActor(), - readCmd.asVersion(getTransactionVersion()).toSerializable(), actorUtils.getOperationTimeout()); - future.onComplete(onComplete, actorUtils.getClientDispatcher()); - } - - /** - * Acquire operation from the limiter if the hand-off has completed. If the hand-off is still ongoing, this method - * does nothing. - * - * @return True if a permit was successfully acquired, false otherwise - */ - private boolean acquireOperation() { - checkState(isOperationHandOffComplete(), - "Attempted to acquire execute operation permit for transaction %s on actor %s during handoff", - getIdentifier(), actor); - - if (limiter.acquire()) { - return true; - } - - LOG.warn("Failed to acquire execute operation permit for transaction {} on actor {}", getIdentifier(), actor); - return false; - } - - @Override - boolean usesOperationLimiting() { - return true; - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java deleted file mode 100644 index 333d11b4f0..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import akka.dispatch.OnComplete; -import akka.pattern.AskTimeoutException; -import akka.util.Timeout; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.util.concurrent.TimeUnit; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; -import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException; -import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction; -import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply; -import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; -import scala.concurrent.duration.FiniteDuration; - -/** - * Handles creation of TransactionContext instances for remote transactions. This class creates - * remote transactions, if necessary, by sending CreateTransaction messages with retries, up to a limit, - * if the shard doesn't have a leader yet. This is done by scheduling a retry task after a short delay. - *
    - * The end result from a completed CreateTransaction message is a TransactionContext that is - * used to perform transaction operations. Transaction operations that occur before the - * CreateTransaction completes are cached via a DelayedTransactionContextWrapper and executed once the - * CreateTransaction completes, successfully or not. - */ -final class RemoteTransactionContextSupport { - private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContextSupport.class); - - private static final long CREATE_TX_TRY_INTERVAL_IN_MS = 1000; - private static final long MAX_CREATE_TX_MSG_TIMEOUT_IN_MS = 5000; - - private final TransactionProxy parent; - private final String shardName; - - /** - * The target primary shard. - */ - private volatile PrimaryShardInfo primaryShardInfo; - - /** - * The total timeout for creating a tx on the primary shard. - */ - private volatile long totalCreateTxTimeout; - - private final Timeout createTxMessageTimeout; - - private final DelayedTransactionContextWrapper transactionContextWrapper; - - RemoteTransactionContextSupport(final DelayedTransactionContextWrapper transactionContextWrapper, - final TransactionProxy parent, final String shardName) { - this.parent = requireNonNull(parent); - this.shardName = shardName; - this.transactionContextWrapper = transactionContextWrapper; - - // For the total create tx timeout, use 2 times the election timeout. This should be enough time for - // a leader re-election to occur if we happen to hit it in transition. - totalCreateTxTimeout = parent.getActorUtils().getDatastoreContext().getShardRaftConfig() - .getElectionTimeOutInterval().toMillis() * 2; - - // We'll use the operationTimeout for the the create Tx message timeout so it can be set appropriately - // for unit tests but cap it at MAX_CREATE_TX_MSG_TIMEOUT_IN_MS. The operationTimeout could be set - // larger than the totalCreateTxTimeout in production which we don't want. - long operationTimeout = parent.getActorUtils().getOperationTimeout().duration().toMillis(); - createTxMessageTimeout = new Timeout(Math.min(operationTimeout, MAX_CREATE_TX_MSG_TIMEOUT_IN_MS), - TimeUnit.MILLISECONDS); - } - - String getShardName() { - return shardName; - } - - private TransactionType getTransactionType() { - return parent.getType(); - } - - private ActorUtils getActorUtils() { - return parent.getActorUtils(); - } - - private TransactionIdentifier getIdentifier() { - return parent.getIdentifier(); - } - - /** - * Sets the target primary shard and initiates a CreateTransaction try. - */ - void setPrimaryShard(final PrimaryShardInfo newPrimaryShardInfo) { - this.primaryShardInfo = newPrimaryShardInfo; - - if (getTransactionType() == TransactionType.WRITE_ONLY - && getActorUtils().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) { - ActorSelection primaryShard = newPrimaryShardInfo.getPrimaryShardActor(); - - LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context", - getIdentifier(), primaryShard); - - // For write-only Tx's we prepare the transaction modifications directly on the shard actor - // to avoid the overhead of creating a separate transaction actor. - transactionContextWrapper.executePriorTransactionOperations(createValidTransactionContext( - primaryShard, String.valueOf(primaryShard.path()), newPrimaryShardInfo.getPrimaryShardVersion())); - } else { - tryCreateTransaction(); - } - } - - /** - Performs a CreateTransaction try async. 
- */ - private void tryCreateTransaction() { - LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), - primaryShardInfo.getPrimaryShardActor()); - - Object serializedCreateMessage = new CreateTransaction(getIdentifier(), getTransactionType().ordinal(), - primaryShardInfo.getPrimaryShardVersion()).toSerializable(); - - Future createTxFuture = getActorUtils().executeOperationAsync( - primaryShardInfo.getPrimaryShardActor(), serializedCreateMessage, createTxMessageTimeout); - - createTxFuture.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final Object response) { - onCreateTransactionComplete(failure, response); - } - }, getActorUtils().getClientDispatcher()); - } - - private void tryFindPrimaryShard() { - LOG.debug("Tx {} Retrying findPrimaryShardAsync for shard {}", getIdentifier(), shardName); - - this.primaryShardInfo = null; - Future findPrimaryFuture = getActorUtils().findPrimaryShardAsync(shardName); - findPrimaryFuture.onComplete(new OnComplete() { - @Override - public void onComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) { - onFindPrimaryShardComplete(failure, newPrimaryShardInfo); - } - }, getActorUtils().getClientDispatcher()); - } - - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") - private void onFindPrimaryShardComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) { - if (failure == null) { - this.primaryShardInfo = newPrimaryShardInfo; - tryCreateTransaction(); - } else { - LOG.debug("Tx {}: Find primary for shard {} failed", getIdentifier(), shardName, failure); - - onCreateTransactionComplete(failure, null); - } - } - - private void onCreateTransactionComplete(final Throwable failure, final Object response) { - // An AskTimeoutException will occur if the local shard forwards to an unavailable remote leader or - // the cached remote leader actor is no longer available. - boolean retryCreateTransaction = primaryShardInfo != null - && (failure instanceof NoShardLeaderException || failure instanceof AskTimeoutException); - - // Schedule a retry unless we're out of retries. Note: totalCreateTxTimeout is volatile as it may - // be written by different threads however not concurrently, therefore decrementing it - // non-atomically here is ok. - if (retryCreateTransaction && totalCreateTxTimeout > 0) { - long scheduleInterval = CREATE_TX_TRY_INTERVAL_IN_MS; - if (failure instanceof AskTimeoutException) { - // Since we use the createTxMessageTimeout for the CreateTransaction request and it timed - // out, subtract it from the total timeout. Also since the createTxMessageTimeout period - // has already elapsed, we can immediately schedule the retry (10 ms is virtually immediate). 
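The retry handling above follows a time-budget pattern: a total timeout is decremented by whatever each failed attempt cost, and another attempt is scheduled only while budget remains; an attempt that already burned its message timeout is retried almost immediately. A simplified sketch of that pattern (names and numbers are illustrative, not the controller's):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch of the time-budget retry pattern: every failed attempt is charged
    // against a remaining budget and the next attempt is scheduled only while
    // budget is left. Names and numbers here are illustrative only.
    final class RetryBudget {
        private static final long RETRY_INTERVAL_MS = 1000;

        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        private long remainingMs = 4000;

        void onAttemptFailed(final boolean requestTimedOut, final long requestTimeoutMs, final Runnable retry) {
            long delay = RETRY_INTERVAL_MS;
            if (requestTimedOut) {
                // The per-request timeout already elapsed: charge it to the budget
                // and retry almost immediately.
                remainingMs -= requestTimeoutMs;
                delay = 10;
            }

            if (remainingMs > 0) {
                remainingMs -= delay;
                scheduler.schedule(retry, delay, TimeUnit.MILLISECONDS);
            }
            // otherwise give up and surface the last failure to the caller
        }
    }

In the removed class the budget is set to twice the shard election timeout, which gives a leader re-election time to complete before transaction creation is abandoned.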
- totalCreateTxTimeout -= createTxMessageTimeout.duration().toMillis(); - scheduleInterval = 10; - } - - totalCreateTxTimeout -= scheduleInterval; - - LOG.debug("Tx {}: create tx on shard {} failed with exception \"{}\" - scheduling retry in {} ms", - getIdentifier(), shardName, failure, scheduleInterval); - - getActorUtils().getActorSystem().scheduler().scheduleOnce( - FiniteDuration.create(scheduleInterval, TimeUnit.MILLISECONDS), - this::tryFindPrimaryShard, getActorUtils().getClientDispatcher()); - return; - } - - createTransactionContext(failure, response); - } - - private void createTransactionContext(final Throwable failure, final Object response) { - // Create the TransactionContext from the response or failure. Store the new - // TransactionContext locally until we've completed invoking the - // TransactionOperations. This avoids thread timing issues which could cause - // out-of-order TransactionOperations. Eg, on a modification operation, if the - // TransactionContext is non-null, then we directly call the TransactionContext. - // However, at the same time, the code may be executing the cached - // TransactionOperations. So to avoid thus timing, we don't publish the - // TransactionContext until after we've executed all cached TransactionOperations. - TransactionContext localTransactionContext; - if (failure != null) { - LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure); - - Throwable resultingEx = failure; - if (failure instanceof AskTimeoutException) { - resultingEx = new ShardLeaderNotRespondingException(String.format( - "Could not create a %s transaction on shard %s. The shard leader isn't responding.", - parent.getType(), shardName), failure); - } else if (!(failure instanceof NoShardLeaderException)) { - resultingEx = new Exception(String.format( - "Error creating %s transaction on shard %s", parent.getType(), shardName), failure); - } - - localTransactionContext = new NoOpTransactionContext(resultingEx, getIdentifier()); - } else if (CreateTransactionReply.isSerializedType(response)) { - localTransactionContext = createValidTransactionContext( - CreateTransactionReply.fromSerializable(response)); - } else { - IllegalArgumentException exception = new IllegalArgumentException(String.format( - "Invalid reply type %s for CreateTransaction", response.getClass())); - - localTransactionContext = new NoOpTransactionContext(exception, getIdentifier()); - } - transactionContextWrapper.executePriorTransactionOperations(localTransactionContext); - } - - private TransactionContext createValidTransactionContext(final CreateTransactionReply reply) { - LOG.debug("Tx {} Received {}", getIdentifier(), reply); - - return createValidTransactionContext(getActorUtils().actorSelection(reply.getTransactionPath()), - reply.getTransactionPath(), primaryShardInfo.getPrimaryShardVersion()); - } - - private TransactionContext createValidTransactionContext(final ActorSelection transactionActor, - final String transactionPath, final short remoteTransactionVersion) { - final TransactionContext ret = new RemoteTransactionContext(transactionContextWrapper.getIdentifier(), - transactionActor, getActorUtils(), remoteTransactionVersion, transactionContextWrapper.getLimiter()); - - if (parent.getType() == TransactionType.READ_ONLY) { - TransactionContextCleanup.track(parent, ret); - } - - return ret; - } -} - diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java index 8322289618..857c2844ff 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java @@ -14,10 +14,12 @@ import akka.actor.ActorRef; import akka.actor.Props; import com.google.common.collect.Iterables; import java.util.ArrayDeque; +import java.util.ArrayList; import java.util.Collection; import java.util.Deque; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged; import org.opendaylight.controller.cluster.datastore.messages.OnInitialData; @@ -27,10 +29,10 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates; import org.opendaylight.yangtools.yang.data.impl.schema.Builders; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes; +import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates; import org.opendaylight.yangtools.yang.model.api.SchemaContext; final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor { @@ -41,7 +43,7 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor private Deque otherMessages = new ArrayDeque<>(); private RootDataTreeChangeListenerActor(final DOMDataTreeChangeListener listener, final int shardCount) { - super(listener, YangInstanceIdentifier.empty()); + super(listener, YangInstanceIdentifier.of()); this.shardCount = shardCount; } @@ -83,7 +85,7 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor /* * We need to make-pretend that the data coming into the listener is coming from a single logical entity, where * ordering is partially guaranteed (on shard boundaries). The data layout in shards is such that each DataTree - * is rooted at YangInstanceIdentifier.empty(), but their contents vary: + * is rooted at YangInstanceIdentifier.of(), but their contents vary: * * 1) non-default shards contain immediate children of root from one module * 2) default shard contains everything else @@ -95,7 +97,10 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor * Construct an overall NormalizedNode view of the entire datastore by combining first-level children from all * reported initial state reports, report that node as written and then report any additional deltas. 
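The change that follows replaces a Deque whose first element was prepended at the end with a List whose first slot is reserved up front: extra per-shard deltas are appended while iterating, and the combined root candidate is written into index 0 once all shards have been processed. A small sketch of that "reserve the first slot, fill it last" approach, with the element type simplified to String (the real code carries DataTreeCandidate instances):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the "reserve the first slot, fill it last" approach: per-shard
    // extra deltas are appended while iterating and the combined root change is
    // written into index 0 at the end. Each shard is assumed to report at least
    // one change; the element type is simplified to String.
    final class ReservedFirstSlot {
        static List<String> combine(final List<List<String>> perShardChanges) {
            final List<String> result = new ArrayList<>();
            // Reserve the first item for the combined initial change.
            result.add(null);

            final StringBuilder combinedRoot = new StringBuilder("root:");
            for (List<String> changes : perShardChanges) {
                // The first change of each shard contributes to the combined root ...
                combinedRoot.append(' ').append(changes.get(0));
                // ... any further changes are reported as additional deltas.
                result.addAll(changes.subList(1, changes.size()));
            }

            // Replace the placeholder with the combined change.
            result.set(0, combinedRoot.toString());
            return result;
        }
    }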
*/ - final Deque initialChanges = new ArrayDeque<>(); + final List initialChanges = new ArrayList<>(); + // Reserve first item + initialChanges.add(null); + final DataContainerNodeBuilder rootBuilder = Builders.containerBuilder() .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME)); for (Object message : initialMessages.values()) { @@ -106,12 +111,12 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor final Iterator it = changes.iterator(); initial = it.next(); // Append to changes to report as initial. This should not be happening (often?). - it.forEachRemaining(initialChanges::addLast); + it.forEachRemaining(initialChanges::add); } else { initial = Iterables.get(changes, 0); } - final NormalizedNode root = initial.getRootNode().getDataAfter().orElseThrow(); + final NormalizedNode root = initial.getRootNode().getDataAfter(); verify(root instanceof ContainerNode, "Unexpected root node %s", root); ((ContainerNode) root).body().forEach(rootBuilder::withChild); } @@ -119,8 +124,8 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor // We will not be intercepting any other messages, allow initial state to be reclaimed as soon as possible initialMessages = null; - // Prepend combined initial changed and report initial changes and clear the map - initialChanges.addFirst(DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.empty(), + // Replace first element with the combined initial change, report initial changes and clear the map + initialChanges.set(0, DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.of(), DataTreeCandidateNodes.written(rootBuilder.build()))); super.dataTreeChanged(new DataTreeChanged(initialChanges)); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java index 6f4a5f1d06..43cbb7e872 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java @@ -16,7 +16,6 @@ import akka.actor.ActorSelection; import akka.actor.PoisonPill; import akka.dispatch.OnComplete; import com.google.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -31,13 +30,12 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeCh import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply; import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.AbstractListenerRegistration; +import org.opendaylight.yangtools.concepts.AbstractObjectRegistration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -final class RootDataTreeChangeListenerProxy - extends AbstractListenerRegistration { +final class RootDataTreeChangeListenerProxy extends AbstractObjectRegistration { private abstract static class State { } @@ -76,7 +74,7 @@ final class RootDataTreeChangeListenerProxy 
final Set shardNames) { super(listener); this.actorUtils = requireNonNull(actorUtils); - this.state = new ResolveShards(shardNames.size()); + state = new ResolveShards(shardNames.size()); for (String shardName : shardNames) { actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete() { @@ -95,19 +93,17 @@ final class RootDataTreeChangeListenerProxy } else if (state instanceof ResolveShards) { // Simple case: just mark the fact we were closed, terminating when resolution finishes state = new Terminated(); - } else if (state instanceof Subscribed) { - terminate((Subscribed) state); + } else if (state instanceof Subscribed subscribed) { + terminate(subscribed); } else { throw new IllegalStateException("Unhandled close in state " + state); } } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private synchronized void onFindLocalShardComplete(final String shardName, final Throwable failure, final ActorRef shard) { - if (state instanceof ResolveShards) { - localShardsResolved((ResolveShards) state, shardName, failure, shard); + if (state instanceof ResolveShards resolveShards) { + localShardsResolved(resolveShards, shardName, failure, shard); } else { LOG.debug("{}: lookup for shard {} turned into a noop on state {}", logContext(), shardName, state); } @@ -156,7 +152,7 @@ final class RootDataTreeChangeListenerProxy // Subscribe to all shards final RegisterDataTreeChangeListener regMessage = new RegisterDataTreeChangeListener( - YangInstanceIdentifier.empty(), dtclActor, true); + YangInstanceIdentifier.of(), dtclActor, true); for (Entry entry : localShards.entrySet()) { // Do not retain references to localShards final String shardName = entry.getKey(); @@ -172,11 +168,8 @@ final class RootDataTreeChangeListenerProxy } } - @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD", - justification = "https://github.com/spotbugs/spotbugs/issues/811") private synchronized void onShardSubscribed(final String shardName, final Throwable failure, final Object result) { - if (state instanceof Subscribed) { - final Subscribed current = (Subscribed) state; + if (state instanceof Subscribed current) { if (failure != null) { LOG.error("{}: Shard {} failed to subscribe, terminating listener {}", logContext(), shardName,getInstance(), failure); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java index a4d524299b..1fcaa9d64d 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java @@ -30,6 +30,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Range; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -39,6 +40,7 @@ import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.ABIVersion; @@ 
-50,8 +52,6 @@ import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelope import org.opendaylight.controller.cluster.access.commands.TransactionRequest; import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.controller.cluster.access.concepts.Request; import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope; import org.opendaylight.controller.cluster.access.concepts.RequestException; import org.opendaylight.controller.cluster.access.concepts.RequestSuccess; @@ -64,7 +64,6 @@ import org.opendaylight.controller.cluster.common.actor.CommonConfig; import org.opendaylight.controller.cluster.common.actor.Dispatchers; import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType; import org.opendaylight.controller.cluster.common.actor.MessageTracker; -import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error; import org.opendaylight.controller.cluster.common.actor.MeteringBehavior; import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor; import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; @@ -107,16 +106,15 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus; import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState; import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.messages.RequestLeadership; import org.opendaylight.controller.cluster.raft.messages.ServerRemoved; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery; import org.opendaylight.yangtools.concepts.Identifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException; -import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType; +import org.opendaylight.yangtools.yang.data.tree.api.DataTree; +import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException; +import org.opendaylight.yangtools.yang.data.tree.api.TreeType; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; -import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider; import scala.concurrent.duration.FiniteDuration; /** @@ -182,6 +180,7 @@ public class Shard extends RaftActor { private DatastoreContext datastoreContext; + @Deprecated(since = "9.0.0", forRemoval = true) private final ShardCommitCoordinator commitCoordinator; private long transactionCommitTimeout; @@ -192,6 +191,7 @@ public class Shard extends RaftActor { private final MessageTracker appendEntriesReplyTracker; + @Deprecated(since = "9.0.0", forRemoval = true) private final ShardTransactionActorFactory transactionActorFactory; private final ShardSnapshotCohort 
snapshotCohort; @@ -200,6 +200,7 @@ public class Shard extends RaftActor { private ShardSnapshot restoreFromSnapshot; + @Deprecated(since = "9.0.0", forRemoval = true) private final ShardTransactionMessageRetrySupport messageRetrySupport; @VisibleForTesting @@ -217,6 +218,7 @@ public class Shard extends RaftActor { private final ActorRef exportActor; + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design") Shard(final AbstractBuilder builder) { super(builder.getId().toString(), builder.getPeerAddresses(), Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION); @@ -228,16 +230,11 @@ public class Shard extends RaftActor { frontendMetadata = new FrontendMetadata(name); exportOnRecovery = datastoreContext.getExportOnRecovery(); - switch (exportOnRecovery) { - case Json: - exportActor = getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(), - datastoreContext.getRecoveryExportBaseDir())); - break; - case Off: - default: - exportActor = null; - break; - } + exportActor = switch (exportOnRecovery) { + case Json -> getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(), + datastoreContext.getRecoveryExportBaseDir())); + case Off -> null; + }; setPersistence(datastoreContext.isPersistent()); @@ -302,7 +299,7 @@ public class Shard extends RaftActor { } private Optional createRoleChangeNotifier(final String shardId) { - ActorRef shardRoleChangeNotifier = this.getContext().actorOf( + ActorRef shardRoleChangeNotifier = getContext().actorOf( RoleChangeNotifier.getProps(shardId), shardId + "-notifier"); return Optional.of(shardRoleChangeNotifier); } @@ -335,11 +332,10 @@ public class Shard extends RaftActor { switch (exportOnRecovery) { case Json: if (message instanceof SnapshotOffer) { - exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().get(), name), - ActorRef.noSender()); - } else if (message instanceof ReplicatedLogEntry) { - exportActor.tell(new JsonExportActor.ExportJournal((ReplicatedLogEntry) message), + exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().orElseThrow(), name), ActorRef.noSender()); + } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) { + exportActor.tell(new JsonExportActor.ExportJournal(replicatedLogEntry), ActorRef.noSender()); } else if (message instanceof RecoveryCompleted) { exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender()); exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender()); @@ -358,54 +354,37 @@ public class Shard extends RaftActor { @Override // non-final for TestShard protected void handleNonRaftCommand(final Object message) { - try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) { - final Optional maybeError = context.error(); + try (var context = appendEntriesReplyTracker.received(message)) { + final var maybeError = context.error(); if (maybeError.isPresent()) { LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(), - maybeError.get()); + maybeError.orElseThrow()); } store.resetTransactionBatch(); - if (message instanceof RequestEnvelope) { - handleRequestEnvelope((RequestEnvelope)message); + if (message instanceof RequestEnvelope request) { + handleRequestEnvelope(request); } else if (MessageAssembler.isHandledMessage(message)) { handleRequestAssemblerMessage(message); - } else if (message instanceof ConnectClientRequest) { - 
handleConnectClient((ConnectClientRequest)message); - } else if (CreateTransaction.isSerializedType(message)) { - handleCreateTransaction(message); - } else if (message instanceof BatchedModifications) { - handleBatchedModifications((BatchedModifications)message); - } else if (message instanceof ForwardedReadyTransaction) { - handleForwardedReadyTransaction((ForwardedReadyTransaction) message); - } else if (message instanceof ReadyLocalTransaction) { - handleReadyLocalTransaction((ReadyLocalTransaction)message); - } else if (CanCommitTransaction.isSerializedType(message)) { - handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message)); - } else if (CommitTransaction.isSerializedType(message)) { - handleCommitTransaction(CommitTransaction.fromSerializable(message)); - } else if (AbortTransaction.isSerializedType(message)) { - handleAbortTransaction(AbortTransaction.fromSerializable(message)); - } else if (CloseTransactionChain.isSerializedType(message)) { - closeTransactionChain(CloseTransactionChain.fromSerializable(message)); + } else if (message instanceof ConnectClientRequest request) { + handleConnectClient(request); } else if (message instanceof DataTreeChangedReply) { // Ignore reply - } else if (message instanceof RegisterDataTreeChangeListener) { - treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader()); - } else if (message instanceof UpdateSchemaContext) { - updateSchemaContext((UpdateSchemaContext) message); - } else if (message instanceof PeerAddressResolved) { - PeerAddressResolved resolved = (PeerAddressResolved) message; + } else if (message instanceof RegisterDataTreeChangeListener request) { + treeChangeSupport.onMessage(request, isLeader(), hasLeader()); + } else if (message instanceof UpdateSchemaContext request) { + updateSchemaContext(request); + } else if (message instanceof PeerAddressResolved resolved) { setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress()); } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) { commitTimeoutCheck(); - } else if (message instanceof DatastoreContext) { - onDatastoreContext((DatastoreContext)message); + } else if (message instanceof DatastoreContext request) { + onDatastoreContext(request); } else if (message instanceof RegisterRoleChangeListener) { - roleChangeNotifier.get().forward(message, context()); - } else if (message instanceof FollowerInitialSyncUpStatus) { - shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone()); + roleChangeNotifier.orElseThrow().forward(message, context()); + } else if (message instanceof FollowerInitialSyncUpStatus request) { + shardMBean.setFollowerInitialSyncStatus(request.isInitialSyncDone()); context().parent().tell(message, self()); } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) { sender().tell(getShardMBean(), self()); @@ -413,11 +392,8 @@ public class Shard extends RaftActor { sender().tell(store.getDataTree(), self()); } else if (message instanceof ServerRemoved) { context().parent().forward(message, context()); - } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) { - messageRetrySupport.onTimerMessage(message); - } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) { - store.processCohortRegistryCommand(getSender(), - (DataTreeCohortActorRegistry.CohortRegistryCommand) message); + } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand request) { + 
store.processCohortRegistryCommand(getSender(), request); } else if (message instanceof MakeLeaderLocal) { onMakeLeaderLocal(); } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) { @@ -425,7 +401,28 @@ public class Shard extends RaftActor { } else if (GetKnownClients.INSTANCE.equals(message)) { handleGetKnownClients(); } else if (!responseMessageSlicer.handleMessage(message)) { - super.handleNonRaftCommand(message); + // Ask-based protocol messages + if (CreateTransaction.isSerializedType(message)) { + handleCreateTransaction(message); + } else if (message instanceof BatchedModifications request) { + handleBatchedModifications(request); + } else if (message instanceof ForwardedReadyTransaction request) { + handleForwardedReadyTransaction(request); + } else if (message instanceof ReadyLocalTransaction request) { + handleReadyLocalTransaction(request); + } else if (CanCommitTransaction.isSerializedType(message)) { + handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message)); + } else if (CommitTransaction.isSerializedType(message)) { + handleCommitTransaction(CommitTransaction.fromSerializable(message)); + } else if (AbortTransaction.isSerializedType(message)) { + handleAbortTransaction(AbortTransaction.fromSerializable(message)); + } else if (CloseTransactionChain.isSerializedType(message)) { + closeTransactionChain(CloseTransactionChain.fromSerializable(message)); + } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) { + messageRetrySupport.onTimerMessage(message); + } else { + super.handleNonRaftCommand(message); + } } } } @@ -471,7 +468,7 @@ public class Shard extends RaftActor { } private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) { - final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId(); + final FrontendIdentifier frontend = cohort.transactionId().getHistoryId().getClientId().getFrontendId(); final LeaderFrontendState state = knownFrontends.get(frontend); if (state == null) { // Not tell-based protocol, do nothing @@ -635,14 +632,12 @@ public class Shard extends RaftActor { throw new NotLeaderException(getSelf()); } - final Request request = envelope.getMessage(); - if (request instanceof TransactionRequest) { - final TransactionRequest txReq = (TransactionRequest)request; - final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId(); + final var request = envelope.getMessage(); + if (request instanceof TransactionRequest txReq) { + final var clientId = txReq.getTarget().getHistoryId().getClientId(); return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now); - } else if (request instanceof LocalHistoryRequest) { - final LocalHistoryRequest lhReq = (LocalHistoryRequest)request; - final ClientIdentifier clientId = lhReq.getTarget().getClientId(); + } else if (request instanceof LocalHistoryRequest lhReq) { + final var clientId = lhReq.getTarget().getClientId(); return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now); } else { LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request); @@ -712,13 +707,14 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleCommitTransaction(final CommitTransaction commit) { - final TransactionIdentifier txId = commit.getTransactionId(); + final var txId = commit.getTransactionId(); if (isLeader()) { askProtocolEncountered(txId); commitCoordinator.handleCommit(txId, getSender(), this); } 
else { - ActorSelection leader = getLeader(); + final var leader = getLeader(); if (leader == null) { messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId); } else { @@ -728,15 +724,16 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleCanCommitTransaction(final CanCommitTransaction canCommit) { - final TransactionIdentifier txId = canCommit.getTransactionId(); + final var txId = canCommit.getTransactionId(); LOG.debug("{}: Can committing transaction {}", persistenceId(), txId); if (isLeader()) { askProtocolEncountered(txId); commitCoordinator.handleCanCommit(txId, getSender(), this); } else { - ActorSelection leader = getLeader(); + final var leader = getLeader(); if (leader == null) { messageRetrySupport.addMessageToRetry(canCommit, getSender(), "Could not canCommit transaction " + txId); @@ -748,6 +745,7 @@ public class Shard extends RaftActor { } @SuppressWarnings("checkstyle:IllegalCatch") + @Deprecated(since = "9.0.0", forRemoval = true) private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) { askProtocolEncountered(batched.getTransactionId()); @@ -760,6 +758,7 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleBatchedModifications(final BatchedModifications batched) { // This message is sent to prepare the modifications transaction directly on the Shard as an // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last @@ -777,7 +776,7 @@ public class Shard extends RaftActor { if (isLeader() && isLeaderActive) { handleBatchedModificationsLocal(batched, getSender()); } else { - ActorSelection leader = getLeader(); + final var leader = getLeader(); if (!isLeaderActive || leader == null) { messageRetrySupport.addMessageToRetry(batched, getSender(), "Could not process BatchedModifications " + batched.getTransactionId()); @@ -786,9 +785,8 @@ public class Shard extends RaftActor { // we need to reconstruct previous BatchedModifications from the transaction // DataTreeModification, honoring the max batched modification count, and forward all the // previous BatchedModifications to the new leader. 
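The rewrites in these hunks lean on Java 16+ pattern matching for instanceof, folding the check-then-cast pairs into a single test that binds a typed variable. A minimal, self-contained sketch of the idiom, using hypothetical message types rather than the actual Shard messages:

    // Sketch only: pattern matching for instanceof, as used in the
    // handleNonRaftCommand() and handleRequestEnvelope() rewrites.
    final class DispatchSketch {
        static String dispatch(final Object message) {
            if (message instanceof Integer count) {
                // 'count' is already cast and scoped to this branch
                return "count=" + count;
            } else if (message instanceof String text) {
                return "text=" + text;
            } else {
                return "unhandled: " + message;
            }
        }

        public static void main(final String[] args) {
            System.out.println(dispatch(42));       // count=42
            System.out.println(dispatch("hello"));  // text=hello
        }
    }
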
- Collection newModifications = commitCoordinator - .createForwardedBatchedModifications(batched, - datastoreContext.getShardBatchedModificationCount()); + final var newModifications = commitCoordinator.createForwardedBatchedModifications(batched, + datastoreContext.getShardBatchedModificationCount()); LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(), newModifications.size(), leader); @@ -817,11 +815,12 @@ public class Shard extends RaftActor { } @SuppressWarnings("checkstyle:IllegalCatch") - private void handleReadyLocalTransaction(final ReadyLocalTransaction message) { - final TransactionIdentifier txId = message.getTransactionId(); + @Deprecated(since = "9.0.0", forRemoval = true) + private void handleReadyLocalTransaction(final ReadyLocalTransaction message) { + final var txId = message.getTransactionId(); LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), txId); - boolean isLeaderActive = isLeaderActive(); + final var isLeaderActive = isLeaderActive(); if (isLeader() && isLeaderActive) { askProtocolEncountered(txId); try { @@ -831,7 +830,7 @@ public class Shard extends RaftActor { getSender().tell(new Failure(e), getSelf()); } } else { - ActorSelection leader = getLeader(); + final var leader = getLeader(); if (!isLeaderActive || leader == null) { messageRetrySupport.addMessageToRetry(message, getSender(), "Could not process ready local transaction " + txId); @@ -843,22 +842,23 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) { LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId()); - boolean isLeaderActive = isLeaderActive(); + final var isLeaderActive = isLeaderActive(); if (isLeader() && isLeaderActive) { askProtocolEncountered(forwardedReady.getTransactionId()); commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this); } else { - ActorSelection leader = getLeader(); + final var leader = getLeader(); if (!isLeaderActive || leader == null) { messageRetrySupport.addMessageToRetry(forwardedReady, getSender(), "Could not process forwarded ready transaction " + forwardedReady.getTransactionId()); } else { LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader); - ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(), + final var readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(), forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(), forwardedReady.getParticipatingShardNames()); readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion()); @@ -867,8 +867,9 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleAbortTransaction(final AbortTransaction abort) { - final TransactionIdentifier transactionId = abort.getTransactionId(); + final var transactionId = abort.getTransactionId(); askProtocolEncountered(transactionId); doAbortTransaction(transactionId, getSender()); } @@ -877,6 +878,7 @@ public class Shard extends RaftActor { commitCoordinator.handleAbort(transactionID, sender, this); } + @Deprecated(since = "9.0.0", forRemoval = true) private void handleCreateTransaction(final Object message) { if (isLeader()) { createTransaction(CreateTransaction.fromSerializable(message)); @@ -888,9 +890,10 @@ public class Shard 
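Several call sites in this change replace Optional.get() with orElseThrow(); both throw NoSuchElementException on an empty Optional, but the latter spells out that absence is treated as a programming error. A tiny sketch, with a hypothetical lookup method:

    // Sketch only: Optional.get() and no-arg orElseThrow() have the same
    // semantics; orElseThrow() just states the intent at the call site.
    import java.util.Optional;

    final class OptionalSketch {
        static String requireName(final Optional<String> maybeName) {
            return maybeName.orElseThrow();
        }

        public static void main(final String[] args) {
            System.out.println(requireName(Optional.of("example-shard")));
        }
    }
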
extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) { if (isLeader()) { - final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier(); + final var id = closeTransactionChain.getIdentifier(); askProtocolEncountered(id.getClientId()); store.closeTransactionChain(id); } else if (getLeader() != null) { @@ -900,6 +903,7 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) @SuppressWarnings("checkstyle:IllegalCatch") private void createTransaction(final CreateTransaction createTransaction) { askProtocolEncountered(createTransaction.getTransactionId()); @@ -910,7 +914,7 @@ public class Shard extends RaftActor { return; } - ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(), + final var transactionActor = createTransaction(createTransaction.getTransactionType(), createTransaction.getTransactionId()); getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor), @@ -920,6 +924,7 @@ public class Shard extends RaftActor { } } + @Deprecated(since = "9.0.0", forRemoval = true) private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) { LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId); return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType), @@ -927,14 +932,16 @@ public class Shard extends RaftActor { } // Called on leader only + @Deprecated(since = "9.0.0", forRemoval = true) private void askProtocolEncountered(final TransactionIdentifier transactionId) { askProtocolEncountered(transactionId.getHistoryId().getClientId()); } // Called on leader only + @Deprecated(since = "9.0.0", forRemoval = true) private void askProtocolEncountered(final ClientIdentifier clientId) { - final FrontendIdentifier frontend = clientId.getFrontendId(); - final LeaderFrontendState state = knownFrontends.get(frontend); + final var frontend = clientId.getFrontendId(); + final var state = knownFrontends.get(frontend); if (!(state instanceof LeaderFrontendState.Disabled)) { LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId); if (knownFrontends.isEmpty()) { @@ -948,7 +955,7 @@ public class Shard extends RaftActor { } private void updateSchemaContext(final UpdateSchemaContext message) { - updateSchemaContext(message.getEffectiveModelContext()); + updateSchemaContext(message.modelContext()); } @VisibleForTesting @@ -981,13 +988,13 @@ public class Shard extends RaftActor { restoreFromSnapshot = null; //notify shard manager - getContext().parent().tell(new ActorInitialized(), getSelf()); + getContext().parent().tell(new ActorInitialized(getSelf()), ActorRef.noSender()); // Being paranoid here - this method should only be called once but just in case... if (txCommitTimeoutCheckSchedule == null) { // Schedule a message to be periodically sent to check if the current in-progress // transaction should be expired and aborted. 
- FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS); + final var period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS); txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule( period, period, getSelf(), TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender()); @@ -996,14 +1003,14 @@ public class Shard extends RaftActor { @Override protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) { - if (data instanceof Payload) { - if (data instanceof DisableTrackingPayload) { - disableTracking((DisableTrackingPayload) data); + if (data instanceof Payload payload) { + if (payload instanceof DisableTrackingPayload disableTracking) { + disableTracking(disableTracking); return; } try { - store.applyReplicatedPayload(identifier, (Payload)data); + store.applyReplicatedPayload(identifier, payload); } catch (DataValidationFailedException | IOException e) { LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e); } @@ -1092,10 +1099,8 @@ public class Shard extends RaftActor { paused = true; // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused. - if (datastoreContext.isUseTellBasedProtocol()) { - knownFrontends.values().forEach(LeaderFrontendState::retire); - knownFrontends = ImmutableMap.of(); - } + knownFrontends.values().forEach(LeaderFrontendState::retire); + knownFrontends = ImmutableMap.of(); store.setRunOnPendingTransactionsComplete(operation); } @@ -1163,7 +1168,7 @@ public class Shard extends RaftActor { private ShardIdentifier id; private Map peerAddresses = Collections.emptyMap(); private DatastoreContext datastoreContext; - private EffectiveModelContextProvider schemaContextProvider; + private Supplier<@NonNull EffectiveModelContext> schemaContextProvider; private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot; private DataTree dataTree; @@ -1184,37 +1189,37 @@ public class Shard extends RaftActor { public T id(final ShardIdentifier newId) { checkSealed(); - this.id = newId; + id = newId; return self(); } public T peerAddresses(final Map newPeerAddresses) { checkSealed(); - this.peerAddresses = newPeerAddresses; + peerAddresses = newPeerAddresses; return self(); } public T datastoreContext(final DatastoreContext newDatastoreContext) { checkSealed(); - this.datastoreContext = newDatastoreContext; + datastoreContext = newDatastoreContext; return self(); } - public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) { + public T schemaContextProvider(final Supplier<@NonNull EffectiveModelContext> newSchemaContextProvider) { checkSealed(); - this.schemaContextProvider = requireNonNull(newSchemaContextProvider); + schemaContextProvider = requireNonNull(newSchemaContextProvider); return self(); } public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) { checkSealed(); - this.restoreFromSnapshot = newRestoreFromSnapshot; + restoreFromSnapshot = newRestoreFromSnapshot; return self(); } public T dataTree(final DataTree newDataTree) { checkSealed(); - this.dataTree = newDataTree; + dataTree = newDataTree; return self(); } @@ -1231,7 +1236,7 @@ public class Shard extends RaftActor { } public EffectiveModelContext getSchemaContext() { - return verifyNotNull(schemaContextProvider.getEffectiveModelContext()); + return verifyNotNull(schemaContextProvider.get()); } public DatastoreSnapshot.ShardSnapshot 
getRestoreFromSnapshot() { @@ -1243,15 +1248,10 @@ public class Shard extends RaftActor { } public TreeType getTreeType() { - switch (datastoreContext.getLogicalStoreType()) { - case CONFIGURATION: - return TreeType.CONFIGURATION; - case OPERATIONAL: - return TreeType.OPERATIONAL; - default: - throw new IllegalStateException("Unhandled logical store type " - + datastoreContext.getLogicalStoreType()); - } + return switch (datastoreContext.getLogicalStoreType()) { + case CONFIGURATION -> TreeType.CONFIGURATION; + case OPERATIONAL -> TreeType.OPERATIONAL; + }; } protected void verify() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java index d9520c5d5c..946203b6b7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java @@ -15,11 +15,11 @@ import akka.serialization.Serialization; import com.google.common.annotations.VisibleForTesting; import com.google.common.primitives.UnsignedLong; import com.google.common.util.concurrent.FutureCallback; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Deque; import java.util.HashMap; import java.util.LinkedList; import java.util.Map; @@ -38,7 +38,8 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionRe import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage; import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor; import org.opendaylight.yangtools.concepts.Identifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import org.slf4j.Logger; /** @@ -46,6 +47,7 @@ import org.slf4j.Logger; * * @author Thomas Pantelis */ +@Deprecated(since = "9.0.0", forRemoval = true) final class ShardCommitCoordinator { // Interface hook for unit tests to replace or decorate the ShardDataTreeCohorts. 
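Both the exportOnRecovery and getTreeType() rewrites above switch to arrow-form switch expressions; the compiler checks exhaustiveness over the enum constants, which is why the old default branch throwing IllegalStateException can be dropped. A small sketch with a hypothetical enum, not the actual logical-store type:

    // Sketch only: exhaustive switch expression over an enum.
    final class SwitchSketch {
        enum StoreKind { CONFIGURATION, OPERATIONAL }

        static String describe(final StoreKind kind) {
            // Adding a new StoreKind constant turns this into a compile-time
            // error instead of a runtime IllegalStateException.
            return switch (kind) {
                case CONFIGURATION -> "config";
                case OPERATIONAL -> "oper";
            };
        }

        public static void main(final String[] args) {
            System.out.println(describe(StoreKind.OPERATIONAL)); // oper
        }
    }
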
@@ -128,6 +130,7 @@ final class ShardCommitCoordinator { * @param batched the BatchedModifications message to process * @param sender the sender of the message */ + @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of captured failure") void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) { CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId()); if (cohortEntry == null || cohortEntry.isSealed()) { @@ -204,6 +207,7 @@ final class ShardCommitCoordinator { } } + @Deprecated(since = "9.0.0", forRemoval = true) Collection createForwardedBatchedModifications(final BatchedModifications from, final int maxModificationsPerBatch) { CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId()); @@ -236,9 +240,9 @@ final class ShardCommitCoordinator { } private void handleCanCommit(final CohortEntry cohortEntry) { - cohortEntry.canCommit(new FutureCallback() { + cohortEntry.canCommit(new FutureCallback<>() { @Override - public void onSuccess(final Void result) { + public void onSuccess(final Empty result) { log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId()); if (cohortEntry.isDoImmediateCommit()) { @@ -371,9 +375,9 @@ final class ShardCommitCoordinator { log.debug("{}: Aborting transaction {}", name, transactionID); final ActorRef self = shard.getSelf(); - cohortEntry.abort(new FutureCallback() { + cohortEntry.abort(new FutureCallback<>() { @Override - public void onSuccess(final Void result) { + public void onSuccess(final Empty result) { if (sender != null) { sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self); } @@ -397,19 +401,18 @@ final class ShardCommitCoordinator { } void abortPendingTransactions(final String reason, final Shard shard) { - final Failure failure = new Failure(new RuntimeException(reason)); - Collection pending = dataTree.getAndClearPendingTransactions(); + final var failure = new Failure(new RuntimeException(reason)); + final var pending = dataTree.getAndClearPendingTransactions(); log.debug("{}: Aborting {} pending queued transactions", name, pending.size()); - for (ShardDataTreeCohort cohort : pending) { - CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier()); - if (cohortEntry == null) { - continue; - } - - if (cohortEntry.getReplySender() != null) { - cohortEntry.getReplySender().tell(failure, shard.self()); + for (var cohort : pending) { + final var cohortEntry = cohortCache.remove(cohort.transactionId()); + if (cohortEntry != null) { + final var replySender = cohortEntry.getReplySender(); + if (replySender != null) { + replySender.tell(failure, shard.self()); + } } } @@ -417,32 +420,31 @@ final class ShardCommitCoordinator { } Collection convertPendingTransactionsToMessages(final int maxModificationsPerBatch) { - final Collection messages = new ArrayList<>(); - for (ShardDataTreeCohort cohort : dataTree.getAndClearPendingTransactions()) { - CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier()); + final var messages = new ArrayList(); + for (var cohort : dataTree.getAndClearPendingTransactions()) { + final var cohortEntry = cohortCache.remove(cohort.transactionId()); if (cohortEntry == null) { continue; } - final Deque newMessages = new ArrayDeque<>(); + final var newMessages = new ArrayDeque(); cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() { @Override protected BatchedModifications getModifications() 
{ - final BatchedModifications lastBatch = newMessages.peekLast(); - + final var lastBatch = newMessages.peekLast(); if (lastBatch != null && lastBatch.getModifications().size() >= maxModificationsPerBatch) { return lastBatch; } // Allocate a new message - final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(), + final var ret = new BatchedModifications(cohortEntry.getTransactionId(), cohortEntry.getClientVersion()); newMessages.add(ret); return ret; } }); - final BatchedModifications last = newMessages.peekLast(); + final var last = newMessages.peekLast(); if (last != null) { final boolean immediate = cohortEntry.isDoImmediateCommit(); last.setDoCommitOnReady(immediate); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java index 4aa7a7b786..72e7a545a7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java @@ -35,7 +35,6 @@ import java.util.Deque; import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import java.util.Map.Entry; import java.util.Optional; import java.util.OptionalLong; import java.util.Queue; @@ -57,7 +56,6 @@ import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifia import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload; import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload; import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload; -import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion; import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot; import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion; import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload; @@ -70,31 +68,31 @@ import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationO import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet; import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification; import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.mdsal.common.api.OptimisticLockFailedException; import org.opendaylight.mdsal.common.api.TransactionCommitFailedException; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; import org.opendaylight.yangtools.concepts.Identifier; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; +import org.opendaylight.yangtools.yang.common.Empty; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree; -import 
org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType; -import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion; -import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory; +import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException; +import org.opendaylight.yangtools.yang.data.tree.api.DataTree; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeTip; +import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException; +import org.opendaylight.yangtools.yang.data.tree.api.ModificationType; +import org.opendaylight.yangtools.yang.data.tree.api.TreeType; +import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory; +import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates; import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; -import org.opendaylight.yangtools.yang.model.api.SchemaContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.concurrent.duration.FiniteDuration; @@ -120,7 +118,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @Override public String toString() { - return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]"; + return "CommitEntry [tx=" + cohort.transactionId() + ", state=" + cohort.getState() + "]"; } } @@ -161,7 +159,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { */ private DataTreeTip tip; - private SchemaContext schemaContext; + private EffectiveModelContext schemaContext; private DataSchemaContextTree dataSchemaContext; private int currentTransactionBatch; @@ -199,7 +197,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @VisibleForTesting public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) { - this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(), + this(shard, schemaContext, treeType, YangInstanceIdentifier.of(), new DefaultShardDataTreeChangeListenerPublisher(""), ""); } @@ -216,7 +214,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } @VisibleForTesting - final SchemaContext getSchemaContext() { + final EffectiveModelContext getSchemaContext() { return schemaContext; } @@ -236,7 +234,7 @@ public class 
ShardDataTree extends ShardDataTreeTransactionParent { * @return A state snapshot */ @NonNull ShardDataTreeSnapshot takeStateSnapshot() { - final NormalizedNode rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get(); + final NormalizedNode rootNode = takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow(); final Builder>, ShardDataTreeSnapshotMetadata> metaBuilder = ImmutableMap.builder(); @@ -263,14 +261,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } final Map>, ShardDataTreeSnapshotMetadata> snapshotMeta; - if (snapshot instanceof MetadataShardDataTreeSnapshot) { - snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata(); + if (snapshot instanceof MetadataShardDataTreeSnapshot metaSnapshot) { + snapshotMeta = metaSnapshot.getMetadata(); } else { snapshotMeta = ImmutableMap.of(); } - for (ShardDataTreeMetadata m : metadata) { - final ShardDataTreeSnapshotMetadata s = snapshotMeta.get(m.getSupportedType()); + for (var m : metadata) { + final var s = snapshotMeta.get(m.getSupportedType()); if (s != null) { m.applySnapshot(s); } else { @@ -278,16 +276,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } } - final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification(); + final DataTreeModification unwrapped = newModification(); final DataTreeModification mod = wrapper.apply(unwrapped); // delete everything first - mod.delete(YangInstanceIdentifier.empty()); + mod.delete(YangInstanceIdentifier.of()); - final Optional maybeNode = snapshot.getRootNode(); - if (maybeNode.isPresent()) { + snapshot.getRootNode().ifPresent(rootNode -> { // Add everything from the remote node back - mod.write(YangInstanceIdentifier.empty(), maybeNode.get()); - } + mod.write(YangInstanceIdentifier.of(), rootNode); + }); + mod.ready(); dataTree.validate(unwrapped); @@ -333,35 +331,35 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @SuppressWarnings("checkstyle:IllegalCatch") private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException { - final Entry entry = payload.acquireCandidate(); - final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification(); - final PruningDataTreeModification mod = createPruningModification(unwrapped, - NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.getValue().getVersion()) > 0); + final var entry = payload.acquireCandidate(); + final var unwrapped = newModification(); + final var pruningMod = createPruningModification(unwrapped, + NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.streamVersion()) > 0); - DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate()); - mod.ready(); + DataTreeCandidates.applyToModification(pruningMod, entry.candidate()); + pruningMod.ready(); LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped); try { dataTree.validate(unwrapped); dataTree.commit(dataTree.prepare(unwrapped)); } catch (Exception e) { - File file = new File(System.getProperty("karaf.data", "."), + final var file = new File(System.getProperty("karaf.data", "."), "failed-recovery-payload-" + logContext + ".out"); DataTreeModificationOutput.toFile(file, unwrapped); - throw new IllegalStateException(String.format( - "%s: Failed to apply recovery payload. Modification data was written to file %s", - logContext, file), e); + throw new IllegalStateException( + "%s: Failed to apply recovery payload. 
Modification data was written to file %s".formatted( + logContext, file), + e); } - allMetadataCommittedTransaction(entry.getKey()); + allMetadataCommittedTransaction(entry.transactionId()); } private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped, final boolean uintAdapting) { // TODO: we should be able to reuse the pruner, provided we are not reentrant - final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext( - dataSchemaContext); + final var pruner = ReusableNormalizedNodePruner.forDataSchemaContext(dataSchemaContext); return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption()) : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner); } @@ -375,20 +373,20 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { * @throws DataValidationFailedException when the snapshot fails to apply */ final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException { - if (payload instanceof CommitTransactionPayload) { - applyRecoveryCandidate((CommitTransactionPayload) payload); - } else if (payload instanceof AbortTransactionPayload) { - allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier()); - } else if (payload instanceof PurgeTransactionPayload) { - allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier()); - } else if (payload instanceof CreateLocalHistoryPayload) { - allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof CloseLocalHistoryPayload) { - allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof PurgeLocalHistoryPayload) { - allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof SkipTransactionsPayload) { - allMetadataSkipTransactions((SkipTransactionsPayload) payload); + if (payload instanceof CommitTransactionPayload commit) { + applyRecoveryCandidate(commit); + } else if (payload instanceof AbortTransactionPayload abort) { + allMetadataAbortedTransaction(abort.getIdentifier()); + } else if (payload instanceof PurgeTransactionPayload purge) { + allMetadataPurgedTransaction(purge.getIdentifier()); + } else if (payload instanceof CreateLocalHistoryPayload create) { + allMetadataCreatedLocalHistory(create.getIdentifier()); + } else if (payload instanceof CloseLocalHistoryPayload close) { + allMetadataClosedLocalHistory(close.getIdentifier()); + } else if (payload instanceof PurgeLocalHistoryPayload purge) { + allMetadataPurgedLocalHistory(purge.getIdentifier()); + } else if (payload instanceof SkipTransactionsPayload skip) { + allMetadataSkipTransactions(skip); } else { LOG.debug("{}: ignoring unhandled payload {}", logContext, payload); } @@ -396,21 +394,21 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { private void applyReplicatedCandidate(final CommitTransactionPayload payload) throws DataValidationFailedException, IOException { - final Entry entry = payload.acquireCandidate(); - final TransactionIdentifier identifier = entry.getKey(); - LOG.debug("{}: Applying foreign transaction {}", logContext, identifier); + final var payloadCandidate = payload.acquireCandidate(); + final var transactionId = payloadCandidate.transactionId(); + LOG.debug("{}: Applying foreign transaction {}", logContext, transactionId); - final DataTreeModification mod = 
dataTree.takeSnapshot().newModification();
+        final var mod = newModification();
         // TODO: check version here, which will enable us to perform forward-compatibility transformations
-        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
+        DataTreeCandidates.applyToModification(mod, payloadCandidate.candidate());
         mod.ready();
         LOG.trace("{}: Applying foreign modification {}", logContext, mod);
         dataTree.validate(mod);
-        final DataTreeCandidate candidate = dataTree.prepare(mod);
+        final var candidate = dataTree.prepare(mod);
         dataTree.commit(candidate);
-        allMetadataCommittedTransaction(identifier);
+        allMetadataCommittedTransaction(transactionId);
         notifyListeners(candidate);
     }
@@ -436,52 +434,51 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
          * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
          * pre-Boron state -- which limits the number of options here.
          */
-        if (payload instanceof CommitTransactionPayload) {
+        if (payload instanceof CommitTransactionPayload commit) {
             if (identifier == null) {
-                applyReplicatedCandidate((CommitTransactionPayload) payload);
+                applyReplicatedCandidate(commit);
             } else {
                 verify(identifier instanceof TransactionIdentifier);
                 // if we did not track this transaction before, it means that it came from another leader and we are in
                 // the process of committing it while in PreLeader state. That means that it hasn't yet been committed to
                 // the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
                 if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
-                    applyReplicatedCandidate((CommitTransactionPayload) payload);
+                    applyReplicatedCandidate(commit);
                 }
             }
             // make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
- checkRootOverwrite(((CommitTransactionPayload) payload).acquireCandidate().getValue() - .getCandidate()); - } else if (payload instanceof AbortTransactionPayload) { + checkRootOverwrite(commit.acquireCandidate().candidate()); + } else if (payload instanceof AbortTransactionPayload abort) { if (identifier != null) { - payloadReplicationComplete((AbortTransactionPayload) payload); + payloadReplicationComplete(abort); } - allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier()); - } else if (payload instanceof PurgeTransactionPayload) { + allMetadataAbortedTransaction(abort.getIdentifier()); + } else if (payload instanceof PurgeTransactionPayload purge) { if (identifier != null) { - payloadReplicationComplete((PurgeTransactionPayload) payload); + payloadReplicationComplete(purge); } - allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier()); - } else if (payload instanceof CloseLocalHistoryPayload) { + allMetadataPurgedTransaction(purge.getIdentifier()); + } else if (payload instanceof CloseLocalHistoryPayload close) { if (identifier != null) { - payloadReplicationComplete((CloseLocalHistoryPayload) payload); + payloadReplicationComplete(close); } - allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof CreateLocalHistoryPayload) { + allMetadataClosedLocalHistory(close.getIdentifier()); + } else if (payload instanceof CreateLocalHistoryPayload create) { if (identifier != null) { - payloadReplicationComplete((CreateLocalHistoryPayload)payload); + payloadReplicationComplete(create); } - allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof PurgeLocalHistoryPayload) { + allMetadataCreatedLocalHistory(create.getIdentifier()); + } else if (payload instanceof PurgeLocalHistoryPayload purge) { if (identifier != null) { - payloadReplicationComplete((PurgeLocalHistoryPayload)payload); + payloadReplicationComplete(purge); } - allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier()); - } else if (payload instanceof SkipTransactionsPayload) { + allMetadataPurgedLocalHistory(purge.getIdentifier()); + } else if (payload instanceof SkipTransactionsPayload skip) { if (identifier != null) { - payloadReplicationComplete((SkipTransactionsPayload)payload); + payloadReplicationComplete(skip); } - allMetadataSkipTransactions((SkipTransactionsPayload) payload); + allMetadataSkipTransactions(skip); } else { LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload); } @@ -499,8 +496,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } // top level container ie "/" - if (candidate.getRootPath().isEmpty() - && candidate.getRootNode().getModificationType() == ModificationType.WRITE) { + if (candidate.getRootPath().isEmpty() && candidate.getRootNode().modificationType() == ModificationType.WRITE) { LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext); shard.self().tell(new InitiateCaptureSnapshot(), noSender()); } @@ -524,16 +520,17 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } private boolean payloadReplicationComplete(final TransactionIdentifier txId) { - final CommitEntry current = pendingFinishCommits.peek(); + final var current = pendingFinishCommits.peek(); if (current == null) { LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId); 
allMetadataCommittedTransaction(txId); return false; } - if (!current.cohort.getIdentifier().equals(txId)) { + final var cohortTxId = current.cohort.transactionId(); + if (!cohortTxId.equals(txId)) { LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext, - current.cohort.getIdentifier(), txId); + cohortTxId, txId); allMetadataCommittedTransaction(txId); return false; } @@ -617,25 +614,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { return chain; } - final ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) { + final @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) { shard.getShardMBean().incrementReadOnlyTransactionCount(); - if (txId.getHistoryId().getHistoryId() == 0) { - return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot()); - } + final var historyId = txId.getHistoryId(); + return historyId.getHistoryId() == 0 ? newStandaloneReadOnlyTransaction(txId) + : ensureTransactionChain(historyId, null).newReadOnlyTransaction(txId); + } - return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId); + final @NonNull ReadOnlyShardDataTreeTransaction newStandaloneReadOnlyTransaction(final TransactionIdentifier txId) { + return new ReadOnlyShardDataTreeTransaction(this, txId, takeSnapshot()); } - final ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) { + final @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) { shard.getShardMBean().incrementReadWriteTransactionCount(); - if (txId.getHistoryId().getHistoryId() == 0) { - return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot() - .newModification()); - } + final var historyId = txId.getHistoryId(); + return historyId.getHistoryId() == 0 ? 
newStandaloneReadWriteTransaction(txId) + : ensureTransactionChain(historyId, null).newReadWriteTransaction(txId); + } - return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId); + final @NonNull ReadWriteShardDataTreeTransaction newStandaloneReadWriteTransaction( + final TransactionIdentifier txId) { + return new ReadWriteShardDataTreeTransaction(this, txId, newModification()); } @VisibleForTesting @@ -728,13 +729,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } final Optional readCurrentData() { - return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()) - .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state)); + return readNode(YangInstanceIdentifier.of()) + .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), state)); } final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener, - final Optional initialState, - final Consumer> onRegistration) { + final Optional initialState, final Consumer onRegistration) { treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration); } @@ -775,7 +775,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @VisibleForTesting public final Optional readNode(final YangInstanceIdentifier path) { - return dataTree.takeSnapshot().readNode(path); + return takeSnapshot().readNode(path); } final DataTreeSnapshot takeSnapshot() { @@ -784,7 +784,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @VisibleForTesting final DataTreeModification newModification() { - return dataTree.takeSnapshot().newModification(); + return takeSnapshot().newModification(); } final Collection getAndClearPendingTransactions() { @@ -830,25 +830,25 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final SimpleShardDataTreeCohort cohort = entry.cohort; final DataTreeModification modification = cohort.getDataTreeModification(); - LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: Validating transaction {}", logContext, cohort.transactionId()); Exception cause; try { tip.validate(modification); - LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier()); + LOG.debug("{}: Transaction {} validated", logContext, cohort.transactionId()); cohort.successfulCanCommit(); entry.lastAccess = readTime(); return; } catch (ConflictingModificationAppliedException e) { - LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(), + LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.transactionId(), e.getPath()); cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e); } catch (DataValidationFailedException e) { - LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(), + LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.transactionId(), e.getPath(), e); // For debugging purposes, allow dumping of the modification. Coupled with the above // precondition log, it should allow us to understand what went on. 
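Elsewhere in this change the cohort callbacks move from FutureCallback<Void> with onSuccess(Void) to the diamond form with onSuccess(Empty), so success is reported with a concrete token rather than a null. A minimal sketch of that shape, using a hypothetical Done marker in place of the yangtools Empty type:

    // Sketch only: Guava FutureCallback parameterized on a non-null success token.
    import com.google.common.util.concurrent.FutureCallback;

    final class CallbackSketch {
        // Hypothetical stand-in for org.opendaylight.yangtools.yang.common.Empty.
        enum Done { INSTANCE }

        static FutureCallback<Done> loggingCallback(final String name) {
            return new FutureCallback<>() {
                @Override
                public void onSuccess(final Done result) {
                    System.out.println(name + ": step completed");
                }

                @Override
                public void onFailure(final Throwable cause) {
                    System.err.println(name + ": step failed: " + cause);
                }
            };
        }
    }
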
- LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification); + LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.transactionId(), modification); LOG.trace("{}: Current tree: {}", logContext, dataTree); cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e); } catch (Exception e) { @@ -873,7 +873,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final SimpleShardDataTreeCohort cohort = entry.cohort; if (cohort.isFailed()) { - LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: Removing failed transaction {}", logContext, cohort.transactionId()); queue.remove(); continue; } @@ -919,12 +919,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { Collection precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames()); if (precedingShardNames.isEmpty()) { - LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier()); + LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.transactionId()); return; } LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}", - logContext, cohort.getIdentifier(), precedingShardNames); + logContext, cohort.transactionId(), precedingShardNames); final Iterator iter = pendingTransactions.iterator(); int index = -1; int moveToIndex = -1; @@ -935,29 +935,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { if (cohort.equals(entry.cohort)) { if (moveToIndex < 0) { LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit", - logContext, cohort.getIdentifier()); + logContext, cohort.transactionId()); return; } LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue", - logContext, cohort.getIdentifier(), moveToIndex); + logContext, cohort.transactionId(), moveToIndex); iter.remove(); insertEntry(pendingTransactions, entry, moveToIndex); if (!cohort.equals(pendingTransactions.peek().cohort)) { LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit", - logContext, cohort.getIdentifier()); + logContext, cohort.transactionId()); return; } LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit", - logContext, cohort.getIdentifier()); + logContext, cohort.transactionId()); break; } if (entry.cohort.getState() != State.READY) { LOG.debug("{}: Skipping pending transaction {} in state {}", - logContext, entry.cohort.getIdentifier(), entry.cohort.getState()); + logContext, entry.cohort.transactionId(), entry.cohort.getState()); continue; } @@ -967,16 +967,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { if (precedingShardNames.equals(pendingPrecedingShardNames)) { if (moveToIndex < 0) { LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}", - logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index); + logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), index); moveToIndex = index; } else { LOG.debug( "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}", - logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex); + logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), moveToIndex); } } else { LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping", - logContext, pendingPrecedingShardNames, 
entry.cohort.getIdentifier()); + logContext, pendingPrecedingShardNames, entry.cohort.transactionId()); } } } @@ -1022,7 +1022,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final SimpleShardDataTreeCohort current = entry.cohort; verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current); - final TransactionIdentifier currentId = current.getIdentifier(); + final TransactionIdentifier currentId = current.transactionId(); LOG.debug("{}: Preparing transaction {}", logContext, currentId); final DataTreeCandidateTip candidate; @@ -1034,9 +1034,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { return; } - cohort.userPreCommit(candidate, new FutureCallback() { + cohort.userPreCommit(candidate, new FutureCallback<>() { @Override - public void onSuccess(final Void noop) { + public void onSuccess(final Empty result) { // Set the tip of the data tree. tip = verifyNotNull(candidate); @@ -1067,7 +1067,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { @SuppressWarnings("checkstyle:IllegalCatch") private void finishCommit(final SimpleShardDataTreeCohort cohort) { - final TransactionIdentifier txId = cohort.getIdentifier(); + final TransactionIdentifier txId = cohort.transactionId(); final DataTreeCandidate candidate = cohort.getCandidate(); LOG.debug("{}: Resuming commit of transaction {}", logContext, txId); @@ -1105,13 +1105,13 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final SimpleShardDataTreeCohort current = entry.cohort; if (!cohort.equals(current)) { - LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier()); + LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.transactionId()); return; } - LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier()); + LOG.debug("{}: Starting commit for transaction {}", logContext, current.transactionId()); - final TransactionIdentifier txId = cohort.getIdentifier(); + final TransactionIdentifier txId = cohort.transactionId(); final Payload payload; try { payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(), @@ -1183,11 +1183,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { // the newReadWriteTransaction() final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod, final Optional> participatingShardNames) { - if (txId.getHistoryId().getHistoryId() == 0) { + final var historyId = txId.getHistoryId(); + if (historyId.getHistoryId() == 0) { return createReadyCohort(txId, mod, participatingShardNames); } - - return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames); + return ensureTransactionChain(historyId, null).createReadyCohort(txId, mod, participatingShardNames); } @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.") @@ -1212,11 +1212,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort); if (updateOpt.isPresent()) { - final long newAccess = updateOpt.getAsLong(); + final long newAccess = updateOpt.orElseThrow(); final long newDelta = now - newAccess; if (newDelta < delta) { LOG.debug("{}: Updated current transaction {} access time", logContext, - currentTx.cohort.getIdentifier()); + currentTx.cohort.transactionId()); currentTx.lastAccess = 
newAccess; delta = newDelta; } @@ -1231,7 +1231,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final State state = currentTx.cohort.getState(); LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext, - currentTx.cohort.getIdentifier(), deltaMillis, state); + currentTx.cohort.transactionId(), deltaMillis, state); boolean processNext = true; final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after " + deltaMillis + "ms"); @@ -1271,7 +1271,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { break; case COMMIT_PENDING: LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext, - currentTx.cohort.getIdentifier()); + currentTx.cohort.transactionId()); currentTx.lastAccess = now; processNext = false; return; @@ -1295,7 +1295,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final Iterator it = Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions).iterator(); if (!it.hasNext()) { - LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.transactionId()); return true; } @@ -1303,8 +1303,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { final CommitEntry first = it.next(); if (cohort.equals(first.cohort)) { if (cohort.getState() != State.COMMIT_PENDING) { - LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(), - cohort.getIdentifier()); + LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.transactionId(), + cohort.transactionId()); it.remove(); if (cohort.getCandidate() != null) { @@ -1315,7 +1315,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { return true; } - LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier()); + LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.transactionId()); return false; } @@ -1323,7 +1323,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { while (it.hasNext()) { final CommitEntry e = it.next(); if (cohort.equals(e.cohort)) { - LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: aborting queued transaction {}", logContext, cohort.transactionId()); it.remove(); if (cohort.getCandidate() != null) { @@ -1331,12 +1331,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { } return true; - } else { - newTip = requireNonNullElse(e.cohort.getCandidate(), newTip); } + + newTip = requireNonNullElse(e.cohort.getCandidate(), newTip); } - LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier()); + LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.transactionId()); return true; } @@ -1346,16 +1346,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { while (iter.hasNext()) { final SimpleShardDataTreeCohort cohort = iter.next().cohort; if (cohort.getState() == State.CAN_COMMIT_COMPLETE) { - LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.transactionId()); try { tip.validate(cohort.getDataTreeModification()); } catch (DataValidationFailedException | RuntimeException e) { - LOG.debug("{}: Failed to 
revalidate queued transaction {}", logContext, cohort.getIdentifier(), e); + LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.transactionId(), e); cohort.reportFailure(e); } } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) { - LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier()); + LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.transactionId()); try { tip.validate(cohort.getDataTreeModification()); @@ -1364,7 +1364,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent { cohort.setNewCandidate(candidate); tip = candidate; } catch (RuntimeException | DataValidationFailedException e) { - LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e); + LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.transactionId(), e); cohort.reportFailure(e); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java index bfeb91f65b..4c67c3fc23 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java @@ -10,9 +10,9 @@ package org.opendaylight.controller.cluster.datastore; import java.util.Optional; import java.util.function.Consumer; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * Interface for a class that generates and publishes notifications for DataTreeChangeListeners. 
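Note on the listener-registration hunks above and below: ListenerRegistration is replaced by the plain Registration concept and DataTreeCandidate moves to org.opendaylight.yangtools.yang.data.tree.api. A minimal sketch of a caller consuming the narrowed Consumer-of-Registration callback; the wiring class and the publisher/listener instances are illustrative placeholders, not part of this change:

    import java.util.Optional;
    import java.util.concurrent.atomic.AtomicReference;
    import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    // Hypothetical same-package caller of the revised interface.
    final class ListenerWiring {
        private final AtomicReference<Registration> reg = new AtomicReference<>();

        void wire(final ShardDataTreeChangeListenerPublisher publisher, final YangInstanceIdentifier path,
                final DOMDataTreeChangeListener listener) {
            // The callback now hands back a plain Registration instead of a ListenerRegistration.
            publisher.registerTreeChangeListener(path, listener, Optional.empty(), reg::set);
        }

        void unwire() {
            final Registration current = reg.getAndSet(null);
            if (current != null) {
                // Registration is a single close()-able handle; closing it ends the subscription.
                current.close();
            }
        }
    }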
@@ -21,6 +21,5 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; */ interface ShardDataTreeChangeListenerPublisher extends ShardDataTreeNotificationPublisher { void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener, - Optional initialState, - Consumer> onRegistration); + Optional initialState, Consumer onRegistration); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java index d737b55437..83209731c4 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java @@ -13,9 +13,9 @@ import akka.actor.Props; import java.util.Optional; import java.util.function.Consumer; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * Implementation of ShardDataTreeChangeListenerPublisher that offloads the generation and publication of data tree @@ -34,7 +34,7 @@ class ShardDataTreeChangeListenerPublisherActorProxy extends AbstractShardDataTr @Override public void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener, final Optional currentState, - final Consumer> onRegistration) { + final Consumer onRegistration) { final ShardDataTreeChangePublisherActor.RegisterListener regMessage = new ShardDataTreeChangePublisherActor.RegisterListener(treeId, listener, currentState, onRegistration); log.debug("{}: Sending {} to publisher actor {}", logContext(), regMessage, publisherActor()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java index e099d0bc92..5e96133aaa 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java @@ -13,9 +13,9 @@ import akka.actor.Props; import java.util.Optional; import java.util.function.Consumer; import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * Actor used to generate and publish DataTreeChange 
notifications. @@ -31,12 +31,11 @@ public final class ShardDataTreeChangePublisherActor @Override protected void handleReceive(final Object message) { - if (message instanceof RegisterListener) { - RegisterListener reg = (RegisterListener)message; + if (message instanceof RegisterListener reg) { LOG.debug("{}: Received {}", logContext(), reg); if (reg.initialState.isPresent()) { DefaultShardDataTreeChangeListenerPublisher.notifySingleListener(reg.path, reg.listener, - reg.initialState.get(), logContext()); + reg.initialState.orElseThrow(), logContext()); } else { reg.listener.onInitialData(); } @@ -55,11 +54,10 @@ public final class ShardDataTreeChangePublisherActor private final YangInstanceIdentifier path; private final DOMDataTreeChangeListener listener; private final Optional initialState; - private final Consumer> onRegistration; + private final Consumer onRegistration; RegisterListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener, - final Optional initialState, - final Consumer> onRegistration) { + final Optional initialState, final Consumer onRegistration) { this.path = requireNonNull(path); this.listener = requireNonNull(listener); this.initialState = requireNonNull(initialState); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java index 581768c0ed..03cc77f0e0 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java @@ -14,14 +14,15 @@ import com.google.common.primitives.UnsignedLong; import com.google.common.util.concurrent.FutureCallback; import java.util.Optional; import java.util.SortedSet; +import org.eclipse.jdt.annotation.NonNull; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.concepts.Identifiable; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; @VisibleForTesting -public abstract class ShardDataTreeCohort implements Identifiable { +public abstract class ShardDataTreeCohort { public enum State { READY, CAN_COMMIT_PENDING, @@ -39,6 +40,8 @@ public abstract class ShardDataTreeCohort implements Identifiable callback); + public abstract void canCommit(FutureCallback callback); @VisibleForTesting public abstract void preCommit(FutureCallback callback); @VisibleForTesting - public abstract void abort(FutureCallback callback); + public abstract void abort(FutureCallback callback); @VisibleForTesting public abstract void commit(FutureCallback callback); @@ -70,6 +73,6 @@ public abstract class ShardDataTreeCohort implements Identifiable getListenerActorsInfo(final Collection actors) { final Timeout timeout = new Timeout(20, TimeUnit.SECONDS); final List> futureList = new 
ArrayList<>(actors.size()); - for (ActorSelection actor: actors) { + for (ActorSelection actor : actors) { futureList.add(Patterns.ask(actor, GetInfo.INSTANCE, timeout)); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java index 4dbd818488..373a130004 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java @@ -8,7 +8,7 @@ package org.opendaylight.controller.cluster.datastore; import java.util.concurrent.TimeUnit; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * Interface for a class the publishes data tree notifications. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java index c22bc3bd98..095a542f6c 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java @@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore; import com.google.common.base.Stopwatch; import java.util.concurrent.TimeUnit; import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * Actor used to generate and publish data tree notifications. 
This is used to offload the potentially @@ -40,7 +40,7 @@ public class ShardDataTreeNotificationPublisherActor transaction; public ShardReadTransaction(final AbstractShardDataTreeTransaction transaction, final ActorRef shardActor, diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java index 8cbf143673..84c346def8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java @@ -5,7 +5,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore; import akka.actor.ActorRef; @@ -17,14 +16,15 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData; * * @author syedbahm */ +@Deprecated(since = "9.0.0", forRemoval = true) public class ShardReadWriteTransaction extends ShardWriteTransaction { - public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor, - ShardStats shardStats) { + public ShardReadWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor, + final ShardStats shardStats) { super(transaction, shardActor, shardStats); } @Override - public void handleReceive(Object message) { + public void handleReceive(final Object message) { if (ReadData.isSerializedType(message)) { readData(ReadData.fromSerializable(message)); } else if (DataExists.isSerializedType(message)) { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java index 87d70da1ab..3baad570b7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java @@ -15,8 +15,8 @@ import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnap import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState; import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput; import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort; +import org.opendaylight.controller.cluster.raft.messages.Payload; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.slf4j.Logger; @@ -46,7 +46,7 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort { WithSnapshot(final ShardDataTree store, final String shardName, final Logger log, final Snapshot snapshot) { super(store, shardName, log); - this.restoreFromSnapshot = requireNonNull(snapshot); + restoreFromSnapshot = requireNonNull(snapshot); } @Override diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java index 44393a14f1..3b3462884f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java @@ -29,6 +29,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** * The ShardTransaction Actor represents a remote transaction that delegates all actions to DOMDataReadWriteTransaction. */ +@Deprecated(since = "9.0.0", forRemoval = true) public abstract class ShardTransaction extends AbstractUntypedActorWithMetering { private final ActorRef shardActor; private final ShardStats shardStats; @@ -119,7 +120,7 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't " + "create remote instances of this actor and thus don't need it to be Serializable.") private static class ShardTransactionCreator implements Creator { - + @java.io.Serial private static final long serialVersionUID = 1L; final AbstractShardDataTreeTransaction transaction; @@ -139,23 +140,14 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering @Override public ShardTransaction create() { - final ShardTransaction tx; - switch (type) { - case READ_ONLY: - tx = new ShardReadTransaction(transaction, shardActor, shardStats); - break; - case READ_WRITE: - tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor, - shardStats); - break; - case WRITE_ONLY: - tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor, - shardStats); - break; - default: - throw new IllegalArgumentException("Unhandled transaction type " + type); - } - + final var tx = switch (type) { + case READ_ONLY -> new ShardReadTransaction(transaction, shardActor, shardStats); + case READ_WRITE -> new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction) transaction, + shardActor, shardStats); + case WRITE_ONLY -> new ShardWriteTransaction((ReadWriteShardDataTreeTransaction) transaction, + shardActor, shardStats); + default -> throw new IllegalArgumentException("Unhandled transaction type " + type); + }; tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout()); return tx; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java index 881f3c39d0..122c43592a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java @@ -22,7 +22,8 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * * @author Thomas Pantelis */ -class ShardTransactionActorFactory { +@Deprecated(since = "9.0.0", forRemoval = true) +final class 
ShardTransactionActorFactory { private static final AtomicLong ACTOR_NAME_COUNTER = new AtomicLong(); private final ShardDataTree dataTree; @@ -33,9 +34,9 @@ class ShardTransactionActorFactory { private final ActorRef shardActor; private final String shardName; - ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext, - String txnDispatcherPath, ActorRef shardActor, ActorContext actorContext, ShardStats shardMBean, - String shardName) { + ShardTransactionActorFactory(final ShardDataTree dataTree, final DatastoreContext datastoreContext, + final String txnDispatcherPath, final ActorRef shardActor, final ActorContext actorContext, + final ShardStats shardMBean, final String shardName) { this.dataTree = requireNonNull(dataTree); this.datastoreContext = requireNonNull(datastoreContext); this.txnDispatcherPath = requireNonNull(txnDispatcherPath); @@ -62,20 +63,12 @@ class ShardTransactionActorFactory { return sb.append(txId.getTransactionId()).append('_').append(ACTOR_NAME_COUNTER.incrementAndGet()).toString(); } - ActorRef newShardTransaction(TransactionType type, TransactionIdentifier transactionID) { - final AbstractShardDataTreeTransaction transaction; - switch (type) { - case READ_ONLY: - transaction = dataTree.newReadOnlyTransaction(transactionID); - break; - case READ_WRITE: - case WRITE_ONLY: - transaction = dataTree.newReadWriteTransaction(transactionID); - break; - default: - throw new IllegalArgumentException("Unsupported transaction type " + type); - } - + ActorRef newShardTransaction(final TransactionType type, final TransactionIdentifier transactionID) { + final AbstractShardDataTreeTransaction transaction = switch (type) { + case READ_ONLY -> dataTree.newReadOnlyTransaction(transactionID); + case READ_WRITE, WRITE_ONLY -> dataTree.newReadWriteTransaction(transactionID); + default -> throw new IllegalArgumentException("Unsupported transaction type " + type); + }; return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean) .withDispatcher(txnDispatcherPath), actorNameFor(transactionID)); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java index 9a439bd35b..bfd7802213 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java @@ -25,6 +25,7 @@ import scala.concurrent.duration.FiniteDuration; * * @author Thomas Pantelis */ +@Deprecated(since = "9.0.0", forRemoval = true) class ShardTransactionMessageRetrySupport implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(ShardTransactionMessageRetrySupport.class); @@ -81,7 +82,7 @@ class ShardTransactionMessageRetrySupport implements Closeable { messagesToRetry.clear(); } - private static class MessageInfo { + private static final class MessageInfo { final Object message; final ActorRef replyTo; final String failureMessage; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java index 67f2c684a1..764361a016 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java @@ -6,7 +6,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore; import akka.actor.ActorRef; @@ -24,8 +23,8 @@ import org.opendaylight.controller.cluster.datastore.modification.Modification; * * @author syedbahm */ +@Deprecated(since = "9.0.0", forRemoval = true) public class ShardWriteTransaction extends ShardTransaction { - private int totalBatchedModificationsReceived; private Exception lastBatchedModificationsException; private final ReadWriteShardDataTreeTransaction transaction; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java index f42af0b88e..2c7d13189f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java @@ -7,21 +7,23 @@ */ package org.opendaylight.controller.cluster.datastore; +import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; import com.google.common.base.MoreObjects.ToStringHelper; import com.google.common.base.Preconditions; -import com.google.common.base.Verify; import com.google.common.primitives.UnsignedLong; import com.google.common.util.concurrent.FutureCallback; import java.util.Optional; import java.util.SortedSet; import java.util.concurrent.CompletionStage; +import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.common.Empty; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,7 +32,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { private final DataTreeModification transaction; private final ShardDataTree dataTree; - private final TransactionIdentifier transactionId; + private final @NonNull TransactionIdentifier transactionId; private final CompositeDataTreeCohort userCohorts; private final @Nullable SortedSet participatingShardNames; @@ -54,13 +56,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { this.dataTree = requireNonNull(dataTree); this.transaction = 
requireNonNull(transaction); this.transactionId = requireNonNull(transactionId); - this.userCohorts = null; - this.participatingShardNames = null; + userCohorts = null; + participatingShardNames = null; this.nextFailure = requireNonNull(nextFailure); } @Override - public TransactionIdentifier getIdentifier() { + TransactionIdentifier transactionId() { return transactionId; } @@ -81,17 +83,17 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { private void checkState(final State expected) { Preconditions.checkState(state == expected, "State %s does not match expected state %s for %s", - state, expected, getIdentifier()); + state, expected, transactionId()); } @Override - public void canCommit(final FutureCallback newCallback) { + public void canCommit(final FutureCallback newCallback) { if (state == State.CAN_COMMIT_PENDING) { return; } checkState(State.READY); - this.callback = requireNonNull(newCallback); + callback = requireNonNull(newCallback); state = State.CAN_COMMIT_PENDING; if (nextFailure == null) { @@ -104,7 +106,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { @Override public void preCommit(final FutureCallback newCallback) { checkState(State.CAN_COMMIT_COMPLETE); - this.callback = requireNonNull(newCallback); + callback = requireNonNull(newCallback); state = State.PRE_COMMIT_PENDING; if (nextFailure == null) { @@ -115,9 +117,9 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { } @Override - public void abort(final FutureCallback abortCallback) { + public void abort(final FutureCallback abortCallback) { if (!dataTree.startAbort(this)) { - abortCallback.onSuccess(null); + abortCallback.onSuccess(Empty.value()); return; } @@ -126,15 +128,15 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { final Optional> maybeAborts = userCohorts.abort(); if (!maybeAborts.isPresent()) { - abortCallback.onSuccess(null); + abortCallback.onSuccess(Empty.value()); return; } - maybeAborts.get().whenComplete((noop, failure) -> { + maybeAborts.orElseThrow().whenComplete((noop, failure) -> { if (failure != null) { abortCallback.onFailure(failure); } else { - abortCallback.onSuccess(null); + abortCallback.onSuccess(Empty.value()); } }); } @@ -142,7 +144,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { @Override public void commit(final FutureCallback newCallback) { checkState(State.PRE_COMMIT_COMPLETE); - this.callback = requireNonNull(newCallback); + callback = requireNonNull(newCallback); state = State.COMMIT_PENDING; if (nextFailure == null) { @@ -154,20 +156,20 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { private FutureCallback switchState(final State newState) { @SuppressWarnings("unchecked") - final FutureCallback ret = (FutureCallback) this.callback; - this.callback = null; + final FutureCallback ret = (FutureCallback) callback; + callback = null; LOG.debug("Transaction {} changing state from {} to {}", transactionId, state, newState); - this.state = newState; + state = newState; return ret; } void setNewCandidate(final DataTreeCandidateTip dataTreeCandidate) { checkState(State.PRE_COMMIT_COMPLETE); - this.candidate = Verify.verifyNotNull(dataTreeCandidate); + candidate = verifyNotNull(dataTreeCandidate); } void successfulCanCommit() { - switchState(State.CAN_COMMIT_COMPLETE).onSuccess(null); + switchState(State.CAN_COMMIT_COMPLETE).onSuccess(Empty.value()); } void failedCanCommit(final Exception cause) { @@ -181,16 +183,16 @@ final class SimpleShardDataTreeCohort 
extends ShardDataTreeCohort { * @param dataTreeCandidate {@link DataTreeCandidate} under consideration * @param futureCallback the callback to invoke on completion, which may be immediate or async. */ - void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback futureCallback) { + void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback futureCallback) { userCohorts.reset(); - final Optional> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate); + final Optional> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate); if (!maybeCanCommitFuture.isPresent()) { doUserPreCommit(futureCallback); return; } - maybeCanCommitFuture.get().whenComplete((noop, failure) -> { + maybeCanCommitFuture.orElseThrow().whenComplete((noop, failure) -> { if (failure != null) { futureCallback.onFailure(failure); } else { @@ -199,25 +201,25 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { }); } - private void doUserPreCommit(final FutureCallback futureCallback) { - final Optional> maybePreCommitFuture = userCohorts.preCommit(); + private void doUserPreCommit(final FutureCallback futureCallback) { + final Optional> maybePreCommitFuture = userCohorts.preCommit(); if (!maybePreCommitFuture.isPresent()) { - futureCallback.onSuccess(null); + futureCallback.onSuccess(Empty.value()); return; } - maybePreCommitFuture.get().whenComplete((noop, failure) -> { + maybePreCommitFuture.orElseThrow().whenComplete((noop, failure) -> { if (failure != null) { futureCallback.onFailure(failure); } else { - futureCallback.onSuccess(null); + futureCallback.onSuccess(Empty.value()); } }); } void successfulPreCommit(final DataTreeCandidateTip dataTreeCandidate) { LOG.trace("Transaction {} prepared candidate {}", transaction, dataTreeCandidate); - this.candidate = Verify.verifyNotNull(dataTreeCandidate); + candidate = verifyNotNull(dataTreeCandidate); switchState(State.PRE_COMMIT_COMPLETE).onSuccess(dataTreeCandidate); } @@ -233,13 +235,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { } void successfulCommit(final UnsignedLong journalIndex, final Runnable onComplete) { - final Optional> maybeCommitFuture = userCohorts.commit(); + final Optional> maybeCommitFuture = userCohorts.commit(); if (!maybeCommitFuture.isPresent()) { finishSuccessfulCommit(journalIndex, onComplete); return; } - maybeCommitFuture.get().whenComplete((noop, failure) -> { + maybeCommitFuture.orElseThrow().whenComplete((noop, failure) -> { if (failure != null) { LOG.error("User cohorts failed to commit", failure); } @@ -271,7 +273,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort { void reportFailure(final Exception cause) { if (nextFailure == null) { - this.nextFailure = requireNonNull(cause); + nextFailure = requireNonNull(cause); } else { LOG.debug("Transaction {} already has a set failure, not updating it", transactionId, cause); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java deleted file mode 100644 index 5e8a95405b..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.dispatch.OnComplete; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import java.util.Arrays; -import java.util.List; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -/** - * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit - * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the - * shard as an optimization. - * - * @author Thomas Pantelis - */ -class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort { - private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class); - - private final ActorUtils actorUtils; - private final Future cohortFuture; - private final TransactionIdentifier transactionId; - private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE; - private final OperationCallback.Reference operationCallbackRef; - - SingleCommitCohortProxy(ActorUtils actorUtils, Future cohortFuture, TransactionIdentifier transactionId, - OperationCallback.Reference operationCallbackRef) { - this.actorUtils = actorUtils; - this.cohortFuture = cohortFuture; - this.transactionId = requireNonNull(transactionId); - this.operationCallbackRef = operationCallbackRef; - } - - @Override - public ListenableFuture canCommit() { - LOG.debug("Tx {} canCommit", transactionId); - - final SettableFuture returnFuture = SettableFuture.create(); - - cohortFuture.onComplete(new OnComplete() { - @Override - public void onComplete(Throwable failure, Object cohortResponse) { - if (failure != null) { - operationCallbackRef.get().failure(); - returnFuture.setException(failure); - return; - } - - operationCallbackRef.get().success(); - - LOG.debug("Tx {} successfully completed direct commit", transactionId); - - // The Future was the result of a direct commit to the shard, essentially eliding the - // front-end 3PC coordination. We don't really care about the specific Future - // response object, only that it completed successfully. At this point the Tx is complete - // so return true. The subsequent preCommit and commit phases will be no-ops, ie return - // immediate success, to complete the 3PC for the front-end. 
- returnFuture.set(Boolean.TRUE); - } - }, actorUtils.getClientDispatcher()); - - return returnFuture; - } - - @Override - public ListenableFuture preCommit() { - return delegateCohort.preCommit(); - } - - @Override - public ListenableFuture abort() { - return delegateCohort.abort(); - } - - @Override - public ListenableFuture commit() { - return delegateCohort.commit(); - } - - @Override - List> getCohortFutures() { - return Arrays.asList(cohortFuture); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java index 0278c1d1e5..57c680da2a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java @@ -20,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * Standalone transaction specialization of {@link AbstractFrontendHistory}. There can be multiple open transactions @@ -60,12 +60,12 @@ final class StandaloneFrontendHistory extends AbstractFrontendHistory { @Override FrontendTransaction createOpenSnapshot(final TransactionIdentifier id) { - return FrontendReadOnlyTransaction.create(this, tree.newReadOnlyTransaction(id)); + return FrontendReadOnlyTransaction.create(this, tree.newStandaloneReadOnlyTransaction(id)); } @Override FrontendTransaction createOpenTransaction(final TransactionIdentifier id) { - return FrontendReadWriteTransaction.createOpen(this, tree.newReadWriteTransaction(id)); + return FrontendReadWriteTransaction.createOpen(this, tree.newStandaloneReadWriteTransaction(id)); } @Override diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java deleted file mode 100644 index 4ef89b4684..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkState; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import akka.dispatch.Futures; -import akka.dispatch.OnComplete; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain; -import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo; -import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; -import scala.concurrent.Promise; - -/** - * A chain of {@link TransactionProxy}s. It allows a single open transaction to be open - * at a time. For remote transactions, it also tracks the outstanding readiness requests - * towards the shard and unblocks operations only after all have completed. - */ -final class TransactionChainProxy extends AbstractTransactionContextFactory - implements DOMStoreTransactionChain { - private abstract static class State { - /** - * Check if it is okay to allocate a new transaction. - * @throws IllegalStateException if a transaction may not be allocated. - */ - abstract void checkReady(); - - /** - * Return the future which needs to be waited for before shard information - * is returned (which unblocks remote transactions). 
- * @return Future to wait for, or null of no wait is necessary - */ - abstract Future previousFuture(); - } - - private abstract static class Pending extends State { - private final TransactionIdentifier transaction; - private final Future previousFuture; - - Pending(final TransactionIdentifier transaction, final Future previousFuture) { - this.previousFuture = previousFuture; - this.transaction = requireNonNull(transaction); - } - - @Override - final Future previousFuture() { - return previousFuture; - } - - final TransactionIdentifier getIdentifier() { - return transaction; - } - } - - private static final class Allocated extends Pending { - Allocated(final TransactionIdentifier transaction, final Future previousFuture) { - super(transaction, previousFuture); - } - - @Override - void checkReady() { - throw new IllegalStateException(String.format("Previous transaction %s is not ready yet", getIdentifier())); - } - } - - private static final class Submitted extends Pending { - Submitted(final TransactionIdentifier transaction, final Future previousFuture) { - super(transaction, previousFuture); - } - - @Override - void checkReady() { - // Okay to allocate - } - } - - private abstract static class DefaultState extends State { - @Override - final Future previousFuture() { - return null; - } - } - - private static final State IDLE_STATE = new DefaultState() { - @Override - void checkReady() { - // Okay to allocate - } - }; - - private static final State CLOSED_STATE = new DefaultState() { - @Override - void checkReady() { - throw new DOMTransactionChainClosedException("Transaction chain has been closed"); - } - }; - - private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class); - private static final AtomicReferenceFieldUpdater STATE_UPDATER = - AtomicReferenceFieldUpdater.newUpdater(TransactionChainProxy.class, State.class, "currentState"); - - private final TransactionContextFactory parent; - private volatile State currentState = IDLE_STATE; - - /** - * This map holds Promise instances for each read-only tx. It is used to maintain ordering of tx creates - * wrt to read-only tx's between this class and a LocalTransactionChain since they're bridged by - * asynchronous futures. Otherwise, in the following scenario, eg: - *
    - * 1) Create write tx1 on chain
    - * 2) do write and submit
    - * 3) Create read-only tx2 on chain and issue read
    - * 4) Create write tx3 on chain, do write but do not submit
    - *
    - * if the sequence/timing is right, tx3 may create its local tx on the LocalTransactionChain before tx2,
    - * which results in tx2 failing b/c tx3 isn't ready yet. So maintaining ordering prevents this issue
    - * (see Bug 4774).
    - *
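(Aside: the ordering scheme this Javadoc goes on to describe, one promise per read-only transaction that later write transactions must wait on, can be sketched with a JDK-only analogue. CompletableFuture stands in for the scala Promise/Future pair used by the deleted class, and all names here are illustrative.)

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Toy model: each read-only tx registers a promise on creation; a later write tx
    // proceeds only after every previously registered read-only tx has resolved.
    final class ReadOnlyOrdering<T> {
        private final ConcurrentMap<T, CompletableFuture<Void>> priorReadOnly = new ConcurrentHashMap<>();

        void onReadOnlyCreated(final T txId) {
            priorReadOnly.put(txId, new CompletableFuture<>());
        }

        void onReadOnlyContextCreated(final T txId) {
            final CompletableFuture<Void> promise = priorReadOnly.remove(txId);
            if (promise != null) {
                promise.complete(null);
            }
        }

        CompletableFuture<Void> gateForWrite() {
            // Snapshot the outstanding read-only promises and wait for all of them, which is
            // what combineWithPriorReadOnlyTxFutures() below does with scala Futures.
            final List<CompletableFuture<Void>> outstanding = new ArrayList<>(priorReadOnly.values());
            return CompletableFuture.allOf(outstanding.toArray(new CompletableFuture[0]));
        }
    }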
    - * A Promise is added via newReadOnlyTransaction. When the parent class completes the primary shard - * lookup and creates the TransactionContext (either success or failure), onTransactionContextCreated is - * called which completes the Promise. A write tx that is created prior to completion will wait on the - * Promise's Future via findPrimaryShard. - */ - private final ConcurrentMap> priorReadOnlyTxPromises = - new ConcurrentHashMap<>(); - - TransactionChainProxy(final TransactionContextFactory parent, final LocalHistoryIdentifier historyId) { - super(parent.getActorUtils(), historyId); - this.parent = parent; - } - - @Override - public DOMStoreReadTransaction newReadOnlyTransaction() { - currentState.checkReady(); - TransactionProxy transactionProxy = new TransactionProxy(this, TransactionType.READ_ONLY); - priorReadOnlyTxPromises.put(transactionProxy.getIdentifier(), Futures.promise()); - return transactionProxy; - } - - @Override - public DOMStoreReadWriteTransaction newReadWriteTransaction() { - getActorUtils().acquireTxCreationPermit(); - return allocateWriteTransaction(TransactionType.READ_WRITE); - } - - @Override - public DOMStoreWriteTransaction newWriteOnlyTransaction() { - getActorUtils().acquireTxCreationPermit(); - return allocateWriteTransaction(TransactionType.WRITE_ONLY); - } - - @Override - public void close() { - currentState = CLOSED_STATE; - - // Send a close transaction chain request to each and every shard - - getActorUtils().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(), - CloseTransactionChain.class); - } - - private TransactionProxy allocateWriteTransaction(final TransactionType type) { - State localState = currentState; - localState.checkReady(); - - final TransactionProxy ret = new TransactionProxy(this, type); - currentState = new Allocated(ret.getIdentifier(), localState.previousFuture()); - return ret; - } - - @Override - protected LocalTransactionChain factoryForShard(final String shardName, final ActorSelection shardLeader, - final ReadOnlyDataTree dataTree) { - final LocalTransactionChain ret = new LocalTransactionChain(this, shardLeader, dataTree); - LOG.debug("Allocated transaction chain {} for shard {} leader {}", ret, shardName, shardLeader); - return ret; - } - - /** - * This method is overridden to ensure the previous Tx's ready operations complete - * before we initiate the next Tx in the chain to avoid creation failures if the - * previous Tx's ready operations haven't completed yet. - */ - @SuppressWarnings({ "unchecked", "rawtypes" }) - @Override - protected Future findPrimaryShard(final String shardName, final TransactionIdentifier txId) { - // Read current state atomically - final State localState = currentState; - - // There are no outstanding futures, shortcut - Future previous = localState.previousFuture(); - if (previous == null) { - return combineFutureWithPossiblePriorReadOnlyTxFutures(parent.findPrimaryShard(shardName, txId), txId); - } - - final String previousTransactionId; - - if (localState instanceof Pending) { - previousTransactionId = ((Pending) localState).getIdentifier().toString(); - LOG.debug("Tx: {} - waiting for ready futures with pending Tx {}", txId, previousTransactionId); - } else { - previousTransactionId = ""; - LOG.debug("Waiting for ready futures on chain {}", getHistoryId()); - } - - previous = combineFutureWithPossiblePriorReadOnlyTxFutures(previous, txId); - - // Add a callback for completion of the combined Futures. 
- final Promise returnPromise = Futures.promise(); - - final OnComplete onComplete = new OnComplete() { - @Override - public void onComplete(final Throwable failure, final Object notUsed) { - if (failure != null) { - // A Ready Future failed so fail the returned Promise. - LOG.error("Tx: {} - ready future failed for previous Tx {}", txId, previousTransactionId); - returnPromise.failure(failure); - } else { - LOG.debug("Tx: {} - previous Tx {} readied - proceeding to FindPrimaryShard", - txId, previousTransactionId); - - // Send the FindPrimaryShard message and use the resulting Future to complete the - // returned Promise. - returnPromise.completeWith(parent.findPrimaryShard(shardName, txId)); - } - } - }; - - previous.onComplete(onComplete, getActorUtils().getClientDispatcher()); - return returnPromise.future(); - } - - private Future combineFutureWithPossiblePriorReadOnlyTxFutures(final Future future, - final TransactionIdentifier txId) { - return priorReadOnlyTxPromises.isEmpty() || priorReadOnlyTxPromises.containsKey(txId) ? future - // Tough luck, we need do some work - : combineWithPriorReadOnlyTxFutures(future, txId); - } - - // Split out of the common path - private Future combineWithPriorReadOnlyTxFutures(final Future future, final TransactionIdentifier txId) { - // Take a stable snapshot, and check if we raced - final List>> priorReadOnlyTxPromiseEntries = - new ArrayList<>(priorReadOnlyTxPromises.entrySet()); - if (priorReadOnlyTxPromiseEntries.isEmpty()) { - return future; - } - - final List> priorReadOnlyTxFutures = new ArrayList<>(priorReadOnlyTxPromiseEntries.size()); - for (Entry> entry: priorReadOnlyTxPromiseEntries) { - LOG.debug("Tx: {} - waiting on future for prior read-only Tx {}", txId, entry.getKey()); - priorReadOnlyTxFutures.add(entry.getValue().future()); - } - - final Future> combinedFutures = Futures.sequence(priorReadOnlyTxFutures, - getActorUtils().getClientDispatcher()); - - final Promise returnPromise = Futures.promise(); - final OnComplete> onComplete = new OnComplete<>() { - @Override - public void onComplete(final Throwable failure, final Iterable notUsed) { - LOG.debug("Tx: {} - prior read-only Tx futures complete", txId); - - // Complete the returned Promise with the original Future. 
- returnPromise.completeWith(future); - } - }; - - combinedFutures.onComplete(onComplete, getActorUtils().getClientDispatcher()); - return returnPromise.future(); - } - - @Override - protected void onTransactionReady(final TransactionIdentifier transaction, - final Collection> cohortFutures) { - final State localState = currentState; - checkState(localState instanceof Allocated, "Readying transaction %s while state is %s", transaction, - localState); - final TransactionIdentifier currentTx = ((Allocated)localState).getIdentifier(); - checkState(transaction.equals(currentTx), "Readying transaction %s while %s is allocated", transaction, - currentTx); - - // Transaction ready and we are not waiting for futures -- go to idle - if (cohortFutures.isEmpty()) { - currentState = IDLE_STATE; - return; - } - - // Combine the ready Futures into 1 - final Future> combined = Futures.sequence(cohortFutures, getActorUtils().getClientDispatcher()); - - // Record the we have outstanding futures - final State newState = new Submitted(transaction, combined); - currentState = newState; - - // Attach a completion reset, but only if we do not allocate a transaction - // in-between - combined.onComplete(new OnComplete>() { - @Override - public void onComplete(final Throwable arg0, final Iterable arg1) { - STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE); - } - }, getActorUtils().getClientDispatcher()); - } - - @Override - protected void onTransactionContextCreated(final TransactionIdentifier transactionId) { - Promise promise = priorReadOnlyTxPromises.remove(transactionId); - if (promise != null) { - promise.success(null); - } - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java deleted file mode 100644 index 549136b589..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import akka.actor.ActorSelection; -import com.google.common.util.concurrent.SettableFuture; -import java.util.Optional; -import java.util.SortedSet; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.AbstractRead; -import org.opendaylight.yangtools.concepts.AbstractSimpleIdentifiable; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -abstract class TransactionContext extends AbstractSimpleIdentifiable { - private static final Logger LOG = LoggerFactory.getLogger(TransactionContext.class); - - private final short transactionVersion; - - private long modificationCount = 0; - private boolean handOffComplete; - - TransactionContext(final TransactionIdentifier transactionIdentifier) { - this(transactionIdentifier, DataStoreVersions.CURRENT_VERSION); - } - - TransactionContext(final TransactionIdentifier transactionIdentifier, final short transactionVersion) { - super(transactionIdentifier); - this.transactionVersion = transactionVersion; - } - - final short getTransactionVersion() { - return transactionVersion; - } - - final void incrementModificationCount() { - modificationCount++; - } - - final void logModificationCount() { - LOG.debug("Total modifications on Tx {} = [ {} ]", getIdentifier(), modificationCount); - } - - /** - * Invoked by {@link AbstractTransactionContextWrapper} when it has finished handing - * off operations to this context. From this point on, the context is responsible - * for throttling operations. - * - *
-     * <p>
    - * Implementations can rely on the wrapper calling this operation in a synchronized - * block, so they do not need to ensure visibility of this state transition themselves. - */ - final void operationHandOffComplete() { - handOffComplete = true; - } - - final boolean isOperationHandOffComplete() { - return handOffComplete; - } - - /** - * A TransactionContext that uses operation limiting should return true else false. - * - * @return true if operation limiting is used, false otherwise - */ - boolean usesOperationLimiting() { - return false; - } - - abstract void executeDelete(YangInstanceIdentifier path, Boolean havePermit); - - abstract void executeMerge(YangInstanceIdentifier path, NormalizedNode data, Boolean havePermit); - - abstract void executeWrite(YangInstanceIdentifier path, NormalizedNode data, Boolean havePermit); - - abstract void executeRead(AbstractRead readCmd, SettableFuture proxyFuture, Boolean havePermit); - - abstract Future readyTransaction(Boolean havePermit, - Optional> participatingShardNames); - - abstract Future directCommit(Boolean havePermit); - - abstract void closeTransaction(); -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java deleted file mode 100644 index ef8cc49582..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import com.google.common.base.FinalizablePhantomReference; -import com.google.common.base.FinalizableReferenceQueue; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A PhantomReference that closes remote transactions for a TransactionContext when it's - * garbage collected. This is used for read-only transactions as they're not explicitly closed - * by clients. So the only way to detect that a transaction is no longer in use and it's safe - * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed - * but TransactionProxy instances should generally be short-lived enough to avoid being moved - * to the old generation space and thus should be cleaned up in a timely manner as the GC - * runs on the young generation (eden, swap1...) space much more frequently. - */ -final class TransactionContextCleanup extends FinalizablePhantomReference { - private static final Logger LOG = LoggerFactory.getLogger(TransactionContextCleanup.class); - /** - * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The - * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some - * trickery to clean up its internal thread when the bundle is unloaded. 
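For readers unfamiliar with the Guava API relied on here, the following is a minimal, self-contained sketch of the phantom-reference cleanup pattern just described; the Session and NativeHandle types are hypothetical stand-ins, only FinalizableReferenceQueue and FinalizablePhantomReference are the real Guava classes.

import com.google.common.base.FinalizablePhantomReference;
import com.google.common.base.FinalizableReferenceQueue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class Session { }                         // hypothetical referent handed to clients
interface NativeHandle { void close(); }        // hypothetical resource to release on GC

final class SessionCleanup extends FinalizablePhantomReference<Session> {
    // Single queue; its background thread invokes finalizeReferent() for enqueued references.
    private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
    // Phantom references need a strong reference of their own, otherwise they are
    // collected before finalizeReferent() ever runs.
    private static final Set<SessionCleanup> TRACKED = ConcurrentHashMap.newKeySet();

    private final NativeHandle handle;

    private SessionCleanup(final Session referent, final NativeHandle handle) {
        super(referent, QUEUE);
        this.handle = handle;
    }

    static void track(final Session referent, final NativeHandle handle) {
        TRACKED.add(new SessionCleanup(referent, handle));
    }

    @Override
    public void finalizeReferent() {
        // Called once the referent Session has become unreachable.
        if (TRACKED.remove(this)) {
            handle.close();
        }
    }
}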
- */ - private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue(); - - /** - * This stores the TransactionProxyCleanupPhantomReference instances statically, This is - * necessary because PhantomReferences need a hard reference so they're not garbage collected. - * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map - * and thus becomes eligible for garbage collection. - */ - private static final Map CACHE = new ConcurrentHashMap<>(); - - private final TransactionContext cleanup; - - private TransactionContextCleanup(final TransactionProxy referent, final TransactionContext cleanup) { - super(referent, QUEUE); - this.cleanup = cleanup; - } - - static void track(final TransactionProxy referent, final TransactionContext cleanup) { - final TransactionContextCleanup ret = new TransactionContextCleanup(referent, cleanup); - CACHE.put(cleanup, ret); - } - - @Override - public void finalizeReferent() { - LOG.trace("Cleaning up {} Tx actors", cleanup); - - if (CACHE.remove(cleanup) != null) { - cleanup.closeTransaction(); - } - } - - static void untrack(final TransactionContext cleanup) { - CACHE.remove(cleanup); - } -} \ No newline at end of file diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java deleted file mode 100644 index 3944b04ef5..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import akka.actor.ActorSelection; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicLong; -import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier; -import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; -import scala.concurrent.Future; - -/** - * An {@link AbstractTransactionContextFactory} which produces TransactionContext instances for single - * transactions (ie not chained). 
- */ -final class TransactionContextFactory extends AbstractTransactionContextFactory { - private final AtomicLong nextHistory = new AtomicLong(1); - - TransactionContextFactory(final ActorUtils actorUtils, final ClientIdentifier clientId) { - super(actorUtils, new LocalHistoryIdentifier(clientId, 0)); - } - - @Override - public void close() { - } - - @Override - protected LocalTransactionFactoryImpl factoryForShard(final String shardName, final ActorSelection shardLeader, - final ReadOnlyDataTree dataTree) { - return new LocalTransactionFactoryImpl(getActorUtils(), shardLeader, dataTree); - } - - @Override - protected Future findPrimaryShard(final String shardName, TransactionIdentifier txId) { - return getActorUtils().findPrimaryShardAsync(shardName); - } - - @Override - protected void onTransactionReady(final TransactionIdentifier transaction, - final Collection> cohortFutures) { - // Transactions are disconnected, this is a no-op - } - - DOMStoreTransactionChain createTransactionChain() { - return new TransactionChainProxy(this, new LocalHistoryIdentifier(getHistoryId().getClientId(), - nextHistory.getAndIncrement())); - } - - @Override - protected void onTransactionContextCreated(final TransactionIdentifier transactionId) { - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java deleted file mode 100644 index eeaec6b998..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; - -/** - * A TransactionOperation to apply a specific modification. Subclasses provide type capture of required data, so that - * we instantiate AbstractModification subclasses for the bare minimum time required. 
- */ -abstract class TransactionModificationOperation extends TransactionOperation { - private abstract static class AbstractDataOperation extends TransactionModificationOperation { - private final NormalizedNode data; - - AbstractDataOperation(final YangInstanceIdentifier path, final NormalizedNode data) { - super(path); - this.data = requireNonNull(data); - } - - final NormalizedNode data() { - return data; - } - } - - static final class DeleteOperation extends TransactionModificationOperation { - DeleteOperation(final YangInstanceIdentifier path) { - super(path); - } - - @Override - protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) { - transactionContext.executeDelete(path(), havePermit); - } - } - - static final class MergeOperation extends AbstractDataOperation { - MergeOperation(final YangInstanceIdentifier path, final NormalizedNode data) { - super(path, data); - } - - @Override - protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) { - transactionContext.executeMerge(path(), data(), havePermit); - } - } - - static final class WriteOperation extends AbstractDataOperation { - WriteOperation(final YangInstanceIdentifier path, final NormalizedNode data) { - super(path, data); - } - - @Override - protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) { - transactionContext.executeWrite(path(), data(), havePermit); - } - } - - private final YangInstanceIdentifier path; - - TransactionModificationOperation(final YangInstanceIdentifier path) { - this.path = requireNonNull(path); - } - - final YangInstanceIdentifier path() { - return path; - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java deleted file mode 100644 index 962d26133b..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import org.eclipse.jdt.annotation.Nullable; - -/** - * Abstract superclass for transaction operations which should be executed - * on a {@link TransactionContext} at a later point in time. - */ -abstract class TransactionOperation { - /** - * Execute the delayed operation. - * - * @param transactionContext the TransactionContext - * @param havePermit Boolean indicator if this operation has tried and acquired a permit, null if there was no - * attempt to acquire a permit. 
- */ - protected abstract void invoke(TransactionContext transactionContext, @Nullable Boolean havePermit); -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java deleted file mode 100644 index 16a979fa6a..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.base.Verify.verifyNotNull; -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Iterables; -import com.google.common.util.concurrent.FluentFuture; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.DeleteOperation; -import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.MergeOperation; -import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.WriteOperation; -import org.opendaylight.controller.cluster.datastore.messages.AbstractRead; -import org.opendaylight.controller.cluster.datastore.messages.DataExists; -import org.opendaylight.controller.cluster.datastore.messages.ReadData; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator; -import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction; -import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode; -import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild; -import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException; -import org.opendaylight.yangtools.yang.data.impl.schema.Builders; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import 
scala.concurrent.Future; -import scala.concurrent.Promise; - -/** - * A transaction potentially spanning multiple backend shards. - */ -public class TransactionProxy extends AbstractDOMStoreTransaction - implements DOMStoreReadWriteTransaction { - private enum TransactionState { - OPEN, - READY, - CLOSED, - } - - private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class); - private static final DeleteOperation ROOT_DELETE_OPERATION = new DeleteOperation(YangInstanceIdentifier.empty()); - - private final Map txContextWrappers = new TreeMap<>(); - private final AbstractTransactionContextFactory txContextFactory; - private final TransactionType type; - private TransactionState state = TransactionState.OPEN; - - @VisibleForTesting - public TransactionProxy(final AbstractTransactionContextFactory txContextFactory, final TransactionType type) { - super(txContextFactory.nextIdentifier(), txContextFactory.getActorUtils().getDatastoreContext() - .isTransactionDebugContextEnabled()); - this.txContextFactory = txContextFactory; - this.type = requireNonNull(type); - - LOG.debug("New {} Tx - {}", type, getIdentifier()); - } - - @Override - public FluentFuture exists(final YangInstanceIdentifier path) { - return executeRead(shardNameFromIdentifier(path), new DataExists(path, DataStoreVersions.CURRENT_VERSION)); - } - - private FluentFuture executeRead(final String shardName, final AbstractRead readCmd) { - checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed"); - - LOG.trace("Tx {} {} {}", getIdentifier(), readCmd.getClass().getSimpleName(), readCmd.getPath()); - - final SettableFuture proxyFuture = SettableFuture.create(); - AbstractTransactionContextWrapper contextWrapper = getContextWrapper(shardName); - contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() { - @Override - public void invoke(final TransactionContext transactionContext, final Boolean havePermit) { - transactionContext.executeRead(readCmd, proxyFuture, havePermit); - } - }); - - return FluentFuture.from(proxyFuture); - } - - @Override - public FluentFuture> read(final YangInstanceIdentifier path) { - checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed"); - requireNonNull(path, "path should not be null"); - - LOG.trace("Tx {} read {}", getIdentifier(), path); - return path.isEmpty() ? 
readAllData() : singleShardRead(shardNameFromIdentifier(path), path); - } - - private FluentFuture> singleShardRead(final String shardName, - final YangInstanceIdentifier path) { - return executeRead(shardName, new ReadData(path, DataStoreVersions.CURRENT_VERSION)); - } - - private FluentFuture> readAllData() { - final Set allShardNames = txContextFactory.getActorUtils().getConfiguration().getAllShardNames(); - final Collection>> futures = new ArrayList<>(allShardNames.size()); - - for (String shardName : allShardNames) { - futures.add(singleShardRead(shardName, YangInstanceIdentifier.empty())); - } - - final ListenableFuture>> listFuture = Futures.allAsList(futures); - final ListenableFuture> aggregateFuture; - - aggregateFuture = Futures.transform(listFuture, input -> { - try { - return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.empty(), input, - txContextFactory.getActorUtils().getSchemaContext(), - txContextFactory.getActorUtils().getDatastoreContext().getLogicalStoreType()); - } catch (DataValidationFailedException e) { - throw new IllegalArgumentException("Failed to aggregate", e); - } - }, MoreExecutors.directExecutor()); - - return FluentFuture.from(aggregateFuture); - } - - @Override - public void delete(final YangInstanceIdentifier path) { - checkModificationState("delete", path); - - if (path.isEmpty()) { - deleteAllData(); - } else { - executeModification(new DeleteOperation(path)); - } - } - - private void deleteAllData() { - for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) { - getContextWrapper(shardName).maybeExecuteTransactionOperation(ROOT_DELETE_OPERATION); - } - } - - @Override - public void merge(final YangInstanceIdentifier path, final NormalizedNode data) { - checkModificationState("merge", path); - - if (path.isEmpty()) { - mergeAllData(checkRootData(data)); - } else { - executeModification(new MergeOperation(path, data)); - } - } - - private void mergeAllData(final ContainerNode rootData) { - // Populate requests for individual shards that are being touched - final Map> rootBuilders = new HashMap<>(); - for (DataContainerChild child : rootData.body()) { - final String shardName = shardNameFromRootChild(child); - rootBuilders.computeIfAbsent(shardName, - unused -> Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier())) - .addChild(child); - } - - // Now dispatch all merges - for (Entry> entry : rootBuilders.entrySet()) { - getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new MergeOperation( - YangInstanceIdentifier.empty(), entry.getValue().build())); - } - } - - @Override - public void write(final YangInstanceIdentifier path, final NormalizedNode data) { - checkModificationState("write", path); - - if (path.isEmpty()) { - writeAllData(checkRootData(data)); - } else { - executeModification(new WriteOperation(path, data)); - } - } - - private void writeAllData(final ContainerNode rootData) { - // Open builders for all shards - final Map> rootBuilders = new HashMap<>(); - for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) { - rootBuilders.put(shardName, Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier())); - } - - // Now distribute children as needed - for (DataContainerChild child : rootData.body()) { - final String shardName = shardNameFromRootChild(child); - verifyNotNull(rootBuilders.get(shardName), "Failed to find builder for %s", shardName).addChild(child); - } - - // Now dispatch all writes - for (Entry> entry : 
rootBuilders.entrySet()) { - getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new WriteOperation( - YangInstanceIdentifier.empty(), entry.getValue().build())); - } - } - - private void executeModification(final TransactionModificationOperation operation) { - getContextWrapper(operation.path()).maybeExecuteTransactionOperation(operation); - } - - private static ContainerNode checkRootData(final NormalizedNode data) { - // Root has to be a container - checkArgument(data instanceof ContainerNode, "Invalid root data %s", data); - return (ContainerNode) data; - } - - private void checkModificationState(final String opName, final YangInstanceIdentifier path) { - checkState(type != TransactionType.READ_ONLY, "Modification operation on read-only transaction is not allowed"); - checkState(state == TransactionState.OPEN, "Transaction is sealed - further modifications are not allowed"); - LOG.trace("Tx {} {} {}", getIdentifier(), opName, path); - } - - private boolean seal(final TransactionState newState) { - if (state == TransactionState.OPEN) { - state = newState; - return true; - } - return false; - } - - @Override - public final void close() { - if (!seal(TransactionState.CLOSED)) { - checkState(state == TransactionState.CLOSED, "Transaction %s is ready, it cannot be closed", - getIdentifier()); - // Idempotent no-op as per AutoCloseable recommendation - return; - } - - for (AbstractTransactionContextWrapper contextWrapper : txContextWrappers.values()) { - contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() { - @Override - public void invoke(final TransactionContext transactionContext, final Boolean havePermit) { - transactionContext.closeTransaction(); - } - }); - } - - - txContextWrappers.clear(); - } - - @Override - public final AbstractThreePhaseCommitCohort ready() { - checkState(type != TransactionType.READ_ONLY, "Read-only transactions cannot be readied"); - - final boolean success = seal(TransactionState.READY); - checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state); - - LOG.debug("Tx {} Readying {} components for commit", getIdentifier(), txContextWrappers.size()); - - final AbstractThreePhaseCommitCohort ret; - switch (txContextWrappers.size()) { - case 0: - ret = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE; - break; - case 1: - final Entry e = Iterables.getOnlyElement( - txContextWrappers.entrySet()); - ret = createSingleCommitCohort(e.getKey(), e.getValue()); - break; - default: - ret = createMultiCommitCohort(); - } - - txContextFactory.onTransactionReady(getIdentifier(), ret.getCohortFutures()); - - final Throwable debugContext = getDebugContext(); - return debugContext == null ? 
ret : new DebugThreePhaseCommitCohort(getIdentifier(), ret, debugContext); - } - - @SuppressWarnings({ "rawtypes", "unchecked" }) - private AbstractThreePhaseCommitCohort createSingleCommitCohort(final String shardName, - final AbstractTransactionContextWrapper contextWrapper) { - - LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), shardName); - - final OperationCallback.Reference operationCallbackRef = - new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK); - - final TransactionContext transactionContext = contextWrapper.getTransactionContext(); - final Future future; - if (transactionContext == null) { - final Promise promise = akka.dispatch.Futures.promise(); - contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() { - @Override - public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) { - promise.completeWith(getDirectCommitFuture(newTransactionContext, operationCallbackRef, - havePermit)); - } - }); - future = promise.future(); - } else { - // avoid the creation of a promise and a TransactionOperation - future = getDirectCommitFuture(transactionContext, operationCallbackRef, null); - } - - return new SingleCommitCohortProxy(txContextFactory.getActorUtils(), future, getIdentifier(), - operationCallbackRef); - } - - private Future getDirectCommitFuture(final TransactionContext transactionContext, - final OperationCallback.Reference operationCallbackRef, final Boolean havePermit) { - TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback( - txContextFactory.getActorUtils()); - operationCallbackRef.set(rateLimitingCallback); - rateLimitingCallback.run(); - return transactionContext.directCommit(havePermit); - } - - private AbstractThreePhaseCommitCohort createMultiCommitCohort() { - - final List cohorts = new ArrayList<>(txContextWrappers.size()); - final Optional> shardNames = Optional.of(new TreeSet<>(txContextWrappers.keySet())); - for (Entry e : txContextWrappers.entrySet()) { - LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey()); - - final AbstractTransactionContextWrapper wrapper = e.getValue(); - - // The remote tx version is obtained the via TransactionContext which may not be available yet so - // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the - // TransactionContext is available. 
- cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(shardNames), - () -> wrapper.getTransactionContext().getTransactionVersion())); - } - - return new ThreePhaseCommitCohortProxy(txContextFactory.getActorUtils(), cohorts, getIdentifier()); - } - - private String shardNameFromRootChild(final DataContainerChild child) { - return shardNameFromIdentifier(YangInstanceIdentifier.create(child.getIdentifier())); - } - - private String shardNameFromIdentifier(final YangInstanceIdentifier path) { - return getActorUtils().getShardStrategyFactory().getStrategy(path).findShard(path); - } - - private AbstractTransactionContextWrapper getContextWrapper(final YangInstanceIdentifier path) { - return getContextWrapper(shardNameFromIdentifier(path)); - } - - private AbstractTransactionContextWrapper getContextWrapper(final String shardName) { - final AbstractTransactionContextWrapper existing = txContextWrappers.get(shardName); - if (existing != null) { - return existing; - } - - final AbstractTransactionContextWrapper fresh = txContextFactory.newTransactionContextWrapper(this, shardName); - txContextWrappers.put(shardName, fresh); - return fresh; - } - - TransactionType getType() { - return type; - } - - boolean isReady() { - return state != TransactionState.OPEN; - } - - final ActorUtils getActorUtils() { - return txContextFactory.getActorUtils(); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java deleted file mode 100644 index f5eb0e4be6..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore; - -import static java.util.Objects.requireNonNull; - -import akka.actor.ActorSelection; -import akka.dispatch.Mapper; -import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply; -import org.opendaylight.controller.cluster.datastore.utils.ActorUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import scala.concurrent.Future; - -/** - * A {@link Mapper} extracting the {@link ActorSelection} pointing to the actor which - * is backing a particular transaction. - * - *
- * <p>
    - * This class is not for general consumption. It is public only to support the pre-lithium compatibility - * package. - * TODO: once we remove compatibility, make this class package-private and final. - */ -public class TransactionReadyReplyMapper extends Mapper { - protected static final Mapper SAME_FAILURE_TRANSFORMER = new Mapper() { - @Override - public Throwable apply(final Throwable failure) { - return failure; - } - }; - private static final Logger LOG = LoggerFactory.getLogger(TransactionReadyReplyMapper.class); - private final TransactionIdentifier identifier; - private final ActorUtils actorUtils; - - protected TransactionReadyReplyMapper(final ActorUtils actorUtils, final TransactionIdentifier identifier) { - this.actorUtils = requireNonNull(actorUtils); - this.identifier = requireNonNull(identifier); - } - - protected final ActorUtils getActorUtils() { - return actorUtils; - } - - protected String extractCohortPathFrom(final ReadyTransactionReply readyTxReply) { - return readyTxReply.getCohortPath(); - } - - @Override - public final ActorSelection checkedApply(final Object serializedReadyReply) { - LOG.debug("Tx {} readyTransaction", identifier); - - // At this point the ready operation succeeded and we need to extract the cohort - // actor path from the reply. - if (ReadyTransactionReply.isSerializedType(serializedReadyReply)) { - ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply); - return actorUtils.actorSelection(extractCohortPathFrom(readyTxReply)); - } - - // Throwing an exception here will fail the Future. - throw new IllegalArgumentException(String.format("%s: Invalid reply type %s", - identifier, serializedReadyReply.getClass())); - } - - static Future transform(final Future readyReplyFuture, final ActorUtils actorUtils, - final TransactionIdentifier identifier) { - return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorUtils, identifier), - SAME_FAILURE_TRANSFORMER, actorUtils.getClientDispatcher()); - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java index 649dae5289..19ca628d26 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java @@ -12,14 +12,12 @@ public enum TransactionType { WRITE_ONLY, READ_WRITE; - // Cache all values - private static final TransactionType[] VALUES = values(); - public static TransactionType fromInt(final int type) { - try { - return VALUES[type]; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException("In TransactionType enum value " + type, e); - } + return switch (type) { + case 0 -> READ_ONLY; + case 1 -> WRITE_ONLY; + case 2 -> READ_WRITE; + default -> throw new IllegalArgumentException("In TransactionType enum value " + type); + }; } } \ No newline at end of file diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java index 
33d2415682..2fdf3a9d62 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java @@ -15,10 +15,11 @@ import akka.actor.PoisonPill; import akka.actor.Props; import com.google.common.annotations.VisibleForTesting; import java.util.concurrent.TimeUnit; +import org.eclipse.jdt.annotation.NonNullByDefault; import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor; import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration; import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply; -import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.concepts.Registration; import scala.concurrent.duration.FiniteDuration; /** @@ -30,10 +31,9 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac @VisibleForTesting static long killDelay = TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS); - private ListenerRegistration registration; - private Runnable onClose; + private SetRegistration registration = null; + private Cancellable killSchedule = null; private boolean closed; - private Cancellable killSchedule; @Override protected void handleReceive(final Object message) { @@ -42,9 +42,8 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac if (isValidSender(getSender())) { getSender().tell(CloseDataTreeNotificationListenerRegistrationReply.getInstance(), getSelf()); } - } else if (message instanceof SetRegistration) { - registration = ((SetRegistration)message).registration; - onClose = ((SetRegistration)message).onClose; + } else if (message instanceof SetRegistration setRegistration) { + registration = setRegistration; if (closed) { closeListenerRegistration(); } @@ -55,10 +54,12 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac private void closeListenerRegistration() { closed = true; - if (registration != null) { - registration.close(); - onClose.run(); + + final var reg = registration; + if (reg != null) { registration = null; + reg.registration.close(); + reg.onClose.run(); if (killSchedule == null) { killSchedule = getContext().system().scheduler().scheduleOnce(FiniteDuration.create(killDelay, @@ -72,13 +73,11 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac return Props.create(DataTreeNotificationListenerRegistrationActor.class); } - public static class SetRegistration { - private final ListenerRegistration registration; - private final Runnable onClose; - - public SetRegistration(final ListenerRegistration registration, final Runnable onClose) { - this.registration = requireNonNull(registration); - this.onClose = requireNonNull(onClose); + @NonNullByDefault + public record SetRegistration(Registration registration, Runnable onClose) { + public SetRegistration { + requireNonNull(registration); + requireNonNull(onClose); } } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java index ccaa48b8d5..5eabe94399 
100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java @@ -24,16 +24,15 @@ import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor; import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer; import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType; import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier; import org.opendaylight.yangtools.yang.data.codec.gson.JSONNormalizedNodeStreamWriter; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode; +import org.opendaylight.yangtools.yang.data.tree.api.ModificationType; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack; @@ -99,7 +98,7 @@ public final class JsonExportActor extends AbstractUntypedActor { final Path filePath = snapshotDir.resolve(exportSnapshot.id + "-snapshot.json"); LOG.debug("Creating JSON file : {}", filePath); - final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter().get(); + final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter(); checkState(root instanceof NormalizedNodeContainer, "Unexpected root %s", root); writeSnapshot(filePath, (NormalizedNodeContainer) root); @@ -124,8 +123,7 @@ public final class JsonExportActor extends AbstractUntypedActor { try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) { jsonWriter.beginObject(); - try (NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter( - JSONNormalizedNodeStreamWriter.createNestedWriter( + try (var nnWriter = NormalizedNodeWriter.forStreamWriter(JSONNormalizedNodeStreamWriter.createNestedWriter( JSONCodecFactorySupplier.RFC7951.getShared(schemaContext), SchemaInferenceStack.of(schemaContext).toInference(), null, jsonWriter), true)) { @@ -144,11 +142,10 @@ public final class JsonExportActor extends AbstractUntypedActor { try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) { jsonWriter.beginObject().name("Entries"); jsonWriter.beginArray(); - for (ReplicatedLogEntry entry : entries) { - final Payload data = entry.getData(); - if (data instanceof CommitTransactionPayload) { - final CommitTransactionPayload payload = (CommitTransactionPayload) entry.getData(); - final DataTreeCandidate candidate = payload.getCandidate().getValue().getCandidate(); + for (var entry : entries) { + final var data = entry.getData(); + if (data instanceof CommitTransactionPayload payload) { + final var 
candidate = payload.getCandidate().candidate(); writeNode(jsonWriter, candidate); } else { jsonWriter.beginObject().name("Payload").value(data.toString()).endObject(); @@ -162,21 +159,18 @@ public final class JsonExportActor extends AbstractUntypedActor { } private static void writeNode(final JsonWriter writer, final DataTreeCandidate candidate) throws IOException { - writer.beginObject(); - writer.name("Entry"); - writer.beginArray(); + writer.beginObject().name("Entry").beginArray(); doWriteNode(writer, candidate.getRootPath(), candidate.getRootNode()); - writer.endArray(); - writer.endObject(); + writer.endArray().endObject(); } private static void doWriteNode(final JsonWriter writer, final YangInstanceIdentifier path, final DataTreeCandidateNode node) throws IOException { - switch (node.getModificationType()) { + switch (node.modificationType()) { case APPEARED: case DISAPPEARED: case SUBTREE_MODIFIED: - NodeIterator iterator = new NodeIterator(null, path, node.getChildNodes().iterator()); + NodeIterator iterator = new NodeIterator(null, path, node.childNodes().iterator()); do { iterator = iterator.next(writer); } while (iterator != null); @@ -193,14 +187,14 @@ public final class JsonExportActor extends AbstractUntypedActor { private static void outputNodeInfo(final JsonWriter writer, final YangInstanceIdentifier path, final DataTreeCandidateNode node) throws IOException { - final ModificationType modificationType = node.getModificationType(); + final ModificationType modificationType = node.modificationType(); writer.beginObject().name("Node"); writer.beginArray(); writer.beginObject().name("Path").value(path.toString()).endObject(); writer.beginObject().name("ModificationType").value(modificationType.toString()).endObject(); if (modificationType == ModificationType.WRITE) { - writer.beginObject().name("Data").value(node.getDataAfter().get().body().toString()).endObject(); + writer.beginObject().name("Data").value(node.getDataAfter().body().toString()).endObject(); } writer.endArray(); writer.endObject(); @@ -212,7 +206,7 @@ public final class JsonExportActor extends AbstractUntypedActor { writer.beginArray(); writer.beginObject().name("Path").value(path.toString()).endObject(); writer.beginObject().name("ModificationType") - .value("UNSUPPORTED MODIFICATION: " + node.getModificationType()).endObject(); + .value("UNSUPPORTED MODIFICATION: " + node.modificationType()).endObject(); writer.endArray(); writer.endObject(); } @@ -239,14 +233,14 @@ public final class JsonExportActor extends AbstractUntypedActor { NodeIterator next(final JsonWriter writer) throws IOException { while (iterator.hasNext()) { - final DataTreeCandidateNode node = iterator.next(); - final YangInstanceIdentifier child = path.node(node.getIdentifier()); + final var node = iterator.next(); + final var child = path.node(node.name()); - switch (node.getModificationType()) { + switch (node.modificationType()) { case APPEARED: case DISAPPEARED: case SUBTREE_MODIFIED: - return new NodeIterator(this, child, node.getChildNodes().iterator()); + return new NodeIterator(this, child, node.childNodes().iterator()); case DELETE: case UNMODIFIED: case WRITE: diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java index d7d380830f..91da59d3ba 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java @@ -76,7 +76,7 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering { private void onSerializeSnapshot(final SerializeSnapshot request) { Optional installSnapshotStream = request.getInstallSnapshotStream(); if (installSnapshotStream.isPresent()) { - try (ObjectOutputStream out = getOutputStream(installSnapshotStream.get())) { + try (ObjectOutputStream out = getOutputStream(installSnapshotStream.orElseThrow())) { request.getSnapshot().serialize(out); } catch (IOException e) { // TODO - we should communicate the failure in the CaptureSnapshotReply. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java index 4325c7f268..70f2ccb69f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java @@ -19,6 +19,7 @@ public interface Configuration { /** * Returns all the shard names that belong on the member by the given name. */ + // FIXME: return Set here @NonNull Collection getMemberShardNames(@NonNull MemberName memberName); /** @@ -34,6 +35,7 @@ public interface Configuration { /** * Returns the member replicas for the given shard name. */ + // FIXME: return Set here @NonNull Collection getMembersFromShardName(@NonNull String shardName); /** @@ -54,6 +56,7 @@ public interface Configuration { /** * Returns a unique set of all member names configured for all shards. 
*/ + // FIXME: return Set here Collection getUniqueMemberNamesForAllShards(); /* diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java index 1ede88c3cb..d0e8d875f6 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java @@ -11,18 +11,20 @@ import static java.util.Objects.requireNonNull; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import org.opendaylight.controller.cluster.access.concepts.MemberName; import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy; import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory; +// FIXME: Non-final for testing public class ConfigurationImpl implements Configuration { private volatile Map moduleConfigMap; @@ -35,16 +37,17 @@ public class ConfigurationImpl implements Configuration { this(new FileModuleShardConfigProvider(moduleShardsConfigPath, modulesConfigPath)); } + @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Subclassed for testing") public ConfigurationImpl(final ModuleShardConfigProvider provider) { ImmutableMap.Builder mapBuilder = ImmutableMap.builder(); - for (Map.Entry e: provider.retrieveModuleConfigs(this).entrySet()) { + for (Entry e: provider.retrieveModuleConfigs(this).entrySet()) { mapBuilder.put(e.getKey(), e.getValue().build()); } - this.moduleConfigMap = mapBuilder.build(); + moduleConfigMap = mapBuilder.build(); - this.allShardNames = createAllShardNames(moduleConfigMap.values()); - this.namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values()); + allShardNames = createAllShardNames(moduleConfigMap.values()); + namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values()); } private static Set createAllShardNames(final Iterable moduleConfigs) { @@ -121,7 +124,7 @@ public class ConfigurationImpl implements Configuration { } } - return Collections.emptyList(); + return List.of(); } private static void checkNotNullShardName(final String shardName) { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java index 7c206adc58..74d100540a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java @@ -52,19 +52,8 @@ public class ShardIdentifier { return false; } - ShardIdentifier that = (ShardIdentifier) obj; - - if 
(!memberName.equals(that.memberName)) { - return false; - } - if (!shardName.equals(that.shardName)) { - return false; - } - if (!type.equals(that.type)) { - return false; - } - - return true; + final var that = (ShardIdentifier) obj; + return memberName.equals(that.memberName) && shardName.equals(that.shardName) && type.equals(that.type); } @Override @@ -103,17 +92,17 @@ public class ShardIdentifier { } public Builder shardName(final String newShardName) { - this.shardName = newShardName; + shardName = newShardName; return this; } public Builder memberName(final MemberName newMemberName) { - this.memberName = newMemberName; + memberName = newMemberName; return this; } public Builder type(final String newType) { - this.type = newType; + type = newType; return this; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java index 880ba99dbd..bb47e7c838 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java @@ -11,26 +11,19 @@ package org.opendaylight.controller.cluster.datastore.identifiers; public class ShardManagerIdentifier { private final String type; - public ShardManagerIdentifier(String type) { + public ShardManagerIdentifier(final String type) { this.type = type; } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - - ShardManagerIdentifier that = (ShardManagerIdentifier) obj; - - if (!type.equals(that.type)) { - return false; - } - - return true; + return type.equals(((ShardManagerIdentifier) obj).type); } @Override @@ -49,14 +42,13 @@ public class ShardManagerIdentifier { public static class Builder { private String type; - public Builder type(String newType) { - this.type = newType; + public Builder type(final String newType) { + type = newType; return this; } public ShardManagerIdentifier build() { - return new ShardManagerIdentifier(this.type); + return new ShardManagerIdentifier(type); } - } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java index 25c13989d5..d0f1d3e7e1 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java @@ -5,28 +5,29 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import com.google.common.base.Preconditions; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -public class AbortTransaction extends AbstractThreePhaseCommitMessage { +@Deprecated(since = 
"9.0.0", forRemoval = true) +public final class AbortTransaction extends AbstractThreePhaseCommitMessage { + @java.io.Serial private static final long serialVersionUID = 1L; public AbortTransaction() { } - public AbortTransaction(TransactionIdentifier transactionID, final short version) { + public AbortTransaction(final TransactionIdentifier transactionID, final short version) { super(transactionID, version); } - public static AbortTransaction fromSerializable(Object serializable) { + public static AbortTransaction fromSerializable(final Object serializable) { Preconditions.checkArgument(serializable instanceof AbortTransaction); return (AbortTransaction)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof AbortTransaction; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java index 3b58458e1a..911d8cf058 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java @@ -5,26 +5,28 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import org.opendaylight.controller.cluster.datastore.DataStoreVersions; -public class AbortTransactionReply extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class AbortTransactionReply extends VersionedExternalizableMessage { + @java.io.Serial + private static final long serialVersionUID = 7251132353204199793L; private static final AbortTransactionReply INSTANCE = new AbortTransactionReply(); public AbortTransactionReply() { } - private AbortTransactionReply(short version) { + private AbortTransactionReply(final short version) { super(version); } - public static AbortTransactionReply instance(short version) { + public static AbortTransactionReply instance(final short version) { return version == DataStoreVersions.CURRENT_VERSION ? 
INSTANCE : new AbortTransactionReply(version); } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof AbortTransactionReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java index dd4c9b8b01..00aa7fa64b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java @@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; * @author gwu * */ +@Deprecated(since = "9.0.0", forRemoval = true) public abstract class AbstractRead extends VersionedExternalizableMessage { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java index 3b45e642ca..6296c280a2 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java @@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier * * @author Thomas Pantelis */ +@Deprecated(since = "9.0.0", forRemoval = true) public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java index 09c5b739cf..cd6e0d8cfa 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java @@ -7,11 +7,14 @@ */ package org.opendaylight.controller.cluster.datastore.messages; -import java.io.Serializable; +import static java.util.Objects.requireNonNull; -public class ActorInitialized implements Serializable { - private static final long serialVersionUID = 1L; +import akka.actor.ActorRef; +import org.eclipse.jdt.annotation.NonNullByDefault; - public ActorInitialized() { +@NonNullByDefault +public record ActorInitialized(ActorRef actorRef) { + public ActorInitialized { + requireNonNull(actorRef); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java index 77d2687ccb..4e7b40ab1f 
100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java @@ -26,7 +26,9 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi * * @author Thomas Pantelis */ -public class BatchedModifications extends MutableCompositeModification { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class BatchedModifications extends MutableCompositeModification { + @java.io.Serial private static final long serialVersionUID = 1L; private boolean ready; @@ -39,7 +41,7 @@ public class BatchedModifications extends MutableCompositeModification { public BatchedModifications() { } - public BatchedModifications(TransactionIdentifier transactionId, short version) { + public BatchedModifications(final TransactionIdentifier transactionId, final short version) { super(version); this.transactionId = requireNonNull(transactionId, "transactionID can't be null"); } @@ -48,10 +50,10 @@ public class BatchedModifications extends MutableCompositeModification { return ready; } - public void setReady(Optional> possibleParticipatingShardNames) { - this.ready = true; - this.participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null); - Preconditions.checkArgument(this.participatingShardNames == null || this.participatingShardNames.size() > 1); + public void setReady(final Optional> possibleParticipatingShardNames) { + ready = true; + participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null); + Preconditions.checkArgument(participatingShardNames == null || participatingShardNames.size() > 1); } public void setReady() { @@ -66,7 +68,7 @@ public class BatchedModifications extends MutableCompositeModification { return doCommitOnReady; } - public void setDoCommitOnReady(boolean doCommitOnReady) { + public void setDoCommitOnReady(final boolean doCommitOnReady) { this.doCommitOnReady = doCommitOnReady; } @@ -74,7 +76,7 @@ public class BatchedModifications extends MutableCompositeModification { return totalMessagesSent; } - public void setTotalMessagesSent(int totalMessagesSent) { + public void setTotalMessagesSent(final int totalMessagesSent) { this.totalMessagesSent = totalMessagesSent; } @@ -83,7 +85,7 @@ public class BatchedModifications extends MutableCompositeModification { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); transactionId = TransactionIdentifier.readFrom(in); ready = in.readBoolean(); @@ -104,7 +106,7 @@ public class BatchedModifications extends MutableCompositeModification { } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); transactionId.writeTo(out); out.writeBoolean(ready); @@ -114,7 +116,7 @@ public class BatchedModifications extends MutableCompositeModification { if (getVersion() >= DataStoreVersions.FLUORINE_VERSION) { if (participatingShardNames != null) { out.writeInt(participatingShardNames.size()); - for (String shardName: participatingShardNames) { + for (String shardName : participatingShardNames) { out.writeObject(shardName); } } else { diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java index 29bb3e9ea6..0cca8d03ff 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java @@ -16,7 +16,9 @@ import java.io.ObjectOutput; * * @author Thomas Pantelis */ -public class BatchedModificationsReply extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class BatchedModificationsReply extends VersionedExternalizableMessage { + @java.io.Serial private static final long serialVersionUID = 1L; private int numBatched; @@ -24,7 +26,7 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage { public BatchedModificationsReply() { } - public BatchedModificationsReply(int numBatched) { + public BatchedModificationsReply(final int numBatched) { this.numBatched = numBatched; } @@ -33,13 +35,13 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); numBatched = in.readInt(); } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); out.writeInt(numBatched); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java index 087c7b6376..f50412fc0e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java @@ -5,28 +5,29 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import com.google.common.base.Preconditions; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -public class CanCommitTransaction extends AbstractThreePhaseCommitMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CanCommitTransaction extends AbstractThreePhaseCommitMessage { + @java.io.Serial private static final long serialVersionUID = 1L; public CanCommitTransaction() { } - public CanCommitTransaction(TransactionIdentifier transactionID, final short version) { + public CanCommitTransaction(final TransactionIdentifier transactionID, final short version) { super(transactionID, version); } - public static CanCommitTransaction fromSerializable(Object serializable) { + public static CanCommitTransaction fromSerializable(final Object serializable) { 
Preconditions.checkArgument(serializable instanceof CanCommitTransaction); return (CanCommitTransaction)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof CanCommitTransaction; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java index f346cba433..5c8fae94b8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java @@ -14,7 +14,11 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import org.opendaylight.controller.cluster.datastore.DataStoreVersions; -public class CanCommitTransactionReply extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CanCommitTransactionReply extends VersionedExternalizableMessage { + @java.io.Serial + private static final long serialVersionUID = 4355566635423934872L; + private static final CanCommitTransactionReply YES = new CanCommitTransactionReply(true, DataStoreVersions.CURRENT_VERSION); private static final CanCommitTransactionReply NO = @@ -35,13 +39,13 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); canCommit = in.readBoolean(); } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); out.writeBoolean(canCommit); } @@ -51,11 +55,11 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage { return "CanCommitTransactionReply [canCommit=" + canCommit + ", version=" + getVersion() + "]"; } - public static CanCommitTransactionReply yes(short version) { + public static CanCommitTransactionReply yes(final short version) { return version == DataStoreVersions.CURRENT_VERSION ? YES : new CanCommitTransactionReply(true, version); } - public static CanCommitTransactionReply no(short version) { + public static CanCommitTransactionReply no(final short version) { return version == DataStoreVersions.CURRENT_VERSION ? 
NO : new CanCommitTransactionReply(false, version); } @@ -64,7 +68,7 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage { return (CanCommitTransactionReply)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof CanCommitTransactionReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java index 3b5c6b3b8c..327dca0e80 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java @@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages; import java.io.Serializable; public final class CloseDataTreeNotificationListenerRegistration implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; private static final CloseDataTreeNotificationListenerRegistration INSTANCE = new CloseDataTreeNotificationListenerRegistration(); @@ -21,6 +22,7 @@ public final class CloseDataTreeNotificationListenerRegistration implements Seri return INSTANCE; } + @java.io.Serial private Object readResolve() { return INSTANCE; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java index 0bc5254c14..ae825106ad 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java @@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages; import java.io.Serializable; public final class CloseDataTreeNotificationListenerRegistrationReply implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; private static final CloseDataTreeNotificationListenerRegistrationReply INSTANCE = new CloseDataTreeNotificationListenerRegistrationReply(); @@ -22,6 +23,7 @@ public final class CloseDataTreeNotificationListenerRegistrationReply implements return INSTANCE; } + @java.io.Serial private Object readResolve() { return INSTANCE; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java index 1a3567cafa..5b3c050e4b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java @@ -5,20 +5,21 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; -public class CloseTransaction extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CloseTransaction extends VersionedExternalizableMessage { + @java.io.Serial private static final long serialVersionUID = 1L; public CloseTransaction() { } - public CloseTransaction(short version) { + public CloseTransaction(final short version) { super(version); } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof CloseTransaction; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java index 1e96286eb9..d06b7319b4 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java @@ -16,8 +16,10 @@ import java.io.ObjectOutput; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.opendaylight.yangtools.concepts.Identifiable; -public class CloseTransactionChain extends VersionedExternalizableMessage +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CloseTransactionChain extends VersionedExternalizableMessage implements Identifiable { + @java.io.Serial private static final long serialVersionUID = 1L; private LocalHistoryIdentifier transactionChainId; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java index 0e21b578ca..a746580516 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java @@ -5,9 +5,9 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; +@Deprecated(since = "9.0.0", forRemoval = true) public class CloseTransactionReply extends VersionedExternalizableMessage { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java index fe13e5d8b1..bd80287ae9 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java @@ -5,28 +5,29 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import com.google.common.base.Preconditions; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -public class CommitTransaction extends AbstractThreePhaseCommitMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CommitTransaction extends AbstractThreePhaseCommitMessage { + @java.io.Serial private static final long serialVersionUID = 1L; public CommitTransaction() { } - public CommitTransaction(TransactionIdentifier transactionID, final short version) { + public CommitTransaction(final TransactionIdentifier transactionID, final short version) { super(transactionID, version); } - public static CommitTransaction fromSerializable(Object serializable) { + public static CommitTransaction fromSerializable(final Object serializable) { Preconditions.checkArgument(serializable instanceof CommitTransaction); return (CommitTransaction)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof CommitTransaction; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java index cd3a13a9f7..167124c6fe 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java @@ -5,26 +5,29 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import org.opendaylight.controller.cluster.datastore.DataStoreVersions; -public class CommitTransactionReply extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CommitTransactionReply extends VersionedExternalizableMessage { + @java.io.Serial + private static final long serialVersionUID = -8342450250867395000L; + public static final CommitTransactionReply INSTANCE = new CommitTransactionReply(); public CommitTransactionReply() { } - private CommitTransactionReply(short version) { + private CommitTransactionReply(final short version) { super(version); } - public static CommitTransactionReply instance(short version) { + public static CommitTransactionReply instance(final short version) { return version == DataStoreVersions.CURRENT_VERSION ? 
INSTANCE : new CommitTransactionReply(version); } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof CommitTransactionReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java index 3283a55f43..5ef056e8a0 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java @@ -15,7 +15,9 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -public class CreateTransaction extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CreateTransaction extends VersionedExternalizableMessage { + @java.io.Serial private static final long serialVersionUID = 1L; private TransactionIdentifier transactionId; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java index 87dd7c57fb..644daf21fb 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java @@ -15,7 +15,9 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -public class CreateTransactionReply extends VersionedExternalizableMessage { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class CreateTransactionReply extends VersionedExternalizableMessage { + @java.io.Serial private static final long serialVersionUID = 1L; private String transactionPath; @@ -40,14 +42,14 @@ public class CreateTransactionReply extends VersionedExternalizableMessage { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); transactionId = TransactionIdentifier.readFrom(in); transactionPath = in.readUTF(); } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); transactionId.writeTo(out); out.writeUTF(transactionPath); @@ -60,12 +62,12 @@ public class CreateTransactionReply extends VersionedExternalizableMessage { + ", version=" + getVersion() + "]"; } - public static CreateTransactionReply fromSerializable(Object serializable) { + public static CreateTransactionReply fromSerializable(final Object serializable) { checkArgument(serializable instanceof CreateTransactionReply); return (CreateTransactionReply)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean 
isSerializedType(final Object message) { return message instanceof CreateTransactionReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java index b7e38d5082..6c646f7cc3 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java @@ -5,7 +5,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import com.google.common.base.Preconditions; @@ -15,6 +14,7 @@ import org.opendaylight.mdsal.common.api.ReadFailedException; import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +@Deprecated(since = "9.0.0", forRemoval = true) public class DataExists extends AbstractRead { private static final long serialVersionUID = 1L; @@ -26,12 +26,12 @@ public class DataExists extends AbstractRead { } @Override - public FluentFuture apply(DOMStoreReadTransaction readDelegate) { + public FluentFuture apply(final DOMStoreReadTransaction readDelegate) { return readDelegate.exists(getPath()); } @Override - public void processResponse(Object response, SettableFuture returnFuture) { + public void processResponse(final Object response, final SettableFuture returnFuture) { if (DataExistsReply.isSerializedType(response)) { returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists())); } else { @@ -41,7 +41,7 @@ public class DataExists extends AbstractRead { } @Override - protected AbstractRead newInstance(short withVersion) { + protected AbstractRead newInstance(final short withVersion) { return new DataExists(getPath(), withVersion); } @@ -50,7 +50,7 @@ public class DataExists extends AbstractRead { return (DataExists)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof DataExists; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java index a57df0ecda..799cd8b86e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java @@ -5,7 +5,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.messages; import com.google.common.base.Preconditions; @@ -13,6 +12,7 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +@Deprecated(since = "9.0.0", forRemoval = true) public class DataExistsReply extends VersionedExternalizableMessage { private static final long 
serialVersionUID = 1L; @@ -31,13 +31,13 @@ public class DataExistsReply extends VersionedExternalizableMessage { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); exists = in.readBoolean(); } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); out.writeBoolean(exists); } @@ -47,7 +47,7 @@ public class DataExistsReply extends VersionedExternalizableMessage { return (DataExistsReply)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof DataExistsReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java index 03ae771ca1..63f39519f5 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java @@ -9,8 +9,8 @@ package org.opendaylight.controller.cluster.datastore.messages; import static java.util.Objects.requireNonNull; -import java.util.Collection; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import java.util.List; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; /** * A message about a DataTree having been changed. The message is not @@ -19,9 +19,9 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; * candidate. 
*/ public final class DataTreeChanged { - private final Collection<DataTreeCandidate> changes; + private final List<DataTreeCandidate> changes; - public DataTreeChanged(final Collection<DataTreeCandidate> changes) { + public DataTreeChanged(final List<DataTreeCandidate> changes) { this.changes = requireNonNull(changes); } @@ -30,7 +30,7 @@ public final class DataTreeChanged { * * @return Change events */ - public Collection<DataTreeCandidate> getChanges() { + public List<DataTreeCandidate> getChanges() { return changes; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java index d50079e6a1..06e898e09a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java @@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages; import java.io.Serializable; public final class DataTreeChangedReply implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply(); @@ -21,6 +22,7 @@ public final class DataTreeChangedReply implements Serializable { return INSTANCE; } + @java.io.Serial private Object readResolve() { return INSTANCE; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java deleted file mode 100644 index c7ee83a819..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore.messages; - -import java.io.Externalizable; -import java.io.ObjectInput; -import java.io.ObjectOutput; - -/** - * Externalizable with no data. 
- * - * @author Thomas Pantelis - */ -public class EmptyExternalizable implements Externalizable { - - @Override - public void readExternal(ObjectInput in) { - } - - @Override - public void writeExternal(ObjectOutput out) { - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java index 2042e49d65..fbc66a4d05 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java @@ -20,16 +20,17 @@ import org.opendaylight.controller.cluster.datastore.ReadWriteShardDataTreeTrans * * @author Thomas Pantelis */ -public class ForwardedReadyTransaction { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class ForwardedReadyTransaction { private final TransactionIdentifier transactionId; private final ReadWriteShardDataTreeTransaction transaction; private final boolean doImmediateCommit; private final short txnClientVersion; - private @Nullable final SortedSet participatingShardNames; + private final @Nullable SortedSet participatingShardNames; - public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion, - ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit, - Optional> participatingShardNames) { + public ForwardedReadyTransaction(final TransactionIdentifier transactionId, final short txnClientVersion, + final ReadWriteShardDataTreeTransaction transaction, final boolean doImmediateCommit, + final Optional> participatingShardNames) { this.transactionId = requireNonNull(transactionId); this.transaction = requireNonNull(transaction); this.txnClientVersion = txnClientVersion; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java index f1e7fb70b5..30ac1a9eb7 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java @@ -14,6 +14,7 @@ import org.eclipse.jdt.annotation.NonNull; * Request a shard to report the clients it knows about. Shard is required to respond with {@link GetKnownClientsReply}. 
*/ public final class GetKnownClients implements Serializable { + @java.io.Serial private static final long serialVersionUID = 1L; public static final @NonNull GetKnownClients INSTANCE = new GetKnownClients(); @@ -22,6 +23,7 @@ public final class GetKnownClients implements Serializable { } + @java.io.Serial private Object readResolve() { return INSTANCE; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java index e4b9174f1e..fc0df4a951 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java @@ -11,7 +11,7 @@ import static java.util.Objects.requireNonNull; import org.apache.commons.lang3.ObjectUtils; import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; /** * Local message sent in reply to FindPrimaryShard to indicate the primary shard is local to the caller. @@ -19,11 +19,11 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; * @author Thomas Pantelis */ public class LocalPrimaryShardFound { - private final String primaryPath; private final ReadOnlyDataTree localShardDataTree; - public LocalPrimaryShardFound(@NonNull String primaryPath, @NonNull ReadOnlyDataTree localShardDataTree) { + public LocalPrimaryShardFound(final @NonNull String primaryPath, + final @NonNull ReadOnlyDataTree localShardDataTree) { this.primaryPath = requireNonNull(primaryPath); this.localShardDataTree = requireNonNull(localShardDataTree); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java index 1ca06216dd..c9d10a62e6 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java @@ -12,7 +12,7 @@ import static java.util.Objects.requireNonNull; import akka.actor.ActorSelection; import java.util.Optional; import org.eclipse.jdt.annotation.NonNull; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; /** * Local message DTO that contains information about the primary shard. 
@@ -24,17 +24,17 @@ public class PrimaryShardInfo { private final short primaryShardVersion; private final ReadOnlyDataTree localShardDataTree; - public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion, - @NonNull ReadOnlyDataTree localShardDataTree) { + public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion, + final @NonNull ReadOnlyDataTree localShardDataTree) { this.primaryShardActor = requireNonNull(primaryShardActor); this.primaryShardVersion = primaryShardVersion; this.localShardDataTree = requireNonNull(localShardDataTree); } - public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion) { + public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion) { this.primaryShardActor = requireNonNull(primaryShardActor); this.primaryShardVersion = primaryShardVersion; - this.localShardDataTree = null; + localShardDataTree = null; } /** diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java index 1113a854bf..8172d64d52 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java @@ -16,6 +16,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +@Deprecated(since = "9.0.0", forRemoval = true) public class ReadData extends AbstractRead> { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java index 099ca228cd..2ed53ad050 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java @@ -13,6 +13,7 @@ import java.io.ObjectOutput; import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +@Deprecated(since = "9.0.0", forRemoval = true) public class ReadDataReply extends VersionedExternalizableMessage { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java index d2957a4a54..55ab132db6 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java @@ -14,7 +14,7 @@ import java.util.SortedSet; import org.eclipse.jdt.annotation.Nullable; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.opendaylight.controller.cluster.datastore.DataStoreVersions; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * Message notifying the shard leader to apply modifications which have been @@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification * to be sent out to a remote system, it needs to be intercepted by {@link ReadyLocalTransactionSerializer} * and turned into {@link BatchedModifications}. */ +@Deprecated(since = "9.0.0", forRemoval = true) public final class ReadyLocalTransaction { private final DataTreeModification modification; private final TransactionIdentifier transactionId; @@ -33,7 +34,7 @@ public final class ReadyLocalTransaction { private short remoteVersion = DataStoreVersions.CURRENT_VERSION; public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification, - final boolean doCommitOnReady, Optional> participatingShardNames) { + final boolean doCommitOnReady, final Optional> participatingShardNames) { this.transactionId = requireNonNull(transactionId); this.modification = requireNonNull(modification); this.doCommitOnReady = doCommitOnReady; @@ -56,7 +57,7 @@ public final class ReadyLocalTransaction { return remoteVersion; } - public void setRemoteVersion(short remoteVersion) { + public void setRemoteVersion(final short remoteVersion) { this.remoteVersion = remoteVersion; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java index c44f9105c0..7346a8c07e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java @@ -24,8 +24,8 @@ import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModifi * into akka serialization to allow forwarding of ReadyLocalTransaction to remote * shards. 
*/ +@Deprecated(since = "9.0.0", forRemoval = true) public final class ReadyLocalTransactionSerializer extends JSerializer { - private final ExtendedActorSystem system; public ReadyLocalTransactionSerializer(final ExtendedActorSystem system) { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java index 5ddc77f8f6..a341c72333 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java @@ -13,6 +13,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import org.opendaylight.controller.cluster.datastore.DataStoreVersions; +@Deprecated(since = "9.0.0", forRemoval = true) public class ReadyTransactionReply extends VersionedExternalizableMessage { private static final long serialVersionUID = 1L; @@ -21,11 +22,11 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage { public ReadyTransactionReply() { } - public ReadyTransactionReply(String cohortPath) { + public ReadyTransactionReply(final String cohortPath) { this(cohortPath, DataStoreVersions.CURRENT_VERSION); } - public ReadyTransactionReply(String cohortPath, short version) { + public ReadyTransactionReply(final String cohortPath, final short version) { super(version); this.cohortPath = cohortPath; } @@ -35,22 +36,22 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage { } @Override - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); cohortPath = in.readUTF(); } @Override - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); out.writeUTF(cohortPath); } - public static ReadyTransactionReply fromSerializable(Object serializable) { + public static ReadyTransactionReply fromSerializable(final Object serializable) { return (ReadyTransactionReply)serializable; } - public static boolean isSerializedType(Object message) { + public static boolean isSerializedType(final Object message) { return message instanceof ReadyTransactionReply; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java index cbf2cf9e0f..c92670c971 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java @@ -9,11 +9,10 @@ package org.opendaylight.controller.cluster.datastore.messages; import static java.util.Objects.requireNonNull; -import java.util.Optional; import org.eclipse.jdt.annotation.NonNull; import org.eclipse.jdt.annotation.Nullable; import 
org.opendaylight.controller.cluster.notifications.LeaderStateChanged; -import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; +import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree; /** * A local message derived from LeaderStateChanged containing additional Shard-specific info that is sent @@ -22,22 +21,22 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree; * * @author Thomas Pantelis */ -public class ShardLeaderStateChanged extends LeaderStateChanged { - private final ReadOnlyDataTree localShardDataTree; +public final class ShardLeaderStateChanged extends LeaderStateChanged { + private final @Nullable ReadOnlyDataTree localShardDataTree; - public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, - @NonNull ReadOnlyDataTree localShardDataTree, short leaderPayloadVersion) { + public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId, + final @NonNull ReadOnlyDataTree localShardDataTree, final short leaderPayloadVersion) { super(memberId, leaderId, leaderPayloadVersion); this.localShardDataTree = requireNonNull(localShardDataTree); } - public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, - short leaderPayloadVersion) { + public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId, + final short leaderPayloadVersion) { super(memberId, leaderId, leaderPayloadVersion); - this.localShardDataTree = null; + localShardDataTree = null; } - public @NonNull Optional getLocalShardDataTree() { - return Optional.ofNullable(localShardDataTree); + public @Nullable ReadOnlyDataTree localShardDataTree() { + return localShardDataTree; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java deleted file mode 100644 index 4f74bda43e..0000000000 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v1.0 which accompanies this distribution, - * and is available at http://www.eclipse.org/legal/epl-v10.html - */ -package org.opendaylight.controller.cluster.datastore.messages; - -import java.io.Serializable; - -/** - * A reply message indicating success. 
- * - * @author Thomas Pantelis - */ -public final class SuccessReply implements Serializable { - private static final long serialVersionUID = 1L; - - public static final SuccessReply INSTANCE = new SuccessReply(); - - private SuccessReply() { - } -} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java index ad32ecfb69..16e59cc233 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java @@ -7,11 +7,14 @@ */ package org.opendaylight.controller.cluster.datastore.messages; +import static java.util.Objects.requireNonNull; + +import org.eclipse.jdt.annotation.NonNullByDefault; import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext; -import org.opendaylight.yangtools.yang.model.spi.AbstractEffectiveModelContextProvider; -public class UpdateSchemaContext extends AbstractEffectiveModelContextProvider { - public UpdateSchemaContext(final EffectiveModelContext modelContext) { - super(modelContext); +@NonNullByDefault +public record UpdateSchemaContext(EffectiveModelContext modelContext) { + public UpdateSchemaContext { + requireNonNull(modelContext); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java index 687905d722..9548a7ebdc 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java @@ -20,6 +20,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVer * * @author Thomas Pantelis */ +@Deprecated(since = "9.0.0", forRemoval = true) public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage { private static final long serialVersionUID = 1L; @@ -38,10 +39,10 @@ public abstract class VersionedExternalizableMessage implements Externalizable, } protected final @NonNull NormalizedNodeStreamVersion getStreamVersion() { - if (version >= DataStoreVersions.PHOSPHORUS_VERSION) { + if (version >= DataStoreVersions.POTASSIUM_VERSION) { + return NormalizedNodeStreamVersion.POTASSIUM; + } else if (version >= DataStoreVersions.PHOSPHORUS_VERSION) { return NormalizedNodeStreamVersion.MAGNESIUM; - } else if (version == DataStoreVersions.SODIUM_SR1_VERSION) { - return NormalizedNodeStreamVersion.SODIUM_SR1; } else { throw new IllegalStateException("Unsupported version " + version); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java index 33bd4d45e1..3eec9a4a46 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java @@ -5,7 +5,6 @@ * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ - package org.opendaylight.controller.cluster.datastore.modification; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -13,7 +12,10 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; /** * Base class to be used for all simple modifications that can be applied to a DOMStoreTransaction. */ +@Deprecated(since = "9.0.0", forRemoval = true) public abstract class AbstractModification implements Modification { + @java.io.Serial + private static final long serialVersionUID = 2647778426312509718L; private YangInstanceIdentifier path; private short version; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java index b59132fe87..3705707de2 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java @@ -16,6 +16,7 @@ import java.util.List; * A CompositeModification gets stored in the transaction log for a Shard. During recovery when the transaction log * is being replayed a DOMStoreWriteTransaction could be created and a CompositeModification could be applied to it. */ +@Deprecated(since = "9.0.0", forRemoval = true) public interface CompositeModification extends Modification { /** * Get a list of modifications contained by this composite. diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java index 38517aa04b..267dfa8368 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java @@ -14,14 +14,16 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions; import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils; import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * DeleteModification store all the parameters required to delete a path from the data tree. 
*/ -public class DeleteModification extends AbstractModification { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class DeleteModification extends AbstractModification { + @java.io.Serial private static final long serialVersionUID = 1L; public DeleteModification() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java index 098a89521a..9244a38c80 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java @@ -13,13 +13,14 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * MergeModification stores all the parameters required to merge data into the specified path. */ -public class MergeModification extends WriteModification { +@Deprecated(since = "9.0.0", forRemoval = true) +public final class MergeModification extends WriteModification { private static final long serialVersionUID = 1L; public MergeModification() { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java index 59ca8eb367..e7757f36fc 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java @@ -12,8 +12,8 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * Represents a modification to the data store. 
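Further down in this diff, the persisted payload classes (AbortTransactionPayload, CloseLocalHistoryPayload, CommitTransactionPayload) drop their nested AbstractProxy implementations in favour of small top-level serial proxies (AT, CH, CT) built around a shared SerialForm contract. All of them rely on the standard writeReplace/readResolve proxy idiom. The sketch below is illustrative only — ExamplePayload and EP are invented names and are not part of this patch — and shows the minimal shape of that idiom:

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    // Illustrative payload: always serializes through its proxy.
    final class ExamplePayload implements Serializable {
        private final byte[] serialized;

        ExamplePayload(final byte[] serialized) {
            this.serialized = serialized;
        }

        byte[] bytes() {
            return serialized;
        }

        // Java serialization substitutes the proxy for this object on write.
        private Object writeReplace() {
            return new EP(serialized);
        }
    }

    // Illustrative proxy: carries only the raw bytes on the wire and
    // resolves back to the payload object when deserialized.
    final class EP implements Externalizable {
        private byte[] bytes;

        public EP() {
            // public no-argument constructor required by Externalizable
        }

        EP(final byte[] bytes) {
            this.bytes = bytes;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeInt(bytes.length);
            out.write(bytes);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            bytes = new byte[in.readInt()];
            in.readFully(bytes);
        }

        // Java serialization substitutes the resolved payload for the proxy on read.
        private Object readResolve() {
            return new ExamplePayload(bytes);
        }
    }

The real proxies in this patch additionally decode an identifier from the byte stream during readExternal (see AT below), but the replace/resolve round trip is the same.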
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java index a9ffe9b1ba..26e049089e 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java @@ -20,14 +20,15 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions; import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage; import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * MutableCompositeModification is just a mutable version of a CompositeModification. */ +@Deprecated(since = "9.0.0", forRemoval = true) public class MutableCompositeModification extends VersionedExternalizableMessage implements CompositeModification { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java index 9e00d4b174..dc2d3fff43 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java @@ -16,13 +16,14 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput; import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification; /** * WriteModification stores all the parameters required to write data to the specified path. 
*/ +@Deprecated(since = "9.0.0", forRemoval = true) public class WriteModification extends AbstractModification { private static final long serialVersionUID = 1L; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java new file mode 100644 index 0000000000..8002815c4a --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.datastore.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.common.io.ByteStreams; +import java.io.IOException; +import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm; + +/** + * Serialization proxy for {@link AbortTransactionPayload}. + */ +final class AT implements SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private TransactionIdentifier identifier; + private byte[] bytes; + + @SuppressWarnings("checkstyle:RedundantModifier") + public AT() { + // For Externalizable + } + + AT(final byte[] bytes) { + this.bytes = requireNonNull(bytes); + } + + @Override + public byte[] bytes() { + return bytes; + } + + @Override + public void readExternal(final byte[] newBytes) throws IOException { + bytes = requireNonNull(newBytes); + identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes))); + } + + @Override + public Object readResolve() { + return new AbortTransactionPayload(identifier, bytes); + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java index 0e34756ced..3c765be615 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted; import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; -import java.io.DataInput; import java.io.IOException; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; import org.slf4j.Logger; @@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory; * @author Robert Varga */ public final class AbortTransactionPayload extends AbstractIdentifiablePayload { - private static final class Proxy extends AbstractProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly 
isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. - @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final byte[] serialized) { - super(serialized); - } - - @Override - protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException { - return TransactionIdentifier.readFrom(in); - } - - @Override - protected AbortTransactionPayload createObject(final TransactionIdentifier identifier, - final byte[] serialized) { - return new AbortTransactionPayload(identifier, serialized); - } - } - private static final Logger LOG = LoggerFactory.getLogger(AbortTransactionPayload.class); + @java.io.Serial private static final long serialVersionUID = 1L; + private static final int PROXY_SIZE = externalizableProxySize(AT::new); AbortTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) { super(transactionId, serialized); @@ -62,13 +37,18 @@ public final class AbortTransactionPayload extends AbstractIdentifiablePayload getModifiedChild(final PathArgument identifier) { + public final DataTreeCandidateNode modifiedChild(final PathArgument identifier) { throw new UnsupportedOperationException("Not implemented"); } @Override - public final ModificationType getModificationType() { + public final ModificationType modificationType() { return type; } @Override - public final Optional getDataBefore() { + public final NormalizedNode dataBefore() { throw new UnsupportedOperationException("Before-image not available after serialization"); } static DataTreeCandidateNode createUnmodified() { return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) { @Override - public PathArgument getIdentifier() { + public PathArgument name() { throw new UnsupportedOperationException("Root node does not have an identifier"); } @Override - public Optional getDataAfter() { + public NormalizedNode dataAfter() { throw new UnsupportedOperationException("After-image not available after serialization"); } @Override - public Collection getChildNodes() { + public Collection childNodes() { throw new UnsupportedOperationException("Children not available after serialization"); } }; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java index de9b0bba8a..885b6c5336 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java @@ -10,64 +10,78 @@ package org.opendaylight.controller.cluster.datastore.persisted; import static com.google.common.base.Verify.verifyNotNull; import static java.util.Objects.requireNonNull; -import com.google.common.io.ByteStreams; -import java.io.DataInput; +import com.google.common.base.MoreObjects; import java.io.Externalizable; import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.io.Serializable; +import java.util.function.Function; +import org.apache.commons.lang3.SerializationUtils; import org.eclipse.jdt.annotation.NonNull; -import 
org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.yangtools.concepts.Identifiable; +import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload; import org.opendaylight.yangtools.concepts.Identifier; /** - * Abstract base class for {@link Payload}s which hold a single {@link Identifier}. - * - * @author Robert Varga + * Abstract base class for {@link IdentifiablePayload}s which hold a single {@link Identifier}. */ -public abstract class AbstractIdentifiablePayload extends IdentifiablePayload - implements Serializable { - protected abstract static class AbstractProxy implements Externalizable { - private static final long serialVersionUID = 1L; - - private byte[] serialized; - private T identifier; - - public AbstractProxy() { - // For Externalizable - } - - protected AbstractProxy(final byte[] serialized) { - this.serialized = requireNonNull(serialized); - } - +public abstract class AbstractIdentifiablePayload extends IdentifiablePayload { + /** + * An {@link Externalizable} with default implementations we expect our implementations to comply with. On-wire + * serialization format is defined by {@link #bytes()}. + */ + protected interface SerialForm extends Externalizable { + /** + * Return the serial form of this object contents, corresponding to + * {@link AbstractIdentifiablePayload#serialized}. + * + * @return Serialized form + */ + byte[] bytes(); + + /** + * Resolve this proxy to an actual {@link AbstractIdentifiablePayload}. + * + * @return A payload. + */ + @java.io.Serial + Object readResolve(); + + /** + * Restore state from specified serialized form. + * + * @param newBytes Serialized form, as returned by {@link #bytes()} + * @throws IOException when a deserialization problem occurs + */ + void readExternal(byte[] newBytes) throws IOException; + + /** + * {@inheritDoc} + * + *
    + * The default implementation is canonical and should never be overridden. + */ @Override - public final void writeExternal(final ObjectOutput out) throws IOException { - out.writeInt(serialized.length); - out.write(serialized); + default void readExternal(final ObjectInput in) throws IOException { + final var bytes = new byte[in.readInt()]; + in.readFully(bytes); + readExternal(bytes); } + /** + * {@inheritDoc} + * + *
    + * The default implementation is canonical and should never be overridden. + */ @Override - public final void readExternal(final ObjectInput in) throws IOException { - final int length = in.readInt(); - serialized = new byte[length]; - in.readFully(serialized); - identifier = verifyNotNull(readIdentifier(ByteStreams.newDataInput(serialized))); - } - - protected final Object readResolve() { - return verifyNotNull(createObject(identifier, serialized)); + default void writeExternal(final ObjectOutput out) throws IOException { + final var bytes = bytes(); + out.writeInt(bytes.length); + out.write(bytes); } - - protected abstract @NonNull T readIdentifier(@NonNull DataInput in) throws IOException; - - @SuppressWarnings("checkstyle:hiddenField") - protected abstract @NonNull Identifiable createObject(@NonNull T identifier, byte @NonNull[] serialized); } + @java.io.Serial private static final long serialVersionUID = 1L; private final byte @NonNull [] serialized; @@ -88,10 +102,34 @@ public abstract class AbstractIdentifiablePayload extends return serialized.length; } - protected final Object writeReplace() { + protected final byte @NonNull [] serialized() { + return serialized; + } + + @Override + public final int serializedSize() { + // TODO: this is not entirely accurate, as the serialization stream has additional overheads: + // - 3 bytes for each block of data <256 bytes + // - 5 bytes for each block of data >=256 bytes + // - each block of data is limited to 1024 bytes as per serialization spec + return size() + externalizableProxySize(); + } + + @Override + public final String toString() { + return MoreObjects.toStringHelper(this).add("identifier", identifier).add("size", size()).toString(); + } + + @Override + public final Object writeReplace() { return verifyNotNull(externalizableProxy(serialized)); } - @SuppressWarnings("checkstyle:hiddenField") - protected abstract @NonNull AbstractProxy externalizableProxy(byte @NonNull[] serialized); + protected abstract @NonNull SerialForm externalizableProxy(byte @NonNull[] serialized); + + protected abstract int externalizableProxySize(); + + protected static final int externalizableProxySize(final Function constructor) { + return SerializationUtils.serialize(constructor.apply(new byte[0])).length; + } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java index 5e85434e4a..e87ce58a8b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java @@ -31,9 +31,9 @@ abstract class AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps static @NonNull ShardSnapshotState versionedDeserialize(final ObjectInput in) throws IOException { final PayloadVersion version = PayloadVersion.readFrom(in); switch (version) { - case SODIUM_SR1: + case CHLORINE_SR2: return new ShardSnapshotState(readSnapshot(in), true); - case MAGNESIUM: + case POTASSIUM: return new ShardSnapshotState(readSnapshot(in), false); case TEST_FUTURE_VERSION: case TEST_PAST_VERSION: @@ -75,9 +75,9 @@ abstract class 
AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps private void versionedSerialize(final ObjectOutput out, final PayloadVersion version) throws IOException { switch (version) { - case SODIUM_SR1: - case MAGNESIUM: - // Sodium and Magnesium snapshots use Java Serialization, but differ in stream format + case CHLORINE_SR2: + case POTASSIUM: + // Sodium onwards snapshots use Java Serialization, but differ in stream format out.writeObject(this); return; case TEST_FUTURE_VERSION: diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java new file mode 100644 index 0000000000..a0af84139a --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.datastore.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import com.google.common.io.ByteStreams; +import java.io.IOException; +import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; +import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm; + +/** + * Serialization proxy for {@link CreateLocalHistoryPayload}. + */ +final class CH implements SerialForm { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private LocalHistoryIdentifier identifier; + private byte[] bytes; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CH() { + // For Externalizable + } + + CH(final byte[] bytes) { + this.bytes = requireNonNull(bytes); + } + + @Override + public byte[] bytes() { + return bytes; + } + + @Override + public void readExternal(final byte[] newBytes) throws IOException { + bytes = requireNonNull(newBytes); + identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes))); + } + + @Override + public Object readResolve() { + return new CreateLocalHistoryPayload(identifier, bytes); + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java new file mode 100644 index 0000000000..2530d338b4 --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.cluster.datastore.persisted; + +import static com.google.common.base.Verify.verifyNotNull; +import static java.util.Objects.requireNonNull; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.StreamCorruptedException; +import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Chunked; +import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Simple; +import org.opendaylight.controller.cluster.io.ChunkedByteArray; + +/** + * Serialization proxy for {@link CommitTransactionPayload}. + */ +final class CT implements Externalizable { + @java.io.Serial + private static final long serialVersionUID = 1L; + + private CommitTransactionPayload payload; + + @SuppressWarnings("checkstyle:RedundantModifier") + public CT() { + // For Externalizable + } + + CT(final CommitTransactionPayload payload) { + this.payload = requireNonNull(payload); + } + + @Override + public void writeExternal(final ObjectOutput out) throws IOException { + out.writeInt(payload.size()); + payload.writeBytes(out); + } + + @Override + public void readExternal(final ObjectInput in) throws IOException { + final int length = in.readInt(); + if (length < 0) { + throw new StreamCorruptedException("Invalid payload length " + length); + } else if (length < CommitTransactionPayload.MAX_ARRAY_SIZE) { + final byte[] serialized = new byte[length]; + in.readFully(serialized); + payload = new Simple(serialized); + } else { + payload = new Chunked(ChunkedByteArray.readFrom(in, length, CommitTransactionPayload.MAX_ARRAY_SIZE)); + } + } + + @java.io.Serial + private Object readResolve() { + return verifyNotNull(payload); + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java index 9acc113a42..9d6f526616 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java @@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted; import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; -import java.io.DataInput; import java.io.IOException; import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier; import org.slf4j.Logger; @@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory; * @author Robert Varga */ public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload { - private static final class Proxy extends AbstractProxy { - private static final long serialVersionUID = 1L; - - // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't - // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection. 
- @SuppressWarnings("checkstyle:RedundantModifier") - public Proxy() { - // For Externalizable - } - - Proxy(final byte[] serialized) { - super(serialized); - } - - @Override - protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException { - return LocalHistoryIdentifier.readFrom(in); - } - - @Override - protected CloseLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier, - final byte[] serialized) { - return new CloseLocalHistoryPayload(identifier, serialized); - } - } - private static final Logger LOG = LoggerFactory.getLogger(CloseLocalHistoryPayload.class); + @java.io.Serial private static final long serialVersionUID = 1L; + private static final int PROXY_SIZE = externalizableProxySize(CH::new); CloseLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) { super(historyId, serialized); @@ -62,13 +37,18 @@ public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload< } catch (IOException e) { // This should never happen LOG.error("Failed to serialize {}", historyId, e); - throw new RuntimeException("Failed to serialize " + historyId, e); + throw new IllegalStateException("Failed to serialize " + historyId, e); } return new CloseLocalHistoryPayload(historyId, out.toByteArray()); } @Override - protected Proxy externalizableProxy(final byte[] serialized) { - return new Proxy(serialized); + protected DH externalizableProxy(final byte[] serialized) { + return new DH(serialized); + } + + @Override + protected int externalizableProxySize() { + return PROXY_SIZE; } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java index 5337530ece..45cbcc851a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java @@ -7,35 +7,31 @@ */ package org.opendaylight.controller.cluster.datastore.persisted; -import static com.google.common.base.Verify.verifyNotNull; import static com.google.common.math.IntMath.ceilingPowerOfTwo; import static java.util.Objects.requireNonNull; import com.google.common.annotations.Beta; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; import com.google.common.io.ByteStreams; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutputStream; -import java.io.Externalizable; import java.io.IOException; -import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.Serializable; -import java.io.StreamCorruptedException; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.Map.Entry; +import org.apache.commons.lang3.SerializationUtils; import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.jdt.annotation.NonNullByDefault; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; -import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion; import org.opendaylight.controller.cluster.io.ChunkedByteArray; import 
org.opendaylight.controller.cluster.io.ChunkedOutputStream; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload; -import org.opendaylight.yangtools.concepts.Either; +import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload; import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate; +import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion; import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter; +import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,30 +42,42 @@ import org.slf4j.LoggerFactory; * @author Robert Varga */ @Beta -public abstract class CommitTransactionPayload extends IdentifiablePayload +public abstract sealed class CommitTransactionPayload extends IdentifiablePayload implements Serializable { + @NonNullByDefault + public record CandidateTransaction( + TransactionIdentifier transactionId, + DataTreeCandidate candidate, + NormalizedNodeStreamVersion streamVersion) { + public CandidateTransaction { + requireNonNull(transactionId); + requireNonNull(candidate); + requireNonNull(streamVersion); + } + } + private static final Logger LOG = LoggerFactory.getLogger(CommitTransactionPayload.class); private static final long serialVersionUID = 1L; - private static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger( + static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger( "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024)); - private volatile Entry candidate = null; - - CommitTransactionPayload() { + private volatile CandidateTransaction candidate = null; + private CommitTransactionPayload() { + // hidden on purpose } public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId, final DataTreeCandidate candidate, final PayloadVersion version, final int initialSerializedBufferCapacity) throws IOException { - final ChunkedOutputStream cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE); - try (DataOutputStream dos = new DataOutputStream(cos)) { + final var cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE); + try (var dos = new DataOutputStream(cos)) { transactionId.writeTo(dos); DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, version, candidate); } - final Either source = cos.toVariant(); + final var source = cos.toVariant(); LOG.debug("Initial buffer capacity {}, actual serialized size {}", initialSerializedBufferCapacity, cos.size()); return source.isFirst() ? 
new Simple(source.getFirst()) : new Chunked(source.getSecond()); } @@ -86,8 +94,8 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload getCandidate() throws IOException { - Entry localCandidate = candidate; + public @NonNull CandidateTransaction getCandidate() throws IOException { + var localCandidate = candidate; if (localCandidate == null) { synchronized (this) { localCandidate = candidate; @@ -99,42 +107,61 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload getCandidate( - final ReusableStreamReceiver receiver) throws IOException { - final DataInput in = newDataInput(); - return new SimpleImmutableEntry<>(TransactionIdentifier.readFrom(in), - DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver)); + public final @NonNull CandidateTransaction getCandidate(final ReusableStreamReceiver receiver) throws IOException { + final var in = newDataInput(); + final var transactionId = TransactionIdentifier.readFrom(in); + final var readCandidate = DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver); + + return new CandidateTransaction(transactionId, readCandidate.candidate(), readCandidate.version()); } @Override public TransactionIdentifier getIdentifier() { try { - return getCandidate().getKey(); + return getCandidate().transactionId(); } catch (IOException e) { throw new IllegalStateException("Candidate deserialization failed.", e); } } + @Override + public final int serializedSize() { + // TODO: this is not entirely accurate as the the byte[] can be chunked by the serialization stream + return ProxySizeHolder.PROXY_SIZE + size(); + } + /** * The cached candidate needs to be cleared after it is done applying to the DataTree, otherwise it would be keeping * deserialized in memory which are not needed anymore leading to wasted memory. This lets the payload know that * this was the last time the candidate was needed ant it is safe to be cleared. */ - public Entry acquireCandidate() throws IOException { - final Entry localCandidate = getCandidate(); + public @NonNull CandidateTransaction acquireCandidate() throws IOException { + final var localCandidate = getCandidate(); candidate = null; return localCandidate; } + @Override + public final String toString() { + final var helper = MoreObjects.toStringHelper(this); + final var localCandidate = candidate; + if (localCandidate != null) { + helper.add("identifier", candidate.transactionId()); + } + return helper.add("size", size()).toString(); + } + abstract void writeBytes(ObjectOutput out) throws IOException; abstract DataInput newDataInput(); - final Object writeReplace() { - return new Proxy(this); + @Override + public final Object writeReplace() { + return new CT(this); } - private static final class Simple extends CommitTransactionPayload { + static final class Simple extends CommitTransactionPayload { + @java.io.Serial private static final long serialVersionUID = 1L; private final byte[] serialized; @@ -159,7 +186,8 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload