<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-actor_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-cluster_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-osgi_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-persistence_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-protobuf_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-remote_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-slf4j_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-stream_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
</dependency>
</dependencies>
# This is useful when you are uncertain of what configuration is used.
log-config-on-start = off
- # Log at info level when messages are sent to dead letters.
+ # Log at info level when messages are sent to dead letters, or published to
+ # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`.
# Possible values:
# on: all dead letters are logged
# off: no logging of dead letters
# Possibility to turn off logging of dead letters while the actor system
# is shutting down. Logging is only done when enabled by 'log-dead-letters'
# setting.
- log-dead-letters-during-shutdown = on
+ log-dead-letters-during-shutdown = off
+
+ # When log-dead-letters is enabled, this will re-enable the logging after the configured duration.
+ # infinite: suspend the logging forever;
+ # or a duration (e.g. 5 minutes), after which the logging will be re-enabled.
+ log-dead-letters-suspend-duration = 5 minutes
# List FQCN of extensions which shall be loaded at actor system startup.
# Library extensions are regular extensions that are loaded at startup and are
#
# Should not be set by end user applications in 'application.conf', use the extensions property for that
#
- library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension"]
+ library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]
# List FQCN of extensions which shall be loaded at actor system startup.
# Should be on the format: 'extensions = ["foo", "bar"]' etc.
# terminate the ActorSystem itself, with or without using CoordinatedShutdown.
jvm-shutdown-hooks = on
+ # Version must be the same across all modules and if they are different the startup
+ # will fail. It's possible, but not recommended, to disable this check, and only log a warning,
+ # by setting this property to `off`.
+ fail-mixed-versions = on
+
+ # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running
+ # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops
+ # with this toggle.
+ java-flight-recorder {
+ enabled = true
+ }
+
actor {
# Either one of "local", "remote" or "cluster" or the
# In addition to the default there is akka.actor.StoppingSupervisorStrategy.
guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy"
- # Timeout for ActorSystem.actorOf
+ # Timeout for Extension creation and a few other potentially blocking
+ # initialization tasks.
creation-timeout = 20s
# Serializes and deserializes (non-primitive) messages to ensure immutability,
# this is only intended for testing.
serialize-messages = off
- # Additional serialization bindings which are enabled automatically when allow-java-serialization is disabled.
- # settings are provided
- java-serialization-disabled-additional-serialization-bindings = {}
-
# Serializes and deserializes creators (in Props) to ensure that they can be
# sent over the network, this is only intended for testing. Purely local deployments
# as marked with deploy.scope == LocalScope are exempt from verification.
serialize-creators = off
+ # If serialize-messages or serialize-creators are enabled classes that starts with
+ # a prefix listed here are not verified.
+ no-serialization-verification-needed-class-prefix = ["akka."]
+
# Timeout for send operations to top-level actors which are in the process
# of being started. This is only relevant if using a bounded mailbox or the
# CallingThreadDispatcher for a top-level actor.
unstarted-push-timeout = 10s
+ # TypedActor deprecated since 2.6.0.
typed {
- # Default timeout for typed actor methods with non-void return type
+ # Default timeout for the deprecated TypedActor (not the new actor APIs in 2.6)
+ # methods with non-void return type.
timeout = 5s
}
# exploration will be +- 5
explore-step-size = 0.1
- # Probability of doing an exploration v.s. optmization.
+ # Probability of doing an exploration v.s. optimization.
chance-of-exploration = 0.4
# When downsizing after a long streak of underutilization, the resizer
}
# This will be used if you have set "executor = "fork-join-executor""
- # Underlying thread pool implementation is akka.dispatch.forkjoin.ForkJoinPool
+ # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool
fork-join-executor {
# Min number of threads to cap factor-based parallelism number to
parallelism-min = 8
# The parallelism factor is used to determine thread pool size using the
# following formula: ceil(available processors * factor). Resulting size
# is then bounded by the parallelism-min and parallelism-max values.
- parallelism-factor = 3.0
+ parallelism-factor = 1.0
# Max number of threads to cap factor-based parallelism number to
parallelism-max = 64
mailbox-requirement = ""
}
+ # Default separate internal dispatcher to run Akka internal tasks and actors on
+ # protecting them against starvation because of accidental blocking in user actors (which run on the
+ # default dispatcher)
+ internal-dispatcher {
+ type = "Dispatcher"
+ executor = "fork-join-executor"
+ throughput = 5
+ fork-join-executor {
+ parallelism-min = 4
+ parallelism-factor = 1.0
+ parallelism-max = 64
+ }
+ }
+
default-blocking-io-dispatcher {
type = "Dispatcher"
executor = "thread-pool-executor"
#
# This setting is a short-cut to
# - using DisabledJavaSerializer instead of JavaSerializer
- # - enable-additional-serialization-bindings = on
#
# Completely disable the use of `akka.serialization.JavaSerialization` by the
# Akka Serialization extension, instead DisabledJavaSerializer will
#
# Please note that this option does not stop you from manually invoking java serialization
#
- # The default value for this might be changed to off in future versions of Akka.
allow-java-serialization = on
+ # Log warnings when the Java serialization is used to serialize messages.
+ # Java serialization is not very performant and should not be used in production
+ # environments unless you don't care about performance and security. In that case
+ # you can turn this off.
+ warn-about-java-serializer-usage = on
+
+ # To be used with the above warn-about-java-serializer-usage
+ # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
+ # warnings are suppressed for classes extending NoSerializationVerificationNeeded
+ # to reduce noise.
+ warn-on-no-serialization-verification = on
+
# Entries for pluggable serializers and their bindings.
serializers {
java = "akka.serialization.JavaSerializer"
bytes = "akka.serialization.ByteArraySerializer"
-
+ primitive-long = "akka.serialization.LongSerializer"
+ primitive-int = "akka.serialization.IntSerializer"
+ primitive-string = "akka.serialization.StringSerializer"
+ primitive-bytestring = "akka.serialization.ByteStringSerializer"
primitive-boolean = "akka.serialization.BooleanSerializer"
}
"[B" = bytes
"java.io.Serializable" = java
- // TODO issue #27330: BooleanSerializer not enabled for serialization in 2.5.x yet
- #"java.lang.Boolean" = primitive-boolean
- #"scala.Boolean" = primitive-boolean
- }
-
- # Additional serialization-bindings that are replacing Java serialization are
- # defined in this section for backwards compatibility reasons. They are included
- # by default but can be excluded for backwards compatibility with Akka 2.4.x.
- # They can be disabled with enable-additional-serialization-bindings=off.
- #
- # This should only be needed for backwards compatibility reasons.
- enable-additional-serialization-bindings = on
-
- # Additional serialization-bindings that are replacing Java serialization are
- # defined in this section for backwards compatibility reasons. They are included
- # by default but can be excluded for backwards compatibility with Akka 2.4.x.
- # They can be disabled with enable-additional-serialization-bindings=off.
- additional-serialization-bindings {
+ "java.lang.String" = primitive-string
+ "akka.util.ByteString$ByteString1C" = primitive-bytestring
+ "akka.util.ByteString$ByteString1" = primitive-bytestring
+ "akka.util.ByteString$ByteStrings" = primitive-bytestring
+ "java.lang.Long" = primitive-long
+ "scala.Long" = primitive-long
+ "java.lang.Integer" = primitive-int
+ "scala.Int" = primitive-int
+ "java.lang.Boolean" = primitive-boolean
+ "scala.Boolean" = primitive-boolean
}
- # Log warnings when the default Java serialization is used to serialize messages.
- # The default serializer uses Java serialization which is not very performant and should not
- # be used in production environments unless you don't care about performance. In that case
- # you can turn this off.
- warn-about-java-serializer-usage = on
-
- # To be used with the above warn-about-java-serializer-usage
- # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
- # warnings are suppressed for classes extending NoSerializationVerificationNeeded
- # to reduce noize.
- warn-on-no-serialization-verification = on
-
# Configuration namespace of serialization identifiers.
# Each serializer implementation must have an entry in the following format:
# `akka.actor.serialization-identifiers."FQCN" = ID`
serialization-identifiers {
"akka.serialization.JavaSerializer" = 1
"akka.serialization.ByteArraySerializer" = 4
- "akka.serialization.BooleanSerializer" = 35
- }
-
- # Configuration items which are used by the akka.actor.ActorDSL._ methods
- dsl {
- # Maximum queue size of the actor created by newInbox(); this protects
- # against faulty programs which use select() and consistently miss messages
- inbox-size = 1000
- # Default timeout to assume for operations like Inbox.receive et al
- default-timeout = 5s
+ primitive-long = 18
+ primitive-int = 19
+ primitive-string = 20
+ primitive-bytestring = 21
+ primitive-boolean = 35
}
}
serialization.protobuf {
-
- # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
- # It can be exact class name or name of super class or interfaces (one level).
- # This is useful when a class is not used for serialization any more and therefore removed
- # from `serialization-bindings`, but should still be possible to deserialize.
+ # deprecated, use `allowed-classes` instead
whitelist-class = [
"com.google.protobuf.GeneratedMessage",
"com.google.protobuf.GeneratedMessageV3",
"scalapb.GeneratedMessageCompanion",
- "akka.protobuf.GeneratedMessage"
+ "akka.protobuf.GeneratedMessage",
+ "akka.protobufv3.internal.GeneratedMessageV3"
]
+
+ # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
+ # It can be exact class name or name of super class or interfaces (one level).
+ # This is useful when a class is not used for serialization any more and therefore removed
+ # from `serialization-bindings`, but should still be possible to deserialize.
+ allowed-classes = ${akka.serialization.protobuf.whitelist-class}
+
}
# Used to set the behavior of the scheduler.
# Fully qualified config path which holds the dispatcher configuration
# for the read/write worker actors
- worker-dispatcher = "akka.actor.default-dispatcher"
+ worker-dispatcher = "akka.actor.internal-dispatcher"
# Fully qualified config path which holds the dispatcher configuration
# for the selector management actors
- management-dispatcher = "akka.actor.default-dispatcher"
+ management-dispatcher = "akka.actor.internal-dispatcher"
# Fully qualified config path which holds the dispatcher configuration
# on which file IO tasks are scheduled
# Fully qualified config path which holds the dispatcher configuration
# for the read/write worker actors
- worker-dispatcher = "akka.actor.default-dispatcher"
+ worker-dispatcher = "akka.actor.internal-dispatcher"
# Fully qualified config path which holds the dispatcher configuration
# for the selector management actors
- management-dispatcher = "akka.actor.default-dispatcher"
+ management-dispatcher = "akka.actor.internal-dispatcher"
}
udp-connected {
# Fully qualified config path which holds the dispatcher configuration
# for the read/write worker actors
- worker-dispatcher = "akka.actor.default-dispatcher"
+ worker-dispatcher = "akka.actor.internal-dispatcher"
# Fully qualified config path which holds the dispatcher configuration
# for the selector management actors
- management-dispatcher = "akka.actor.default-dispatcher"
+ management-dispatcher = "akka.actor.internal-dispatcher"
}
dns {
# Fully qualified config path which holds the dispatcher configuration
# for the manager and resolver router actors.
# For actual router configuration see akka.actor.deployment./IO-DNS/*
- dispatcher = "akka.actor.default-dispatcher"
+ dispatcher = "akka.actor.internal-dispatcher"
# Name of the subconfig at path akka.io.dns, see inet-address below
#
# This property is related to `akka.jvm-shutdown-hooks` above.
run-by-jvm-shutdown-hook = on
+ # Run the coordinated shutdown when ActorSystem.terminate is called.
+ # Enabling this and disabling terminate-actor-system is not a supported
+ # combination (will throw ConfigurationException at startup).
+ run-by-actor-system-terminate = on
+
# When Coordinated Shutdown is triggered an instance of `Reason` is
# required. That value can be used to override the default settings.
# Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
# Initial contact points of the cluster.
# The nodes to join automatically at startup.
# Comma separated full URIs defined by a string on the form of
- # "akka.tcp://system@hostname:port"
+ # "akka://system@hostname:port"
# Leave as empty if the node is supposed to be joined manually.
seed-nodes = []
# attempts.
shutdown-after-unsuccessful-join-seed-nodes = off
- # Should the 'leader' in the cluster be allowed to automatically mark
- # unreachable nodes as DOWN after a configured time of unreachability?
- # Using auto-down implies that two separate clusters will automatically be
- # formed in case of network partition.
- #
- # Don't enable this in production, see 'Auto-downing (DO NOT USE)' section
- # of Akka Cluster documentation.
- #
- # Disable with "off" or specify a duration to enable auto-down.
- # If a downing-provider-class is configured this setting is ignored.
- auto-down-unreachable-after = off
-
# Time margin after which shards or singletons that belonged to a downed/removed
# partition are created in surviving partition. The purpose of this margin is that
# in case of a network partition the persistent actors in the non-surviving partitions
# must be stopped before corresponding persistent actors are started somewhere else.
# This is useful if you implement downing strategies that handle network partitions,
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
- # It will not add any extra safety for auto-down-unreachable-after, since that is not
- # handling network partitions.
# Disable with "off" or specify a duration to enable.
+ #
+ # When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
+ # the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
+ # if this down-removal-margin is undefined.
down-removal-margin = off
# Pluggable support for downing of nodes in the cluster.
- # If this setting is left empty behavior will depend on 'auto-down-unreachable' in the following ways:
- # * if it is 'off' the `NoDowning` provider is used and no automatic downing will be performed
- # * if it is set to a duration the `AutoDowning` provider is with the configured downing duration
+ # If this setting is left empty the `NoDowning` provider is used and no automatic downing will be performed.
#
# If specified the value must be the fully qualified class name of a subclass of
# `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
# If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
# split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
# so they become part of the cluster even during a network split. The leader will
- # move `Joining` members to 'WeaklyUp' after 3 rounds of 'leader-actions-interval'
- # without convergence.
+ # move `Joining` members to 'WeaklyUp' after this configured duration without convergence.
# The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
- allow-weakly-up-members = on
+ allow-weakly-up-members = 7s
# The roles of this member. List of strings, e.g. roles = ["A", "B"].
# The roles are part of the membership information and can be used by
#<role-name>.min-nr-of-members = 1
}
+ # Application version of the deployment. Used by rolling update features
+ # to distinguish between old and new nodes. The typical convention is to use
+ # 3 digit version numbers `major.minor.patch`, but one or two digits are also
+ # supported.
+ #
+ # If no `.` is used it is interpreted as a single digit version number or as
+ # plain alphanumeric if it couldn't be parsed as a number.
+ #
+ # It may also have a qualifier at the end for 2 or 3 digit version numbers such
+ # as "1.2-RC1".
+ # For 1 digit with qualifier, 1-RC1, it is interpreted as plain alphanumeric.
+ #
+ # It has support for https://github.com/dwijnand/sbt-dynver format with `+` or
+ # `-` separator. The number of commits from the tag is handled as a numeric part.
+ # For example `1.0.0+3-73475dce26` is less than `1.0.10+10-ed316bd024` (3 < 10).
+ app-version = "0.0.0"
+
# Minimum required number of members before the leader changes member status
# of 'Joining' members to 'Up'. Typically used together with
# 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
# until the cluster has reached a certain size.
min-nr-of-members = 1
- # Enable/disable info level logging of cluster events
+ # Enable/disable info level logging of cluster events.
+ # These are logged with logger name `akka.cluster.Cluster`.
log-info = on
# Enable/disable verbose info-level logging of cluster events
# for temporary troubleshooting. Defaults to 'off'.
+ # These are logged with logger name `akka.cluster.Cluster`.
log-info-verbose = off
# Enable or disable JMX MBeans for management of the cluster
# Disable with "off".
publish-stats-interval = off
- # The id of the dispatcher to use for cluster actors. If not specified
- # default dispatcher is used.
+ # The id of the dispatcher to use for cluster actors.
# If specified you need to define the settings of the actual dispatcher.
- use-dispatcher = ""
+ use-dispatcher = "akka.actor.internal-dispatcher"
# Gossip to random node with newer or older state information, if any with
# this probability. Otherwise Gossip to any random live node.
# Number of member nodes that each member will send heartbeat messages to,
# i.e. each node will be monitored by this number of other nodes.
- monitored-by-nr-of-members = 5
+ monitored-by-nr-of-members = 9
# After the heartbeat request has been sent the first failure detection
# will start after this period, even though no heartbeat message has
}
debug {
- # log heartbeat events (very verbose, useful mostly when debugging heartbeating issues)
+ # Log heartbeat events (very verbose, useful mostly when debugging heartbeating issues).
+ # These are logged with logger name `akka.cluster.ClusterHeartbeat`.
verbose-heartbeat-logging = off
# log verbose details about gossip
# Enforce configuration compatibility checks when joining a cluster.
# Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
# when the cluster does not support this feature. Compatibility checks are always performed and warning and
- # error messsages are logged.
+ # error messages are logged.
#
# This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
# cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
"user.home", "user.name", "user.dir",
"socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
"akka.remote.secure-cookie",
+ "akka.remote.classic.netty.ssl.security",
+ # Pre 2.6 path, keep around to avoid sending things misconfigured with old paths
"akka.remote.netty.ssl.security",
"akka.remote.artery.ssl"
]
}
}
+
+#//#split-brain-resolver
+
+# To enable the split brain resolver you first need to enable the provider in your application.conf:
+# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+akka.cluster.split-brain-resolver {
+ # Select one of the available strategies (see descriptions below):
+ # static-quorum, keep-majority, keep-oldest, down-all, lease-majority
+ active-strategy = keep-majority
+
+ #//#stable-after
+ # Time margin after which shards or singletons that belonged to a downed/removed
+ # partition are created in surviving partition. The purpose of this margin is that
+ # in case of a network partition the persistent actors in the non-surviving partitions
+ # must be stopped before corresponding persistent actors are started somewhere else.
+ # This is useful if you implement downing strategies that handle network partitions,
+ # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+ # Decision is taken by the strategy when there has been no membership or
+ # reachability changes for this duration, i.e. the cluster state is stable.
+ stable-after = 20s
+ #//#stable-after
+
+ # When reachability observations by the failure detector are changed the SBR decisions
+ # are deferred until there are no changes within the 'stable-after' duration.
+ # If this continues for too long it might be an indication of an unstable system/network
+ # and it could result in delayed or conflicting decisions on separate sides of a network
+ # partition.
+ # As a precaution for that scenario all nodes are downed if no decision is made within
+ # `stable-after + down-all-when-unstable` from the first unreachability event.
+ # The measurement is reset if all unreachable have been healed, downed or removed, or
+ # if there are no changes within `stable-after * 2`.
+ # The value can be on, off, or a duration.
+ # By default it is 'on' and then it is derived to be 3/4 of stable-after, but not less than
+ # 4 seconds.
+ down-all-when-unstable = on
+
+}
+#//#split-brain-resolver
+
+# Down the unreachable nodes if the number of remaining nodes is greater than or equal to
+# the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down that
+# side of the partition. In other words, the 'size' defines the minimum number of nodes
+# that the cluster must have to be operational. If there are unreachable nodes when starting
+# up the cluster, before reaching this limit, the cluster may shutdown itself immediately.
+# This is not an issue if you start all nodes at approximately the same time.
+#
+# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
+# then both sides may down each other and thereby form two separate clusters. For example,
+# quorum-size configured to 3 in a 6 node cluster may result in a split where each side
+# consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by
+# itself. A warning is logged if this recommendation is violated.
+#//#static-quorum
+akka.cluster.split-brain-resolver.static-quorum {
+ # minimum number of nodes that the cluster must have
+ quorum-size = undefined
+
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#static-quorum
+
+# Down the unreachable nodes if the current node is in the majority part based the last known
+# membership information. Otherwise down the reachable nodes, i.e. the own part. If
+# the parts are of equal size the part containing the node with the lowest address is kept.
+# Note that if there are more than two partitions and none is in majority each part
+# will shutdown itself, terminating the whole cluster.
+#//#keep-majority
+akka.cluster.split-brain-resolver.keep-majority {
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#keep-majority
+
+# Down the part that does not contain the oldest member (current singleton).
+#
+# There is one exception to this rule if 'down-if-alone' is defined to 'on'.
+# Then, if the oldest node has partitioned from all other nodes the oldest
+# will down itself and keep all other nodes running. The strategy will not
+# down the single oldest node when it is the only remaining node in the cluster.
+#
+# Note that if the oldest node crashes the others will remove it from the cluster
+# when 'down-if-alone' is 'on', otherwise they will down themselves if the
+# oldest node crashes, i.e. shutdown the whole cluster together with the oldest node.
+#//#keep-oldest
+akka.cluster.split-brain-resolver.keep-oldest {
+ # Enable downing of the oldest node when it is partitioned from all other nodes
+ down-if-alone = on
+
+ # if the 'role' is defined the decision is based only on members with that 'role',
+ # i.e. using the oldest member (singleton) within the nodes with that role
+ role = ""
+}
+#//#keep-oldest
+
+# Keep the part that can acquire the lease, and down the other part.
+# Best effort is to keep the side that has most nodes, i.e. the majority side.
+# This is achieved by adding a delay before trying to acquire the lease on the
+# minority side.
+#//#lease-majority
+akka.cluster.split-brain-resolver.lease-majority {
+ lease-implementation = ""
+
+ # This delay is used on the minority side before trying to acquire the lease,
+ # as a best effort to try to keep the majority side.
+ acquire-lease-delay-for-minority = 2s
+
+ # If the 'role' is defined the majority/minority is based only on members with that 'role'.
+ role = ""
+}
+#//#lease-majority
class = "akka.persistence.journal.inmem.InmemJournal"
# Dispatcher for the plugin actor.
plugin-dispatcher = "akka.actor.default-dispatcher"
+
+ # Turn this on to test serialization of the events
+ test-serialization = off
}
# Local file system snapshot store plugin.
artery = "akka.remote.serialization.ArteryMessageSerializer"
proto = "akka.remote.serialization.ProtobufSerializer"
daemon-create = "akka.remote.serialization.DaemonMsgCreateSerializer"
- primitive-long = "akka.remote.serialization.LongSerializer"
- primitive-int = "akka.remote.serialization.IntSerializer"
- primitive-string = "akka.remote.serialization.StringSerializer"
- primitive-bytestring = "akka.remote.serialization.ByteStringSerializer"
akka-system-msg = "akka.remote.serialization.SystemMessageSerializer"
}
# Since akka.protobuf.Message does not extend Serializable but
# GeneratedMessage does, need to use the more specific one here in order
# to avoid ambiguity.
+ # This is only loaded if akka-protobuf is on the classpath
+ # It should not be used and users should migrate to using the protobuf classes
+ # directly
+ # Remove in 2.7
"akka.protobuf.GeneratedMessage" = proto
+ "akka.protobufv3.internal.GeneratedMessageV3" = proto
+
# Since com.google.protobuf.Message does not extend Serializable but
# GeneratedMessage does, need to use the more specific one here in order
# to avoid ambiguity.
# This com.google.protobuf serialization binding is only used if the class can be loaded,
# i.e. com.google.protobuf dependency has been added in the application project.
"com.google.protobuf.GeneratedMessage" = proto
+ "com.google.protobuf.GeneratedMessageV3" = proto
- "java.util.Optional" = akka-misc
-
-
- # The following are handled by the MiscMessageSerializer, but they are not enabled for
- # compatibility reasons (it was added in Akka 2.5.[8,9,12]). Enable them by adding:
- # akka.actor.serialization-bindings {
- # "akka.Done" = akka-misc
- # "akka.NotUsed" = akka-misc
- # "akka.actor.Address" = akka-misc
- # "akka.remote.UniqueAddress" = akka-misc
- # }
- }
-
- # Additional serialization-bindings that are replacing Java serialization are
- # defined in this section for backwards compatibility reasons. They are included
- # by default but can be excluded for backwards compatibility with Akka 2.4.x.
- # They can be disabled with enable-additional-serialization-bindings=off.
- additional-serialization-bindings {
"akka.actor.Identify" = akka-misc
"akka.actor.ActorIdentity" = akka-misc
"scala.Some" = akka-misc
"scala.None$" = akka-misc
+ "java.util.Optional" = akka-misc
"akka.actor.Status$Success" = akka-misc
"akka.actor.Status$Failure" = akka-misc
"akka.actor.ActorRef" = akka-misc
"akka.actor.Kill$" = akka-misc
"akka.remote.RemoteWatcher$Heartbeat$" = akka-misc
"akka.remote.RemoteWatcher$HeartbeatRsp" = akka-misc
- "akka.actor.ActorInitializationException" = akka-misc
-
- "akka.dispatch.sysmsg.SystemMessage" = akka-system-msg
-
- "java.lang.String" = primitive-string
- "akka.util.ByteString$ByteString1C" = primitive-bytestring
- "akka.util.ByteString$ByteString1" = primitive-bytestring
- "akka.util.ByteString$ByteStrings" = primitive-bytestring
- "java.lang.Long" = primitive-long
- "scala.Long" = primitive-long
- "java.lang.Integer" = primitive-int
- "scala.Int" = primitive-int
+ "akka.Done" = akka-misc
+ "akka.NotUsed" = akka-misc
+ "akka.actor.Address" = akka-misc
+ "akka.remote.UniqueAddress" = akka-misc
- # Java Serializer is by default used for exceptions.
- # It's recommended that you implement custom serializer for exceptions that are
- # sent remotely, e.g. in akka.actor.Status.Failure for ask replies. You can add
- # binding to akka-misc (MiscMessageSerializerSpec) for the exceptions that have
- # a constructor with single message String or constructor with message String as
- # first parameter and cause Throwable as second parameter. Note that it's not
- # safe to add this binding for general exceptions such as IllegalArgumentException
- # because it may have a subclass without required constructor.
- "java.lang.Throwable" = java
+ "akka.actor.ActorInitializationException" = akka-misc
"akka.actor.IllegalActorStateException" = akka-misc
"akka.actor.ActorKilledException" = akka-misc
"akka.actor.InvalidActorNameException" = akka-misc
"akka.actor.InvalidMessageException" = akka-misc
-
- # TODO issue #27330: TimeoutException not enabled for serialization in 2.5.x yet
- #"java.util.concurrent.TimeoutException" = akka-misc
- # TODO issue #27330: ThrowableNotSerializableException not enabled for serialization in 2.5.x yet
- #"akka.remote.serialization.ThrowableNotSerializableException" = akka-misc
+ "java.util.concurrent.TimeoutException" = akka-misc
+ "akka.remote.serialization.ThrowableNotSerializableException" = akka-misc
"akka.actor.LocalScope$" = akka-misc
"akka.remote.RemoteScope" = akka-misc
"akka.routing.TailChoppingGroup" = akka-misc
"akka.routing.TailChoppingPool" = akka-misc
"akka.remote.routing.RemoteRouterConfig" = akka-misc
- }
- # Additional serialization bindings which are enabled automatically when allow-java-serialization is disabled.
- java-serialization-disabled-additional-serialization-bindings = {
- "akka.Done" = akka-misc
- "akka.NotUsed" = akka-misc
- "akka.actor.Address" = akka-misc
- "akka.remote.UniqueAddress" = akka-misc
+ "akka.pattern.StatusReply" = akka-misc
+
+ "akka.dispatch.sysmsg.SystemMessage" = akka-system-msg
+
+ # Java Serializer is by default used for exceptions and will by default
+ # not be allowed to be serialized, but in certain cases they are replaced
+ # by `akka.remote.serialization.ThrowableNotSerializableException` if
+ # no specific serializer has been defined:
+ # - when wrapped in `akka.actor.Status.Failure` for ask replies
+ # - when wrapped in system messages for exceptions from remote deployed child actors
+ #
+ # It's recommended that you implement custom serializer for exceptions that are
+ # sent remotely. You can add binding to akka-misc (MiscMessageSerializer) for the
+ # exceptions that have a constructor with single message String or constructor with
+ # message String as first parameter and cause Throwable as second parameter. Note that it's not
+ # safe to add this binding for general exceptions such as IllegalArgumentException
+ # because it may have a subclass without required constructor.
+ "java.lang.Throwable" = java
}
serialization-identifiers {
"akka.remote.serialization.MessageContainerSerializer" = 6
"akka.remote.serialization.MiscMessageSerializer" = 16
"akka.remote.serialization.ArteryMessageSerializer" = 17
+
+ "akka.remote.serialization.SystemMessageSerializer" = 22
+
+ # deprecated in 2.6.0, moved to akka-actor
"akka.remote.serialization.LongSerializer" = 18
+ # deprecated in 2.6.0, moved to akka-actor
"akka.remote.serialization.IntSerializer" = 19
+ # deprecated in 2.6.0, moved to akka-actor
"akka.remote.serialization.StringSerializer" = 20
+ # deprecated in 2.6.0, moved to akka-actor
"akka.remote.serialization.ByteStringSerializer" = 21
- "akka.remote.serialization.SystemMessageSerializer" = 22
}
deployment {
default {
# if this is set to a valid remote address, the named actor will be
- # deployed at that node e.g. "akka.tcp://sys@host:port"
+ # deployed at that node e.g. "akka://sys@host:port"
remote = ""
target {
# A list of hostnames and ports for instantiating the children of a
# router
- # The format should be on "akka.tcp://sys@host:port", where:
+ # The format should be "akka://sys@host:port", where:
# - sys is the remote actor system name
# - hostname can be either hostname or IP address the remote actor
# should connect to
remote {
### Settings shared by classic remoting and Artery (the new implementation of remoting)
- # If set to a nonempty string remoting will use the given dispatcher for
- # its internal actors otherwise the default dispatcher is used. Please note
- # that since remoting can load arbitrary 3rd party drivers (see
- # "enabled-transport" and "adapters" entries) it is not guaranteed that
- # every module will respect this setting.
- use-dispatcher = "akka.remote.default-remote-dispatcher"
-
- # Settings for the failure detector to monitor connections.
- # For TCP it is not important to have fast failure detection, since
- # most connection failures are captured by TCP itself.
- # The default DeadlineFailureDetector will trigger if there are no heartbeats within
- # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
- # with the default settings.
- transport-failure-detector {
+ # Using remoting directly is typically not desirable, so a warning will
+ # be shown to make this clear. Set this setting to 'off' to suppress that
+ # warning.
+ warn-about-direct-use = on
- # FQCN of the failure detector implementation.
- # It must implement akka.remote.FailureDetector and have
- # a public constructor with a com.typesafe.config.Config and
- # akka.actor.EventStream parameter.
- implementation-class = "akka.remote.DeadlineFailureDetector"
- # How often keep-alive heartbeat messages should be sent to each connection.
- heartbeat-interval = 4 s
+ # If Cluster is not used, remote watch and deployment are disabled.
+ # To optionally use them while not using Cluster, set to 'on'.
+ use-unsafe-remote-features-outside-cluster = off
- # Number of potentially lost/delayed heartbeats that will be
- # accepted before considering it to be an anomaly.
- # A margin to the `heartbeat-interval` is important to be able to survive sudden,
- # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
- # network drop.
- acceptable-heartbeat-pause = 120 s
- }
+ # A warning will be logged on remote watch attempts if Cluster
+ # is not in use and 'use-unsafe-remote-features-outside-cluster'
+ # is 'off'. Set this to 'off' to suppress these.
+ warn-unsafe-watch-outside-cluster = on
# Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
# [Hayashibara et al]) used for remote death watch.
# remote deployment configuration section
deployment {
- # If true, will only allow specific classes to be instanciated on this system via remote deployment
+ # deprecated, use `enable-allow-list`
enable-whitelist = off
+ # If true, will only allow specific classes listed in `allowed-actor-classes` to be instantiated on this
+ # system via remote deployment
+ enable-allow-list = ${akka.remote.deployment.enable-whitelist}
+
+
+ # deprecated, use `allowed-actor-classes`
whitelist = []
+
+ allowed-actor-classes = ${akka.remote.deployment.whitelist}
}
-#//#shared
+
+ ### Default dispatcher for the remoting subsystem
+ default-remote-dispatcher {
+ type = Dispatcher
+ executor = "fork-join-executor"
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-factor = 0.5
+ parallelism-max = 16
+ }
+ throughput = 10
+ }
+ #//#shared
}
}
akka {
- remote {
-#//#classic
+ remote.classic {
+ #//#classic
+
+ ### Configuration for classic remoting. Classic remoting is deprecated, use artery.
+
+
+ # If set to a nonempty string remoting will use the given dispatcher for
+ # its internal actors otherwise the default dispatcher is used. Please note
+ # that since remoting can load arbitrary 3rd party drivers (see
+ # "enabled-transport" and "adapters" entries) it is not guaranteed that
+ # every module will respect this setting.
+ use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+ # Settings for the failure detector to monitor connections.
+ # For TCP it is not important to have fast failure detection, since
+ # most connection failures are captured by TCP itself.
+ # The default DeadlineFailureDetector will trigger if there are no heartbeats within
+ # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
+ # with the default settings.
+ transport-failure-detector {
+
+ # FQCN of the failure detector implementation.
+ # It must implement akka.remote.FailureDetector and have
+ # a public constructor with a com.typesafe.config.Config and
+ # akka.actor.EventStream parameter.
+ implementation-class = "akka.remote.DeadlineFailureDetector"
+
+ # How often keep-alive heartbeat messages should be sent to each connection.
+ heartbeat-interval = 4 s
+
+ # Number of potentially lost/delayed heartbeats that will be
+ # accepted before considering it to be an anomaly.
+ # A margin to the `heartbeat-interval` is important to be able to survive sudden,
+ # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
+ # network drop.
+ acceptable-heartbeat-pause = 120 s
+ }
- ### Configuration for classic remoting
# Timeout after which the startup of the remoting subsystem is considered
# to be failed. Increase this value if your transport drivers (see the
command-ack-timeout = 30 s
# The timeout for outbound associations to perform the handshake.
- # If the transport is akka.remote.netty.tcp or akka.remote.netty.ssl
+ # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl
# the configured connection-timeout for the transport will be used instead.
handshake-timeout = 15 s
untrusted-mode = off
# When 'untrusted-mode=on' inbound actor selections are by default discarded.
- # Actors with paths defined in this white list are granted permission to receive actor
+ # Actors with paths defined in this list are granted permission to receive actor
# selections messages.
# E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
trusted-selection-paths = []
- # Should the remote server require that its peers share the same
- # secure-cookie (defined in the 'remote' section)? Secure cookies are passed
- # between during the initial handshake. Connections are refused if the initial
- # message contains a mismatching cookie or the cookie is missing.
- require-cookie = off
-
- # Deprecated since 2.4-M1
- secure-cookie = ""
-
### Logging
# If this is "on", Akka will log all inbound messages at DEBUG level,
# pointing to an implementation class of the Transport interface.
# If multiple transports are provided, the address of the first
# one will be used as a default address.
- enabled-transports = ["akka.remote.netty.tcp"]
+ enabled-transports = ["akka.remote.classic.netty.tcp"]
# Transport drivers can be augmented with adapters by adding their
# name to the applied-adapters setting in the configuration of a
# Akka protocol
applied-adapters = []
- transport-protocol = tcp
-
# The default remote server port clients should connect to.
# Default is 2552 (AKKA), use 0 if you want a random available port
# This port needs to be unique for each actor system on the same machine.
# when running akka nodes in a separated networks (under NATs or docker containers).
# Use 0 if you want a random available port. Examples:
#
- # akka.remote.netty.tcp.port = 2552
- # akka.remote.netty.tcp.bind-port = 2553
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = 2553
# Network interface will be bound to the 2553 port, but remoting protocol will
# expect messages sent to port 2552.
#
- # akka.remote.netty.tcp.port = 0
- # akka.remote.netty.tcp.bind-port = 0
+ # akka.remote.classic.netty.tcp.port = 0
+ # akka.remote.classic.netty.tcp.bind-port = 0
# Network interface will be bound to a random port, and remoting protocol will
# expect messages sent to the bound port.
#
- # akka.remote.netty.tcp.port = 2552
- # akka.remote.netty.tcp.bind-port = 0
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = 0
# Network interface will be bound to a random port, but remoting protocol will
# expect messages sent to port 2552.
#
- # akka.remote.netty.tcp.port = 0
- # akka.remote.netty.tcp.bind-port = 2553
+ # akka.remote.classic.netty.tcp.port = 0
+ # akka.remote.classic.netty.tcp.bind-port = 2553
# Network interface will be bound to the 2553 port, and remoting protocol will
# expect messages sent to the bound port.
#
- # akka.remote.netty.tcp.port = 2552
- # akka.remote.netty.tcp.bind-port = ""
+ # akka.remote.classic.netty.tcp.port = 2552
+ # akka.remote.classic.netty.tcp.bind-port = ""
# Network interface will be bound to the 2552 port, and remoting protocol will
# expect messages sent to the bound port.
#
- # akka.remote.netty.tcp.port if empty
+ # akka.remote.classic.netty.tcp.port if empty
bind-port = ""
# Use this setting to bind a network interface to a different hostname or ip
# than remoting protocol expects messages at.
# Use "0.0.0.0" to bind to all interfaces.
- # akka.remote.netty.tcp.hostname if empty
+ # akka.remote.classic.netty.tcp.hostname if empty
bind-hostname = ""
# Enables SSL support on this transport
}
- # DEPRECATED, since 2.5.0
- # The netty.udp transport is deprecated, please use Artery instead.
- # See: https://doc.akka.io/docs/akka/current/remoting-artery.html
- netty.udp = ${akka.remote.netty.tcp}
- netty.udp {
- transport-protocol = udp
- }
-
- netty.ssl = ${akka.remote.netty.tcp}
+ netty.ssl = ${akka.remote.classic.netty.tcp}
netty.ssl = {
# Enable SSL/TLS encryption.
# This must be enabled on both the client and server to work.
# Must implement akka.remote.transport.netty.SSLEngineProvider and have a public
# constructor with an ActorSystem parameter.
# The default ConfigSSLEngineProvider is configured by properties in section
- # akka.remote.netty.ssl.security
+ # akka.remote.classic.netty.ssl.security
#
# The SSLEngineProvider can also be defined via ActorSystemSetup with
# SSLEngineProviderSetup when starting the ActorSystem. That is useful when
# This password is used for decrypting the trust store
trust-store-password = "changeme"
- # Protocol to use for SSL encryption, choose from:
- # TLS 1.2 is available since JDK7, and default since JDK8:
- # https://blogs.oracle.com/java-platform-group/entry/java_8_will_use_tls
+ # Protocol to use for SSL encryption.
protocol = "TLSv1.2"
- # Example: ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]
- # You need to install the JCE Unlimited Strength Jurisdiction Policy
- # Files to use AES 256.
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # When doing rolling upgrades, make sure to include both the algorithm used
+ # by old nodes and the preferred algorithm.
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
# More info here:
- # http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider
- enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"]
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA"]
# There are two options, and the default SecureRandom is recommended:
# "" or "SecureRandom" => (default)
debug = off
}
- ### Default dispatcher for the remoting subsystem
-
- default-remote-dispatcher {
- type = Dispatcher
- executor = "fork-join-executor"
- fork-join-executor {
- parallelism-min = 2
- parallelism-factor = 0.5
- parallelism-max = 16
- }
- throughput = 10
- }
-
backoff-remote-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
#//#classic
+#//#artery
akka {
remote {
- #//#artery
### Configuration for Artery, the new implementation of remoting
artery {
- # Enable the new remoting with this flag
+ # Disable artery with this flag
enabled = off
# Select the underlying transport implementation.
#
# Possible values: aeron-udp, tcp, tls-tcp
- #
- # The Aeron (UDP) transport is a high performance transport and should be used for systems
- # that require high throughput and low latency. It is using more CPU than TCP when the
- # system is idle or at low message rates. There is no encryption for Aeron.
- # https://github.com/real-logic/aeron
- #
- # The TCP and TLS transport is implemented using Akka Streams TCP/TLS. This is the choice
- # when encryption is needed, but it can also be used with plain TCP without TLS. It's also
- # the obvious choice when UDP can't be used.
- # It has very good performance (high throughput and low latency) but latency at high throughput
- # might not be as good as the Aeron transport.
- # It is using less CPU than Aeron when the system is idle or at low message rates.
+ # See https://doc.akka.io/docs/akka/current/remoting-artery.html#selecting-a-transport for the tradeoffs
+ # for each transport
transport = aeron-udp
# Canonical address is the address other clients should connect to.
bind-timeout = 3s
}
- # Periodically log out all Aeron counters. See https://github.com/real-logic/aeron/wiki/Monitoring-and-Debugging#counters
- # Only used when transport is aeron-udp.
- log-aeron-counters = false
# Actor paths to use the large message stream for when a message
# is sent to them over remoting. The large message stream dedicated
untrusted-mode = off
# When 'untrusted-mode=on' inbound actor selections are by default discarded.
- # Actors with paths defined in this white list are granted permission to receive actor
+ # Actors with paths defined in this list are granted permission to receive actor
# selections messages.
# E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
trusted-selection-paths = []
# if off then they are not logged
log-sent-messages = off
+ # Logging of message types with payload size in bytes larger than
+ # this value. Maximum detected size per message type is logged once,
+ # with an increase threshold of 10%.
+ # By default this feature is turned off. Activate it by setting the property to
+ # a value in bytes, such as 1000b. Note that for all messages larger than this
+ # limit there will be extra performance and scalability cost.
+ log-frame-size-exceeding = off
+
advanced {
# Maximum serialized message size, including header data.
# Settings for the materializer that is used for the remote streams.
materializer = ${akka.stream.materializer}
- # If set to a nonempty string artery will use the given dispatcher for
- # the ordinary and large message streams, otherwise the default dispatcher is used.
+ # Remoting will use the given dispatcher for the ordinary and large message
+ # streams.
use-dispatcher = "akka.remote.default-remote-dispatcher"
- # If set to a nonempty string remoting will use the given dispatcher for
- # the control stream, otherwise the default dispatcher is used.
+ # Remoting will use the given dispatcher for the control stream.
# It can be good to not use the same dispatcher for the control stream as
# the dispatcher for the ordinary message stream so that heartbeat messages
# are not disturbed.
- use-control-stream-dispatcher = ""
-
- # Controls whether to start the Aeron media driver in the same JVM or use external
- # process. Set to 'off' when using external media driver, and then also set the
- # 'aeron-dir'.
- # Only used when transport is aeron-udp.
- embedded-media-driver = on
-
- # Directory used by the Aeron media driver. It's mandatory to define the 'aeron-dir'
- # if using external media driver, i.e. when 'embedded-media-driver = off'.
- # Embedded media driver will use a this directory, or a temporary directory if this
- # property is not defined (empty).
- # Only used when transport is aeron-udp.
- aeron-dir = ""
-
- # Whether to delete aeron embedded driver directory upon driver stop.
- # Only used when transport is aeron-udp.
- delete-aeron-dir = yes
-
- # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
- # The tradeoff is that to have low latency more CPU time must be used to be
- # able to react quickly on incoming messages or send as fast as possible after
- # backoff backpressure.
- # Level 1 strongly prefer low CPU consumption over low latency.
- # Level 10 strongly prefer low latency over low CPU consumption.
- # Only used when transport is aeron-udp.
- idle-cpu-level = 5
+ use-control-stream-dispatcher = "akka.actor.internal-dispatcher"
+
# Total number of inbound lanes, shared among all inbound associations. A value
# greater than 1 means that deserialization can be performed in parallel for
# unacknowledged system messages are re-delivered with this interval
system-message-resend-interval = 1 second
- # Timeout of establishing outbound connections.
- # Only used when transport is tcp or tls-tcp.
- connection-timeout = 5 seconds
+
# The timeout for outbound associations to perform the initial handshake.
# This timeout must be greater than the 'image-liveness-timeout' when
# a new session with a restarted destination system.
inject-handshake-interval = 1 second
- # messages that are not accepted by Aeron are dropped after retrying for this period
- # Only used when transport is aeron-udp.
- give-up-message-after = 60 seconds
# System messages that are not acknowledged after re-sending for this period are
# dropped and will trigger quarantine. The value should be longer than the length
# remote messages has been completed
shutdown-flush-timeout = 1 second
+ # Before sending notification of terminated actor (DeathWatchNotification) other messages
+ # will be flushed to make sure that the Terminated message arrives after other messages.
+ # It will wait this long for the flush acknowledgement before continuing.
+ # The flushing can be disabled by setting this to `off`.
+ death-watch-notification-flush-timeout = 3 seconds
+
# See 'inbound-max-restarts'
inbound-restart-timeout = 5 seconds
# If more restarts occurs the ActorSystem will be terminated.
outbound-max-restarts = 5
- # Timeout after which aeron driver has not had keepalive messages
- # from a client before it considers the client dead.
- # Only used when transport is aeron-udp.
- client-liveness-timeout = 20 seconds
-
- # Timeout for each the INACTIVE and LINGER stages an aeron image
- # will be retained for when it is no longer referenced.
- # This timeout must be less than the 'handshake-timeout'.
- # Only used when transport is aeron-udp.
- image-liveness-timeout = 10 seconds
-
- # Timeout after which the aeron driver is considered dead
- # if it does not update its C'n'C timestamp.
- # Only used when transport is aeron-udp.
- driver-timeout = 20 seconds
-
- flight-recorder {
- // FIXME it should be enabled by default when we have a good solution for naming the files
- enabled = off
- # Controls where the flight recorder file will be written. There are three options:
- # 1. Empty: a file will be generated in the temporary directory of the OS
- # 2. A relative or absolute path ending with ".afr": this file will be used
- # 3. A relative or absolute path: this directory will be used, the file will get a random file name
- destination = ""
- }
-
# compression of common strings in remoting messages, like actor destinations, serializers etc
compression {
# Note that compression tables are "rolling" (i.e. a new table replaces the old
# compression table once in a while), and this setting is only about the total number
# of compressions within a single such table.
- # Must be a positive natural number.
+ # Must be a positive natural number. Can be disabled with "off".
max = 256
# interval between new table compression advertisements.
# Note that compression tables are "rolling" (i.e. a new table replaces the old
# compression table once in a while), and this setting is only about the total number
# of compressions within a single such table.
- # Must be a positive natural number.
+ # Must be a positive natural number. Can be disabled with "off".
max = 256
# interval between new table compression advertisements.
# Refer to `akka.remote.artery.RemoteInstrument` for more information.
instruments = ${?akka.remote.artery.advanced.instruments} []
+ # Only used when transport is aeron-udp
+ aeron {
+ # Periodically log out all Aeron counters. See https://github.com/real-logic/aeron/wiki/Monitoring-and-Debugging#counters
+ # Only used when transport is aeron-udp.
+ log-aeron-counters = false
+
+ # Controls whether to start the Aeron media driver in the same JVM or use external
+ # process. Set to 'off' when using external media driver, and then also set the
+ # 'aeron-dir'.
+ # Only used when transport is aeron-udp.
+ embedded-media-driver = on
+
+ # Directory used by the Aeron media driver. It's mandatory to define the 'aeron-dir'
+ # if using external media driver, i.e. when 'embedded-media-driver = off'.
+ # Embedded media driver will use this directory, or a temporary directory if this
+ # property is not defined (empty).
+ # Only used when transport is aeron-udp.
+ aeron-dir = ""
+
+ # Whether to delete aeron embedded driver directory upon driver stop.
+ # Only used when transport is aeron-udp.
+ delete-aeron-dir = yes
+
+ # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
+ # The tradeoff is that to have low latency more CPU time must be used to be
+ # able to react quickly on incoming messages or send as fast as possible after
+ # backoff backpressure.
+ # Level 1 strongly prefer low CPU consumption over low latency.
+ # Level 10 strongly prefer low latency over low CPU consumption.
+ # Only used when transport is aeron-udp.
+ idle-cpu-level = 5
+
+ # messages that are not accepted by Aeron are dropped after retrying for this period
+ # Only used when transport is aeron-udp.
+ give-up-message-after = 60 seconds
+
+ # Timeout after which aeron driver has not had keepalive messages
+ # from a client before it considers the client dead.
+ # Only used when transport is aeron-udp.
+ client-liveness-timeout = 20 seconds
+
+ # Timeout after which an uncommitted publication will be unblocked
+ # Only used when transport is aeron-udp.
+ publication-unblock-timeout = 40 seconds
+
+ # Timeout for each the INACTIVE and LINGER stages an aeron image
+ # will be retained for when it is no longer referenced.
+ # This timeout must be less than the 'handshake-timeout'.
+ # Only used when transport is aeron-udp.
+ image-liveness-timeout = 10 seconds
+
+ # Timeout after which the aeron driver is considered dead
+ # if it does not update its C'n'C timestamp.
+ # Only used when transport is aeron-udp.
+ driver-timeout = 20 seconds
+ }
+
+ # Only used when transport is tcp or tls-tcp.
+ tcp {
+ # Timeout of establishing outbound connections.
+ connection-timeout = 5 seconds
+
+ # The local address that is used for the client side of the TCP connection.
+ outbound-client-hostname = ""
+ }
+
}
# SSL configuration that is used when transport=tls-tcp.
# real passwords in config files. trust-store-password=${SSL_TRUST_STORE_PASSWORD}
trust-store-password = "changeme"
- # Protocol to use for SSL encryption, choose from:
- # TLS 1.2 is available since JDK7, and default since JDK8:
- # https://blogs.oracle.com/java-platform-group/entry/java_8_will_use_tls
+ # Protocol to use for SSL encryption.
protocol = "TLSv1.2"
- # Example: ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]
- # You need to install the JCE Unlimited Strength Jurisdiction Policy
- # Files to use AES 256.
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # When doing rolling upgrades, make sure to include both the algorithm used
+ # by old nodes and the preferred algorithm.
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
# More info here:
- # http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider
- enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"]
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA"]
# There are two options, and the default SecureRandom is recommended:
# "" or "SecureRandom" => (default)
require-mutual-authentication = on
# Set this to `on` to verify hostnames with sun.security.util.HostnameChecker
+ # If possible it is recommended to have this enabled. Hostname verification is designed for
+ # situations where things locate each other by hostname, in scenarios where host names are dynamic
+ # and not known up front it can make sense to have this disabled.
hostname-verification = off
}
+ # Config of akka.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider
+ # This engine provider reads PEM files from a mount point shared with the secret
+ # manager. The constructed SSLContext is cached some time (configurable) so when
+ # the credentials rotate the new credentials are eventually picked up.
+ # By default mTLS is enabled.
+ # This provider also includes a verification phase that runs after the TLS handshake
+ # phase. In this verification, both peers run an authorization and verify they are
+ # part of the same akka cluster. The verification happens by comparing the subject
+ # names in the peer's certificate with the names on the own certificate, so if you
+ # use this SSLEngineProvider you should make sure all nodes on the cluster include
+ # at least one common subject name (CN or SAN).
+ # The Key setup this implementation supports has some limitations:
+ # 1. the private key must be provided on a PKCS#1 or a non-encrypted PKCS#8 PEM-formatted file
+ #  2. the private key must be of an algorithm supported by `akka-pki` tools (e.g. "RSA", not "EC")
+ # 3. the node certificate must be issued by a root CA (not an intermediate CA)
+ # 4. both the node and the CA certificates must be provided in PEM-formatted files
+ rotating-keys-engine {
+
+ # This is a convention that people may follow if they wish to save themselves some configuration
+ secret-mount-point = /var/run/secrets/akka-tls/rotating-keys-engine
+
+ # The absolute path the PEM file with the private key.
+ key-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.key
+ # The absolute path to the PEM file of the certificate for the private key above.
+ cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.crt
+ # The absolute path to the PEM file of the certificate of the CA that issued
+ # the node certificate above.
+ ca-cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/ca.crt
+
+ # There are two options, and the default SecureRandom is recommended:
+ # "" or "SecureRandom" => (default)
+ # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+ #
+ # Setting a value here may require you to supply the appropriate cipher
+ # suite (see enabled-algorithms section)
+ random-number-generator = ""
+
+ # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ # "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+ # "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+ # If you use a JDK 8 prior to 8u161 you need to install
+ # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+ # More info here:
+ # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+ enabled-algorithms = ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+
+ # Protocol to use for SSL encryption.
+ protocol = "TLSv1.2"
+
+ # How long should an SSLContext instance be cached. When rotating keys and certificates,
+ # there must be a time overlap between the old certificate/key and the new ones. The
+ # value of this setting should be lower than the duration of that overlap.
+ ssl-context-cache-ttl = 5m
+ }
}
}
}
# Akka Stream Reference Config File #
#####################################
+# eager creation of the system wide materializer
+akka.library-extensions += "akka.stream.SystemMaterializer$"
akka {
stream {
max-input-buffer-size = 16
# Fully qualified config path which holds the dispatcher configuration
- # to be used by ActorMaterializer when creating Actors.
- # When this value is left empty, the default-dispatcher will be used.
- dispatcher = ""
+ # or full dispatcher configuration to be used by ActorMaterializer when creating Actors.
+ dispatcher = "akka.actor.default-dispatcher"
- blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"
+ # Fully qualified config path which holds the dispatcher configuration
+ # or full dispatcher configuration to be used by stream operators that
+ # perform blocking operations
+ blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
# Cleanup leaked publishers and subscribers when they are not used within a given
# deadline
}
io.tcp {
- # The outgoing bytes are accumulated in a buffer while waiting for acknoledgment
+ # The outgoing bytes are accumulated in a buffer while waiting for acknowledgment
# of pending write. This improves throughput for small messages (frames) without
# sacrificing latency. While waiting for the ack the stage will eagerly pull
# from upstream until the buffer exceeds this size. That means that the buffer may hold
write-buffer-size = 16 KiB
}
+ # Time to wait for async materializer creation before throwing an exception
+ creation-timeout = 20 seconds
+
//#stream-ref
# configure defaults for SourceRef and SinkRef
stream-ref {
//#stream-ref
}
- # Deprecated, use akka.stream.materializer.blocking-io-dispatcher, this setting
- # was never applied because of bug #24357
- # It must still have a valid value because used from Akka HTTP.
- blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"
-
- default-blocking-io-dispatcher {
- type = "Dispatcher"
- executor = "thread-pool-executor"
- throughput = 1
-
- thread-pool-executor {
- fixed-pool-size = 16
- }
- }
+ # Deprecated, left here to not break Akka HTTP which refers to it
+ blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+ # Deprecated, will not be used unless user code refer to it, use 'akka.stream.materializer.blocking-io-dispatcher'
+ # instead, or if from code, prefer the 'ActorAttributes.IODispatcher' attribute
+ default-blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
}
# configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections)
<artifactId>repackaged-akka-jar</artifactId>
<version>${project.version}</version>
</artifactItem>
+ <artifactItem>
+ <groupId>com.hierynomus</groupId>
+ <artifactId>asn-one</artifactId>
+ <version>0.4.0</version>
+ </artifactItem>
</artifactItems>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>true</overWriteSnapshots>
<Export-Package>
akka.*,
com.typesafe.sslconfig.akka.*,
+ jdk.jfr,
</Export-Package>
<Import-Package>
sun.misc;resolution:=optional,
<artifactId>scala-parser-combinators_2.13</artifactId>
<version>1.1.2</version>
</dependency>
+ <dependency>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <version>3.1.4.0</version>
+ <scope>test</scope>
+ </dependency>
<!-- Configuration library -->
<!-- This needs to be kept in sync with the version used by akka -->
<dependency>
<groupId>com.typesafe</groupId>
<artifactId>config</artifactId>
- <version>1.3.3</version>
+ <version>1.4.0</version>
</dependency>
<dependency>
<groupId>com.typesafe</groupId>
<artifactId>ssl-config-core_2.13</artifactId>
- <version>0.3.8</version>
+ <version>0.4.2</version>
</dependency>
<!-- Akka testkit -->
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-testkit_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
<scope>test</scope>
<exclusions>
<exclusion>
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-persistence-tck_2.13</artifactId>
- <version>2.5.31</version>
+ <version>2.6.12</version>
<scope>test</scope>
<exclusions>
<exclusion>
<dependency>
<groupId>org.agrona</groupId>
<artifactId>agrona</artifactId>
- <version>0.9.33</version>
+ <version>1.8.0</version>
</dependency>
<dependency>
<groupId>io.aeron</groupId>
<artifactId>aeron-client</artifactId>
- <version>1.15.3</version>
+ <version>1.31.1</version>
</dependency>
<dependency>
<groupId>io.aeron</groupId>
<artifactId>aeron-driver</artifactId>
- <version>1.15.3</version>
+ <version>1.31.1</version>
</dependency>
</dependencies>
</dependencyManagement>
<link>http://google.github.io/truth/api/1.0.1/</link>
<link>http://www.slf4j.org/apidocs/</link>
<link>https://google.github.io/guava/releases/29.0-jre/api/docs/</link>
- <link>http://doc.akka.io/japi/akka/2.5.31/</link>
+ <link>http://doc.akka.io/japi/akka/2.6.12/</link>
<link>http://netty.io/4.1/api/</link>
<link>https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/</link>
<link>https://commons.apache.org/proper/commons-lang/javadocs/api-3.9/</link>
<features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-akka">
<feature version="0.0.0">
<feature>odl-controller-scala</feature>
- <bundle>mvn:com.typesafe/config/1.3.3</bundle>
- <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.3.8</bundle>
- <bundle>mvn:io.aeron/aeron-client/1.15.3</bundle>
- <bundle>mvn:io.aeron/aeron-driver/1.15.3</bundle>
+ <bundle>mvn:com.typesafe/config/1.4.0</bundle>
+ <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.4.2</bundle>
+ <bundle>mvn:io.aeron/aeron-client/1.31.1</bundle>
+ <bundle>mvn:io.aeron/aeron-driver/1.31.1</bundle>
<bundle>mvn:io.netty/netty/3.10.6.Final</bundle>
- <bundle>mvn:org.agrona/agrona/0.9.33</bundle>
+ <bundle>mvn:org.agrona/agrona/1.8.0</bundle>
<bundle>mvn:org.opendaylight.controller/repackaged-akka/${project.version}</bundle>
<bundle>mvn:org.reactivestreams/reactive-streams/1.0.3</bundle>
</feature>
mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore
</configfile>
</feature>
-</features>
+</features>
\ No newline at end of file
import org.apache.commons.lang.SerializationUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import scala.Option;
import scala.concurrent.Future;
import scala.jdk.javaapi.CollectionConverters;
if (++count <= max && entry.getKey() >= fromSequenceNr && entry.getKey() <= toSequenceNr) {
PersistentRepr persistentMessage =
new PersistentImpl(deserialize(entry.getValue()), entry.getKey(), persistenceId,
- null, false, null, null, 0);
+ null, false, null, null, 0, Option.empty());
replayCallback.accept(persistentMessage);
}
}
<artifactId>commons-io</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-persistence-tck_2.13</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.scalatestplus</groupId>
+ <artifactId>junit-4-13_2.13</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<!-- Enforce Netty’s optional dependency on servlet API -->
canonical.hostname = "127.0.0.1"
canonical.port = 2550
}
- netty.tcp {
+ classic.netty.tcp {
hostname = "127.0.0.1"
port = 2550
}
# with read-only associations
use-passive-connections = off
- netty.tcp {
+ classic.netty.tcp {
maximum-frame-size = 419430400
send-buffer-size = 52428800
receive-buffer-size = 52428800
# This is crucial for correct behavior if you use Cluster Singleton or Cluster Sharding,
# especially together with Akka Persistence.
- #auto-down-unreachable-after = 30s
-
allow-weakly-up-members = on
use-dispatcher = cluster-dispatcher
leaderDistributedDataStore.close();
}
- TestKit.shutdownActorSystem(leaderSystem);
- TestKit.shutdownActorSystem(followerSystem);
- TestKit.shutdownActorSystem(follower2System);
+ TestKit.shutdownActorSystem(leaderSystem, true);
+ TestKit.shutdownActorSystem(followerSystem, true);
+ TestKit.shutdownActorSystem(follower2System,true);
InMemoryJournal.clear();
InMemorySnapshotStore.clear();
followerTestKit.waitForMembersUp("member-1", "member-3");
follower2TestKit.waitForMembersUp("member-1", "member-2");
- TestKit.shutdownActorSystem(follower2System);
+ // behavior is controlled by akka.coordinated-shutdown.run-by-actor-system-terminate configuration option
+ TestKit.shutdownActorSystem(follower2System, true);
ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
- OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
+ final OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
.executeOperation(cars, GetOnDemandRaftState.INSTANCE);
Cluster leaderCluster = Cluster.get(leaderSystem);
Stopwatch sw = Stopwatch.createStarted();
while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
CurrentClusterState state = Cluster.get(kit.getSystem()).state();
+
for (Member m : state.getUnreachable()) {
if (member.equals(m.getRoles().iterator().next())) {
return;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
-import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
import akka.testkit.javadsl.TestKit;
import com.google.common.collect.ImmutableList;
import java.time.Duration;
assertEquals(registerForShard1.getListenerActorPath(), registerForShard2.getListenerActorPath());
final TestKit kit2 = new TestKit(getSystem());
- final ActorRef rootListenerActor = getSystem().actorFor(registerForShard1.getListenerActorPath());
+ final ActorSelection rootListenerActor = getSystem().actorSelection(registerForShard1.getListenerActorPath());
rootListenerActor.tell(new EnableNotification(true, "test"), kit.getRef());
final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(),
PeopleModel.create());
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.cluster.UniqueAddress;
+import akka.util.Version;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import scala.collection.immutable.Set.Set1;
public static MemberRemoved createMemberRemoved(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), new Set1<>(memberName), Version.Zero());
return new MemberRemoved(member, MemberStatus.up());
}
public static MemberUp createMemberUp(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new MemberUp(member);
}
public static UnreachableMember createUnreachableMember(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new UnreachableMember(member);
}
public static ReachableMember createReachableMember(final String memberName, final String address) {
UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
- Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName));
+ Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
return new ReachableMember(member);
}
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
}
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2565
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2565
+ transport = tcp
}
netty.tcp {
}
cluster {
- auto-down-unreachable-after = 100s
retry-unsuccessful-join-after = 100ms
roles = [
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2558
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2558
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2559
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2559
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2557
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2557
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2560
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2560
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2561
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2561
+ transport = tcp
}
netty.tcp {
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
persistence.journal.plugin = "in-memory-journal"
+ coordinated-shutdown.run-by-actor-system-terminate = off
persistence.non-persistent.journal {
class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
warn-about-java-serializer-usage = false
}
remote {
+ classic {
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2562
+ }
+ }
+
log-remote-lifecycle-events = off
artery {
enabled = on
canonical.hostname = "127.0.0.1"
canonical.port = 2562
+ transport = tcp
}
netty.tcp {
String name = "testLeaderAndFollowerEntityOwnersReassignedAfterShutdown";
final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
.useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
+ .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG)
+ .schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
.createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
+ .testName(name).moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG)
+ .useAkkaArtery(false).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
.createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
+ .testName(name).moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG)
+ .useAkkaArtery(false).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
.createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
+ .testName(name).moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG)
+ .useAkkaArtery(false).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
.createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5")
- .useAkkaArtery(false).testName(name)
- .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
+ .testName(name).moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG)
+ .useAkkaArtery(false).schemaContext(EOSTestUtils.SCHEMA_CONTEXT)
.createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
// Shutdown the leader and follower3
leaderNode.cleanup();
- follower3Node.cleanup();
follower1Node.waitForMemberDown("member-1");
- follower1Node.waitForMemberDown("member-4");
follower2Node.waitForMemberDown("member-1");
- follower2Node.waitForMemberDown("member-4");
follower4Node.waitForMemberDown("member-1");
+
+ follower3Node.cleanup();
+
+ follower1Node.waitForMemberDown("member-4");
+ follower2Node.waitForMemberDown("member-4");
follower4Node.waitForMemberDown("member-4");
// Verify the prior leader's and follower3 entity owners are re-assigned.
cluster {
seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2553"]
- auto-down-unreachable-after = 10s
-
roles = [
"member-2"
]
cluster {
seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2554"]
- auto-down-unreachable-after = 10s
-
roles = [
"member-3"
]
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2550"]
-
- auto-down-unreachable-after = 10s
}
}
}
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {
cluster {
seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
- auto-down-unreachable-after = 10s
}
}
in-memory-journal {