# Initial contact points of the cluster.
# The nodes to join automatically at startup.
# Comma-separated full URIs defined by a string of the form
- # "akka.tcp://system@hostname:port"
+ # "akka://system@hostname:port"
# Leave empty if the node is supposed to join manually.
seed-nodes = []
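+ # For example (system name, hosts and port are illustrative):
+ # seed-nodes = [
+ #   "akka://ClusterSystem@host1:2552",
+ #   "akka://ClusterSystem@host2:2552"]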
# attempts.
shutdown-after-unsuccessful-join-seed-nodes = off
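+ # For example, to shut down the node if it has not joined the seed nodes
+ # within this duration (duration is illustrative):
+ # shutdown-after-unsuccessful-join-seed-nodes = 20s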
- # Should the 'leader' in the cluster be allowed to automatically mark
- # unreachable nodes as DOWN after a configured time of unreachability?
- # Using auto-down implies that two separate clusters will automatically be
- # formed in case of network partition.
- #
- # Don't enable this in production, see 'Auto-downing (DO NOT USE)' section
- # of Akka Cluster documentation.
- #
- # Disable with "off" or specify a duration to enable auto-down.
- # If a downing-provider-class is configured this setting is ignored.
- auto-down-unreachable-after = off
-
# Time margin after which shards or singletons that belonged to a downed/removed
# partition are created in the surviving partition. The purpose of this margin is that
# in case of a network partition the persistent actors in the non-surviving partitions
# must be stopped before corresponding persistent actors are started somewhere else.
# This is useful if you implement downing strategies that handle network partitions,
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
- # It will not add any extra safety for auto-down-unreachable-after, since that is not
- # handling network partitions.
# Disable with "off" or specify a duration to enable.
+ #
+ # When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
+ # the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
+ # if this down-removal-margin is undefined.
down-removal-margin = off
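+ # For example (illustrative duration, typically matching the downing
+ # strategy's decision time such as stable-after):
+ # down-removal-margin = 20s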
# Pluggable support for downing of nodes in the cluster.
- # If this setting is left empty behavior will depend on 'auto-down-unreachable' in the following ways:
- # * if it is 'off' the `NoDowning` provider is used and no automatic downing will be performed
- # * if it is set to a duration the `AutoDowning` provider is with the configured downing duration
+ # If this setting is left empty the `NoDowning` provider is used and no automatic downing will be performed.
#
# If specified the value must be the fully qualified class name of a subclass of
# `akka.cluster.DowningProvider` having a public one-argument constructor accepting an `ActorSystem`.
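+ # For example, to use the bundled split brain resolver as the downing provider
+ # (see the akka.cluster.split-brain-resolver section below):
+ # downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"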
# If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
# split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
# so they become part of the cluster even during a network split. The leader will
- # move `Joining` members to 'WeaklyUp' after 3 rounds of 'leader-actions-interval'
- # without convergence.
+ # move 'Joining' members to 'WeaklyUp' after this configured duration without convergence.
# The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
- allow-weakly-up-members = on
+ allow-weakly-up-members = 7s
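+ # To disable this feature and keep members in 'Joining' during a network split,
+ # set it to "off" instead of a duration:
+ # allow-weakly-up-members = off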
# The roles of this member. List of strings, e.g. roles = ["A", "B"].
# The roles are part of the membership information and can be used by
#<role-name>.min-nr-of-members = 1
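+ # For example, to require at least 2 members with role 'frontend' and 3 members
+ # with role 'backend' before 'Joining' members are moved to 'Up' (role names
+ # are illustrative):
+ #   frontend.min-nr-of-members = 2
+ #   backend.min-nr-of-members = 3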
}
+ # Application version of the deployment. Used by rolling update features
+ # to distinguish between old and new nodes. The typical convention is to use
+ # 3 digit version numbers `major.minor.patch`, but one or two digits are also
+ # supported.
+ #
+ # If no `.` is used it is interpreted as a single digit version number or as
+ # plain alphanumeric if it cannot be parsed as a number.
+ #
+ # It may also have a qualifier at the end for 2 or 3 digit version numbers such
+ # as "1.2-RC1".
+ # A 1 digit version with a qualifier, such as 1-RC1, is interpreted as plain alphanumeric.
+ #
+ # It has support for https://github.com/dwijnand/sbt-dynver format with `+` or
+ # `-` separator. The number of commits from the tag is handled as a numeric part.
+ # For example `1.0.0+3-73475dce26` is less than `1.0.10+10-ed316bd024` (3 < 10).
+ app-version = "0.0.0"
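+ # For example (illustrative values), a plain version, a version with a
+ # qualifier, and an sbt-dynver style version:
+ # app-version = "1.2.3"
+ # app-version = "1.2-RC1"
+ # app-version = "1.0.0+3-73475dce26"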
+
# Minimum required number of members before the leader changes member status
# of 'Joining' members to 'Up'. Typically used together with
# 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
# until the cluster has reached a certain size.
min-nr-of-members = 1
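+ # For example, to defer moving members to 'Up' until at least 3 members have
+ # joined (illustrative value):
+ # min-nr-of-members = 3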
- # Enable/disable info level logging of cluster events
+ # Enable/disable info level logging of cluster events.
+ # These are logged with logger name `akka.cluster.Cluster`.
log-info = on
# Enable/disable verbose info-level logging of cluster events
# for temporary troubleshooting. Defaults to 'off'.
+ # These are logged with logger name `akka.cluster.Cluster`.
log-info-verbose = off
# Enable or disable JMX MBeans for management of the cluster
# Disable with "off".
publish-stats-interval = off
- # The id of the dispatcher to use for cluster actors. If not specified
- # default dispatcher is used.
+ # The id of the dispatcher to use for cluster actors.
# If specified you need to define the settings of the actual dispatcher.
- use-dispatcher = ""
+ use-dispatcher = "akka.actor.internal-dispatcher"
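+ # For example, to run the cluster actors on a dedicated dispatcher defined
+ # elsewhere in your configuration (dispatcher name is illustrative):
+ # use-dispatcher = "my-cluster-dispatcher"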
# Gossip to a random node with newer or older state information, if any, with
# this probability. Otherwise gossip to any random live node.
# Number of member nodes that each member will send heartbeat messages to,
# i.e. each node will be monitored by this number of other nodes.
- monitored-by-nr-of-members = 5
+ monitored-by-nr-of-members = 9
# After the heartbeat request has been sent the first failure detection
# will start after this period, even though no heartbeat message has
}
debug {
- # log heartbeat events (very verbose, useful mostly when debugging heartbeating issues)
+ # Log heartbeat events (very verbose, useful mostly when debugging heartbeating issues).
+ # These are logged with logger name `akka.cluster.ClusterHeartbeat`.
verbose-heartbeat-logging = off
# log verbose details about gossip
# Enforce configuration compatibility checks when joining a cluster.
# Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
# when the cluster does not support this feature. Compatibility checks are always performed and warning and
- # error messsages are logged.
+ # error messages are logged.
#
# This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
# cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
"user.home", "user.name", "user.dir",
"socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
"akka.remote.secure-cookie",
+ "akka.remote.classic.netty.ssl.security",
+ # Pre-2.6 path, kept around to avoid sending values misconfigured with old paths
"akka.remote.netty.ssl.security",
"akka.remote.artery.ssl"
]
}
}
+
+#//#split-brain-resolver
+
+# To enable the split brain resolver you first need to enable the provider in your application.conf:
+# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+akka.cluster.split-brain-resolver {
+ # Select one of the available strategies (see descriptions below):
+ # static-quorum, keep-majority, keep-oldest, down-all, lease-majority
+ active-strategy = keep-majority
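+ # For example, to use static-quorum instead (its quorum-size must then be
+ # configured, see below):
+ # active-strategy = static-quorum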
+
+ #//#stable-after
+ # Time margin after which shards or singletons that belonged to a downed/removed
+ # partition are created in the surviving partition. The purpose of this margin is that
+ # in case of a network partition the persistent actors in the non-surviving partitions
+ # must be stopped before corresponding persistent actors are started somewhere else.
+ # This is useful if you implement downing strategies that handle network partitions,
+ # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+ # A decision is taken by the strategy when there have been no membership or
+ # reachability changes for this duration, i.e. the cluster state is stable.
+ stable-after = 20s
+ #//#stable-after
+
+ # When reachability observations by the failure detector are changed the SBR decisions
+ # are deferred until there are no changes within the 'stable-after' duration.
+ # If this continues for too long it might be an indication of an unstable system/network
+ # and it could result in delayed or conflicting decisions on separate sides of a network
+ # partition.
+ # As a precaution for that scenario all nodes are downed if no decision is made within
+ # `stable-after + down-all-when-unstable` from the first unreachability event.
+ # The measurement is reset if all unreachable nodes have been healed, downed or removed, or
+ # if there are no changes within `stable-after * 2`.
+ # The value can be on, off, or a duration.
+ # By default it is 'on' and then it is derived to be 3/4 of stable-after, but not less than
+ # 4 seconds.
+ down-all-when-unstable = on
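+ # For example, with the default stable-after = 20s the 'on' default corresponds
+ # to 15s (3/4 of 20s). An explicit duration can also be given (illustrative):
+ # down-all-when-unstable = 15s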
+
+}
+#//#split-brain-resolver
+
+ # Down the unreachable nodes if the number of remaining nodes is greater than or equal
+ # to the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down
+ # that side of the partition. In other words, 'quorum-size' defines the minimum number
+ # of nodes that the cluster must have to be operational. If there are unreachable nodes
+ # when starting up the cluster, before reaching this limit, the cluster may shut itself
+ # down immediately.
+# This is not an issue if you start all nodes at approximately the same time.
+#
+# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
+# then both sides may down each other and thereby form two separate clusters. For example,
+ # quorum-size configured to 3 in a 6 node cluster may result in a split where each
+ # side consists of 3 nodes, i.e. each side thinks it has enough nodes to continue by
+ # itself. A warning is logged if this recommendation is violated.
+#//#static-quorum
+akka.cluster.split-brain-resolver.static-quorum {
+ # minimum number of nodes that the cluster must have
+ quorum-size = undefined
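+ # For example, quorum-size = 3 supports a cluster of up to 5 nodes
+ # (5 = 3 * 2 - 1, see the note above):
+ # quorum-size = 3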
+
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#static-quorum
+
+ # Down the unreachable nodes if the current node is in the majority part, based on
+ # the last known membership information. Otherwise down the reachable nodes, i.e.
+ # the own part. If the parts are of equal size the part containing the node with
+ # the lowest address is kept.
+ # Note that if there are more than two partitions and none is in majority, each
+ # part will shut itself down, terminating the whole cluster.
+#//#keep-majority
+akka.cluster.split-brain-resolver.keep-majority {
+ # if the 'role' is defined the decision is based only on members with that 'role'
+ role = ""
+}
+#//#keep-majority
+
+# Down the part that does not contain the oldest member (current singleton).
+#
+ # There is one exception to this rule when 'down-if-alone' is set to 'on'.
+# Then, if the oldest node has partitioned from all other nodes the oldest
+# will down itself and keep all other nodes running. The strategy will not
+# down the single oldest node when it is the only remaining node in the cluster.
+#
+ # Note that if the oldest node crashes the others will remove it from the cluster
+ # when 'down-if-alone' is 'on'; otherwise they will down themselves if the
+ # oldest node crashes, i.e. shut down the whole cluster together with the oldest node.
+#//#keep-oldest
+akka.cluster.split-brain-resolver.keep-oldest {
+ # Enable downing of the oldest node when it is partitioned from all other nodes
+ down-if-alone = on
+
+ # if the 'role' is defined the decision is based only on members with that 'role',
+ # i.e. using the oldest member (singleton) within the nodes with that role
+ role = ""
+}
+#//#keep-oldest
+
+# Keep the part that can acquire the lease, and down the other part.
+ # A best effort is made to keep the side that has the most nodes, i.e. the majority side.
+# This is achieved by adding a delay before trying to acquire the lease on the
+# minority side.
+#//#lease-majority
+akka.cluster.split-brain-resolver.lease-majority {
+ lease-implementation = ""
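+ # The value is the config path of the lease implementation to use. For example,
+ # the Kubernetes lease from the Akka Management project (requires that module
+ # on the classpath; shown as an illustration):
+ # lease-implementation = "akka.coordination.lease.kubernetes"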
+
+ # This delay is used on the minority side before trying to acquire the lease,
+ # as a best effort to keep the majority side.
+ acquire-lease-delay-for-minority = 2s
+
+ # If the 'role' is defined the majority/minority is based only on members with that 'role'.
+ role = ""
+}
+#//#lease-majority