1 ####################################
2 # Akka Actor Reference Config File #
3 ####################################
5 # This is the reference config file that contains all the default settings.
6 # Make your edits/overrides in your application.conf.
8 # Akka version, checked against the runtime version of Akka. Loaded from generated conf file.
12 # Home directory of Akka, modules in the deploy directory will be loaded
15 # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs
17 loggers = ["akka.event.Logging$DefaultLogger"]
19 # Filter of log events that is used by the LoggingAdapter before
20 # publishing log events to the eventStream. It can perform
21 # fine grained filtering based on the log source. The default
22 # implementation filters on the `loglevel`.
23 # FQCN of the LoggingFilter. The Class of the FQCN must implement
24 # akka.event.LoggingFilter and have a public constructor with
25 # (akka.actor.ActorSystem.Settings, akka.event.EventStream) parameters.
26 logging-filter = "akka.event.DefaultLoggingFilter"
28 # Specifies the default loggers dispatcher
29 loggers-dispatcher = "akka.actor.default-dispatcher"
31 # Loggers are created and registered synchronously during ActorSystem
32 # start-up, and since they are actors, this timeout is used to bound the waiting time
34 logger-startup-timeout = 5s
36 # Log level used by the configured loggers (see "loggers") as soon
37 # as they have been started; before that, see "stdout-loglevel"
38 # Options: OFF, ERROR, WARNING, INFO, DEBUG
41 # Log level for the very basic logger activated during ActorSystem startup.
42 # This logger prints the log messages to stdout (System.out).
43 # Options: OFF, ERROR, WARNING, INFO, DEBUG
44 stdout-loglevel = "WARNING"
46 # Log the complete configuration at INFO level when the actor system is started.
47 # This is useful when you are uncertain of what configuration is used.
48 log-config-on-start = off
50 # Log at info level when messages are sent to dead letters, or published to
51 # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`.
53 # on: all dead letters are logged
54 # off: no logging of dead letters
55 # n: positive integer, number of dead letters that will be logged
58 # Possibility to turn off logging of dead letters while the actor system
59 # is shutting down. Logging is only done when enabled by 'log-dead-letters'
61 log-dead-letters-during-shutdown = off
63 # When log-dead-letters is enabled, this will re-enable the logging after configured duration.
64 # infinite: suspend the logging forever;
65 # or a duration (e.g. 5 minutes), after which the logging will be re-enabled.
66 log-dead-letters-suspend-duration = 5 minutes
68 # List FQCN of extensions which shall be loaded at actor system startup.
69 # Library extensions are regular extensions that are loaded at startup and are
70 # available for third party library authors to enable auto-loading of extensions when
71 # present on the classpath. This is done by appending entries:
72 # 'library-extensions += "Extension"' in the library `reference.conf`.
74 # Should not be set by end user applications in 'application.conf', use the extensions property for that
76 library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]
78 # List FQCN of extensions which shall be loaded at actor system startup.
79 # Should be in the format: 'extensions = ["foo", "bar"]' etc.
80 # See the Akka Documentation for more info about Extensions
83 # Toggles whether threads created by this ActorSystem should be daemons or not
86 # JVM shutdown, System.exit(-1), in case of a fatal error,
87 # such as OutOfMemoryError
88 jvm-exit-on-fatal-error = on
90 # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will
91 # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`.
92 # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below.
93 # This property makes it possible to disable all such hooks if the application itself
94 # or a higher level framework such as Play prefers to install the JVM shutdown hook and
95 # terminate the ActorSystem itself, with or without using CoordinatedShutdown.
96 jvm-shutdown-hooks = on
98 # Version must be the same across all modules and if they are different the startup
99 # will fail. It's possible, but not recommended, to disable this check and only log a warning,
100 # by setting this property to `off`.
101 fail-mixed-versions = on
103 # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running
104 # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops
106 java-flight-recorder {
112 # Either one of "local", "remote" or "cluster" or the
113 # FQCN of the ActorRefProvider to be used; the below is the built-in default,
114 # note that "remote" and "cluster" requires the akka-remote and akka-cluster
115 # artifacts to be on the classpath.
118 # The guardian "/user" will use this class to obtain its supervisorStrategy.
119 # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator.
120 # In addition to the default there is akka.actor.StoppingSupervisorStrategy.
121 guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy"
123 # Timeout for Extension creation and a few other potentially blocking
124 # initialization tasks.
125 creation-timeout = 20s
127 # Serializes and deserializes (non-primitive) messages to ensure immutability,
128 # this is only intended for testing.
129 serialize-messages = off
131 # Serializes and deserializes creators (in Props) to ensure that they can be
132 # sent over the network, this is only intended for testing. Purely local deployments
133 # as marked with deploy.scope == LocalScope are exempt from verification.
134 serialize-creators = off
136 # If serialize-messages or serialize-creators are enabled classes that starts with
137 # a prefix listed here are not verified.
138 no-serialization-verification-needed-class-prefix = ["akka."]
140 # Timeout for send operations to top-level actors which are in the process
141 # of being started. This is only relevant if using a bounded mailbox or the
142 # CallingThreadDispatcher for a top-level actor.
143 unstarted-push-timeout = 10s
145 # TypedActor deprecated since 2.6.0.
147 # Default timeout for the deprecated TypedActor (not the new actor APIs in 2.6)
148 # methods with non-void return type.
152 # Mapping between 'deployment.router' short names to fully qualified class names
153 router.type-mapping {
154 from-code = "akka.routing.NoRouter"
155 round-robin-pool = "akka.routing.RoundRobinPool"
156 round-robin-group = "akka.routing.RoundRobinGroup"
157 random-pool = "akka.routing.RandomPool"
158 random-group = "akka.routing.RandomGroup"
159 balancing-pool = "akka.routing.BalancingPool"
160 smallest-mailbox-pool = "akka.routing.SmallestMailboxPool"
161 broadcast-pool = "akka.routing.BroadcastPool"
162 broadcast-group = "akka.routing.BroadcastGroup"
163 scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool"
164 scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup"
165 tail-chopping-pool = "akka.routing.TailChoppingPool"
166 tail-chopping-group = "akka.routing.TailChoppingGroup"
167 consistent-hashing-pool = "akka.routing.ConsistentHashingPool"
168 consistent-hashing-group = "akka.routing.ConsistentHashingGroup"
173 # deployment id pattern - in the format: /parent/child etc.
176 # The id of the dispatcher to use for this actor.
177 # If undefined or empty the dispatcher specified in code
178 # (Props.withDispatcher) is used, or default-dispatcher if not
182 # The id of the mailbox to use for this actor.
183 # If undefined or empty the default mailbox of the configured dispatcher
184 # is used or if there is no mailbox configuration the mailbox specified
185 # in code (Props.withMailbox) is used.
186 # If there is a mailbox defined in the configured dispatcher then that
187 # overrides this setting.
190 # routing (load-balance) scheme to use
191 # - available: "from-code", "round-robin", "random", "smallest-mailbox",
192 # "scatter-gather", "broadcast"
193 # - or: Fully qualified class name of the router class.
194 # The class must extend akka.routing.CustomRouterConfig and
195 # have a public constructor with com.typesafe.config.Config
196 # and optional akka.actor.DynamicAccess parameter.
197 # - default is "from-code";
198 # Whether or not an actor is transformed to a Router is decided in code
199 # only (Props.withRouter). The type of router can be overridden in the
200 # configuration; specifying "from-code" means that the values specified
201 # in the code shall be used.
202 # In case of routing, the actors to be routed to can be specified
204 # - nr-of-instances: will create that many children
205 # - routees.paths: will route messages to these paths using ActorSelection,
206 # i.e. will not create children
207 # - resizer: dynamically resizable number of routees as specified in
211 # number of children to create in case of a router;
212 # this setting is ignored if routees.paths is given
215 # within is the timeout used for routers containing future calls
218 # number of virtual nodes per node for consistent-hashing router
219 virtual-nodes-factor = 10
221 tail-chopping-router {
222 # interval is duration between sending message to next routee
223 interval = 10 milliseconds
227 # Alternatively to giving nr-of-instances you can specify the full
228 # paths of those actors which should be routed to. This setting takes
229 # precedence over nr-of-instances
233 # To use a dedicated dispatcher for the routees of the pool you can
234 # define the dispatcher configuration inline with the property name
235 # 'pool-dispatcher' in the deployment section of the router.
238 # fork-join-executor.parallelism-min = 5
239 # fork-join-executor.parallelism-max = 5
242 # Routers with dynamically resizable number of routees; this feature is
243 # enabled by including (parts of) this section in the deployment
248 # The fewest number of routees the router should ever have.
251 # The highest number of routees the router should ever have.
252 # Must be greater than or equal to lower-bound.
255 # Threshold used to evaluate if a routee is considered to be busy
256 # (under pressure). Implementation depends on this value (default is 1).
257 # 0: number of routees currently processing a message.
258 # 1: number of routees currently processing a message that
259 # also have some messages in mailbox.
260 # > 1: number of routees with at least the configured pressure-threshold
261 # messages in their mailbox. Note that estimating mailbox size of
262 # default UnboundedMailbox is O(N) operation.
263 pressure-threshold = 1
265 # Percentage to increase capacity whenever all routees are busy.
266 # For example, 0.2 would increase 20% (rounded up), i.e. if current
267 # capacity is 6 it will request an increase of 2 more routees.
270 # Minimum fraction of busy routees before backing off.
271 # For example, if this is 0.3, then we'll remove some routees only when
272 # less than 30% of routees are busy, i.e. if current capacity is 10 and
273 # 3 are busy then the capacity is unchanged, but if 2 or less are busy
274 # the capacity is decreased.
275 # Use 0.0 or negative to avoid removal of routees.
276 backoff-threshold = 0.3
278 # Fraction of routees to be removed when the resizer reaches the
280 # For example, 0.1 would decrease 10% (rounded up), i.e. if current
281 # capacity is 9 it will request a decrease of 1 routee.
284 # Number of messages between resize operation.
285 # Use 1 to resize before each message.
286 messages-per-resize = 10
289 # Routers with dynamically resizable number of routees based on
290 # performance metrics.
291 # This feature is enabled by including (parts of) this section in
292 # the deployment, cannot be enabled together with default resizer.
293 optimal-size-exploring-resizer {
297 # The fewest number of routees the router should ever have.
300 # The highest number of routees the router should ever have.
301 # Must be greater than or equal to lower-bound.
304 # probability of doing a ramping down when all routees are busy
305 # during exploration.
306 chance-of-ramping-down-when-full = 0.2
308 # Interval between each resize attempt
311 # If the routees have not been fully utilized (i.e. all routees busy)
312 # for such length, the resizer will downsize the pool.
313 downsize-after-underutilized-for = 72h
315 # Duration exploration, the ratio between the largest step size and
316 # current pool size. E.g. if the current pool size is 50, and the
317 # explore-step-size is 0.1, the maximum pool size change during
318 # exploration will be +- 5
319 explore-step-size = 0.1
321 # Probability of doing an exploration vs. optimization.
322 chance-of-exploration = 0.4
324 # When downsizing after a long streak of underutilization, the resizer
325 # will downsize the pool to the highest utilization multiplied by
326 # a downsize ratio. This downsize ratio determines the new pool's size
327 # in comparison to the highest utilization.
328 # E.g. if the highest utilization is 10, and the down size ratio
329 # is 0.8, the pool will be downsized to 8
332 # When optimizing, the resizer only considers the sizes adjacent to the
333 # current size. This number indicates how many adjacent sizes to consider.
334 optimization-range = 16
336 # The weight of the latest metric over old metrics when collecting
337 # performance metrics.
338 # E.g. if the last processing speed is 10 millis per message at pool
339 # size 5, and if the new processing speed collected is 6 millis per
340 # message at pool size 5. Given a weight of 0.3, the metrics
341 # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis
342 # Obviously, this number should be between 0 and 1.
343 weight-of-latest-metric = 0.5
347 "/IO-DNS/inet-address" {
348 mailbox = "unbounded"
349 router = "consistent-hashing-pool"
353 "/IO-DNS/inet-address/*" {
354 dispatcher = "akka.actor.default-blocking-io-dispatcher"
357 "/IO-DNS/async-dns" {
358 mailbox = "unbounded"
359 router = "round-robin-pool"
365 # Must be one of the following
366 # Dispatcher, PinnedDispatcher, or a FQCN to a class inheriting
367 # MessageDispatcherConfigurator with a public constructor with
368 # both a com.typesafe.config.Config parameter and an
369 # akka.dispatch.DispatcherPrerequisites parameter.
370 # PinnedDispatcher must be used together with executor=thread-pool-executor.
373 # Which kind of ExecutorService to use for this dispatcher
375 # - "default-executor" requires a "default-executor" section
376 # - "fork-join-executor" requires a "fork-join-executor" section
377 # - "thread-pool-executor" requires a "thread-pool-executor" section
378 # - "affinity-pool-executor" requires an "affinity-pool-executor" section
379 # - A FQCN of a class extending ExecutorServiceConfigurator
380 executor = "default-executor"
382 # This will be used if you have set "executor = "default-executor"".
383 # If an ActorSystem is created with a given ExecutionContext, this
384 # ExecutionContext will be used as the default executor for all
385 # dispatchers in the ActorSystem configured with
386 # executor = "default-executor". Note that "default-executor"
387 # is the default value for executor, and therefore used if not
388 # specified otherwise. If no ExecutionContext is given,
389 # the executor configured in "fallback" will be used.
391 fallback = "fork-join-executor"
394 # This will be used if you have set "executor = "affinity-pool-executor""
395 # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool.
396 # This executor is classified as "ApiMayChange".
397 affinity-pool-executor {
398 # Min number of threads to cap factor-based parallelism number to
401 # The parallelism factor is used to determine thread pool size using the
402 # following formula: ceil(available processors * factor). Resulting size
403 # is then bounded by the parallelism-min and parallelism-max values.
404 parallelism-factor = 0.8
406 # Max number of threads to cap factor-based parallelism number to.
409 # Each worker in the pool uses a separate bounded MPSC queue. This value
410 # indicates the upper bound of the queue. Whenever an attempt to enqueue
411 # a task is made and the queue does not have capacity to accommodate
412 # the task, the rejection handler created by the factory specified
413 # in "rejection-handler" is invoked.
414 task-queue-size = 512
416 # FQCN of the Rejection handler used in the pool.
417 # Must have an empty public constructor and must
418 # implement akka.actor.affinity.RejectionHandlerFactory.
419 rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler"
421 # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
422 # The tradeoff is that to have low latency more CPU time must be used to be
423 # able to react quickly on incoming messages or send as fast as possible after
424 # backoff backpressure.
425 # Level 1 strongly prefers low CPU consumption over low latency.
426 # Level 10 strongly prefers low latency over low CPU consumption.
429 # FQCN of the akka.dispatch.affinity.QueueSelectorFactory.
430 # The Class of the FQCN must have a public constructor with a
431 # (com.typesafe.config.Config) parameter.
432 # A QueueSelectorFactory creates instances of akka.dispatch.affinity.QueueSelector,
433 # that is responsible for determining which task queue a Runnable should be enqueued in.
434 queue-selector = "akka.dispatch.affinity.FairDistributionHashCache"
436 # When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector
437 # internally the AffinityPool uses two methods to determine which task
438 # queue to allocate a Runnable to:
439 # - map based - maintains a round robin counter and a map of Runnable
440 # hashcodes to queues that they have been associated with. This ensures
441 # maximum fairness in terms of work distribution, meaning that each worker
442 # will get approximately equal amount of mailboxes to execute. This is suitable
443 # in cases where we have a small number of actors that will be scheduled on
444 # the pool and we want to ensure the maximum possible utilization of the
446 # - hash based - the task - queue in which the runnable should go is determined
447 # by using a uniformly distributed int to int hash function which uses the
448 # hash code of the Runnable as an input. This is preferred in situations where we
449 # have enough number of distinct actors to ensure statistically uniform
450 # distribution of work across threads or we are ready to sacrifice the
451 # former for the added benefit of avoiding map look-ups.
452 fair-work-distribution {
453 # The value serves as a threshold which determines the point at which the
454 # pool switches from the first to the second work distribution schemes.
455 # For example, if the value is set to 128, the pool can observe up to
456 # 128 unique actors and schedule their mailboxes using the map based
457 # approach. Once this number is reached the pool switches to hash based
458 # task distribution mode. If the value is set to 0, the map based
459 # work distribution approach is disabled and only the hash based is
460 # used irrespective of the number of unique actors. Valid range is
461 # 0 to 2048 (inclusive)
466 # This will be used if you have set "executor = "fork-join-executor""
467 # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool
469 # Min number of threads to cap factor-based parallelism number to
472 # The parallelism factor is used to determine thread pool size using the
473 # following formula: ceil(available processors * factor). Resulting size
474 # is then bounded by the parallelism-min and parallelism-max values.
475 parallelism-factor = 1.0
477 # Max number of threads to cap factor-based parallelism number to
480 # Set to "FIFO" for queue-like peeking mode (uses "poll") or "LIFO" for
481 # stack-like peeking mode (uses "pop").
482 task-peeking-mode = "FIFO"
485 # This will be used if you have set "executor = "thread-pool-executor""
486 # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor
487 thread-pool-executor {
488 # Keep alive time for threads
489 keep-alive-time = 60s
491 # Define a fixed thread pool size with this property. The corePoolSize
492 # and the maximumPoolSize of the ThreadPoolExecutor will be set to this
493 # value, if it is defined. Then the other pool-size properties will not
496 # Valid values are: `off` or a positive integer.
497 fixed-pool-size = off
499 # Min number of threads to cap factor-based corePoolSize number to
500 core-pool-size-min = 8
502 # The core-pool-size-factor is used to determine corePoolSize of the
503 # ThreadPoolExecutor using the following formula:
504 # ceil(available processors * factor).
505 # Resulting size is then bounded by the core-pool-size-min and
506 # core-pool-size-max values.
507 core-pool-size-factor = 3.0
509 # Max number of threads to cap factor-based corePoolSize number to
510 core-pool-size-max = 64
512 # Minimum number of threads to cap factor-based maximumPoolSize number to
513 max-pool-size-min = 8
515 # The max-pool-size-factor is used to determine maximumPoolSize of the
516 # ThreadPoolExecutor using the following formula:
517 # ceil(available processors * factor)
518 # The maximumPoolSize will not be less than corePoolSize.
519 # It is only used if using a bounded task queue.
520 max-pool-size-factor = 3.0
522 # Max number of threads to cap factor-based maximumPoolSize number to
523 max-pool-size-max = 64
525 # Specifies the bounded capacity of the task queue (< 1 == unbounded)
528 # Specifies which type of task queue will be used, can be "array" or "linked" (default)
530 task-queue-type = "linked"
532 # Allow core threads to time out
533 allow-core-timeout = on
536 # How long time the dispatcher will wait for new actors until it shuts down
537 shutdown-timeout = 1s
539 # Throughput defines the number of messages that are processed in a batch
540 # before the thread is returned to the pool. Set to 1 for as fair as possible.
543 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
544 throughput-deadline-time = 0ms
546 # For BalancingDispatcher: If the balancing dispatcher should attempt to
547 # schedule idle actors using the same dispatcher when a message comes in,
548 # and the dispatchers ExecutorService is not fully busy already.
549 attempt-teamwork = on
551 # If this dispatcher requires a specific type of mailbox, specify the
552 # fully-qualified class name here; the actually created mailbox will
553 # be a subtype of this type. The empty string signifies no requirement.
554 mailbox-requirement = ""
557 # Default separate internal dispatcher to run Akka internal tasks and actors on
558 # protecting them against starvation because of accidental blocking in user actors (which run on the
559 # default dispatcher)
560 internal-dispatcher {
562 executor = "fork-join-executor"
566 parallelism-factor = 1.0
571 default-blocking-io-dispatcher {
573 executor = "thread-pool-executor"
576 thread-pool-executor {
582 # FQCN of the MailboxType. The Class of the FQCN must have a public constructor with
584 # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters.
585 mailbox-type = "akka.dispatch.UnboundedMailbox"
587 # If the mailbox is bounded then it uses this setting to determine its
588 # capacity. The provided value must be positive.
590 # Up to version 2.1 the mailbox type was determined based on this setting;
591 # this is no longer the case, the type must explicitly be a bounded mailbox.
592 mailbox-capacity = 1000
594 # If the mailbox is bounded then this is the timeout for enqueueing
595 # in case the mailbox is full. Negative values signify infinite
596 # timeout, which should be avoided as it bears the risk of dead-lock.
597 mailbox-push-timeout-time = 10s
599 # For Actor with Stash: The default capacity of the stash.
600 # If negative (or zero) then an unbounded stash is used (default)
601 # If positive then a bounded stash is used and the capacity is set using
607 # Mapping between message queue semantics and mailbox configurations.
608 # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different
609 # mailbox types on actors.
610 # If your Actor implements RequiresMessageQueue[T], then when you create
611 # an instance of that actor its mailbox type will be decided by looking
612 # up a mailbox configuration via T in this mapping
614 "akka.dispatch.UnboundedMessageQueueSemantics" =
615 akka.actor.mailbox.unbounded-queue-based
616 "akka.dispatch.BoundedMessageQueueSemantics" =
617 akka.actor.mailbox.bounded-queue-based
618 "akka.dispatch.DequeBasedMessageQueueSemantics" =
619 akka.actor.mailbox.unbounded-deque-based
620 "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" =
621 akka.actor.mailbox.unbounded-deque-based
622 "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" =
623 akka.actor.mailbox.bounded-deque-based
624 "akka.dispatch.MultipleConsumerSemantics" =
625 akka.actor.mailbox.unbounded-queue-based
626 "akka.dispatch.ControlAwareMessageQueueSemantics" =
627 akka.actor.mailbox.unbounded-control-aware-queue-based
628 "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" =
629 akka.actor.mailbox.unbounded-control-aware-queue-based
630 "akka.dispatch.BoundedControlAwareMessageQueueSemantics" =
631 akka.actor.mailbox.bounded-control-aware-queue-based
632 "akka.event.LoggerMessageQueueSemantics" =
633 akka.actor.mailbox.logger-queue
636 unbounded-queue-based {
637 # FQCN of the MailboxType, The Class of the FQCN must have a public
638 # constructor with (akka.actor.ActorSystem.Settings,
639 # com.typesafe.config.Config) parameters.
640 mailbox-type = "akka.dispatch.UnboundedMailbox"
643 bounded-queue-based {
644 # FQCN of the MailboxType, The Class of the FQCN must have a public
645 # constructor with (akka.actor.ActorSystem.Settings,
646 # com.typesafe.config.Config) parameters.
647 mailbox-type = "akka.dispatch.BoundedMailbox"
650 unbounded-deque-based {
651 # FQCN of the MailboxType, The Class of the FQCN must have a public
652 # constructor with (akka.actor.ActorSystem.Settings,
653 # com.typesafe.config.Config) parameters.
654 mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
657 bounded-deque-based {
658 # FQCN of the MailboxType, The Class of the FQCN must have a public
659 # constructor with (akka.actor.ActorSystem.Settings,
660 # com.typesafe.config.Config) parameters.
661 mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox"
664 unbounded-control-aware-queue-based {
665 # FQCN of the MailboxType, The Class of the FQCN must have a public
666 # constructor with (akka.actor.ActorSystem.Settings,
667 # com.typesafe.config.Config) parameters.
668 mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox"
671 bounded-control-aware-queue-based {
672 # FQCN of the MailboxType, The Class of the FQCN must have a public
673 # constructor with (akka.actor.ActorSystem.Settings,
674 # com.typesafe.config.Config) parameters.
675 mailbox-type = "akka.dispatch.BoundedControlAwareMailbox"
678 # The LoggerMailbox will drain all messages in the mailbox
679 # when the system is shutdown and deliver them to the StandardOutLogger.
680 # Do not change this unless you know what you are doing.
682 mailbox-type = "akka.event.LoggerMailboxType"
687 # enable function of Actor.loggable(), which is to log any received message
688 # at DEBUG level, see the “Testing Actor Systems” section of the Akka
689 # Documentation at https://akka.io/docs
692 # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.)
695 # enable DEBUG logging of actor lifecycle changes
698 # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
701 # enable DEBUG logging of subscription changes on the eventStream
704 # enable DEBUG logging of unhandled messages
707 # enable WARN logging of misconfigured routers
708 router-misconfiguration = off
711 # SECURITY BEST-PRACTICE is to disable java serialization for its multiple
712 # known attack surfaces.
714 # This setting is a short-cut to
715 # - using DisabledJavaSerializer instead of JavaSerializer
717 # Completely disable the use of `akka.serialization.JavaSerialization` by the
718 # Akka Serialization extension, instead DisabledJavaSerializer will
719 # be inserted which will fail explicitly if attempts to use java serialization are made.
721 # The log messages emitted by such serializer SHOULD be treated as potential
722 # attacks which the serializer prevented, as they MAY indicate an external operator
723 # attempting to send malicious messages intending to use java serialization as attack vector.
724 # The attempts are logged with the SECURITY marker.
726 # Please note that this option does not stop you from manually invoking java serialization
728 allow-java-serialization = on
730 # Log warnings when the Java serialization is used to serialize messages.
731 # Java serialization is not very performant and should not be used in production
732 # environments unless you don't care about performance and security. In that case
733 # you can turn this off.
734 warn-about-java-serializer-usage = on
736 # To be used with the above warn-about-java-serializer-usage
737 # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
738 # warnings are suppressed for classes extending NoSerializationVerificationNeeded
740 warn-on-no-serialization-verification = on
742 # Entries for pluggable serializers and their bindings.
744 java = "akka.serialization.JavaSerializer"
745 bytes = "akka.serialization.ByteArraySerializer"
746 primitive-long = "akka.serialization.LongSerializer"
747 primitive-int = "akka.serialization.IntSerializer"
748 primitive-string = "akka.serialization.StringSerializer"
749 primitive-bytestring = "akka.serialization.ByteStringSerializer"
750 primitive-boolean = "akka.serialization.BooleanSerializer"
753 # Class to Serializer binding. You only need to specify the name of an
754 # interface or abstract base class of the messages. In case of ambiguity it
755 # is using the most specific configured class, or giving a warning and
756 # choosing the “first” one.
758 # To disable one of the default serializers, assign its class to "none", like
759 # "java.io.Serializable" = none
760 serialization-bindings {
762 "java.io.Serializable" = java
764 "java.lang.String" = primitive-string
765 "akka.util.ByteString$ByteString1C" = primitive-bytestring
766 "akka.util.ByteString$ByteString1" = primitive-bytestring
767 "akka.util.ByteString$ByteStrings" = primitive-bytestring
768 "java.lang.Long" = primitive-long
769 "scala.Long" = primitive-long
770 "java.lang.Integer" = primitive-int
771 "scala.Int" = primitive-int
772 "java.lang.Boolean" = primitive-boolean
773 "scala.Boolean" = primitive-boolean
776 # Configuration namespace of serialization identifiers.
777 # Each serializer implementation must have an entry in the following format:
778 # `akka.actor.serialization-identifiers."FQCN" = ID`
779 # where `FQCN` is fully qualified class name of the serializer implementation
780 # and `ID` is globally unique serializer identifier number.
781 # Identifier values from 0 to 40 are reserved for Akka internal usage.
782 serialization-identifiers {
783 "akka.serialization.JavaSerializer" = 1
784 "akka.serialization.ByteArraySerializer" = 4
788 primitive-string = 20
789 primitive-bytestring = 21
790 primitive-boolean = 35
795 serialization.protobuf {
796 # deprecated, use `allowed-classes` instead
798 "com.google.protobuf.GeneratedMessage",
799 "com.google.protobuf.GeneratedMessageV3",
800 "scalapb.GeneratedMessageCompanion",
801 "akka.protobuf.GeneratedMessage",
802 "akka.protobufv3.internal.GeneratedMessageV3"
805 # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
806 # It can be exact class name or name of super class or interfaces (one level).
807 # This is useful when a class is not used for serialization any more and therefore removed
808 # from `serialization-bindings`, but should still be possible to deserialize.
809 allowed-classes = ${akka.serialization.protobuf.whitelist-class}
813 # Used to set the behavior of the scheduler.
814 # Changing the default values may change the system behavior drastically so make
815 # sure you know what you're doing! See the Scheduler section of the Akka
816 # Documentation for more details.
818 # The LightArrayRevolverScheduler is used as the default scheduler in the
819 # system. It does not execute the scheduled tasks on exact time, but on every
820 # tick, it will run everything that is (over)due. You can increase or decrease
821 # the accuracy of the execution timing by specifying smaller or larger tick
822 # duration. If you are scheduling a lot of tasks you should consider increasing
823 # the ticks per wheel.
824 # Note that it might take up to 1 tick to stop the Timer, so setting the
825 # tick-duration to a high value will make shutting down the actor system
829 # The timer uses a circular wheel of buckets to store the timer tasks.
830 # This should be set such that the majority of scheduled timeouts (for high
831 # scheduling frequency) will be shorter than one rotation of the wheel
832 # (ticks-per-wheel * tick-duration)
833 # THIS MUST BE A POWER OF TWO!
834 ticks-per-wheel = 512
836 # This setting selects the timer implementation which shall be loaded at
838 # The class given here must implement the akka.actor.Scheduler interface
839 # and offer a public constructor which takes three arguments:
840 # 1) com.typesafe.config.Config
841 # 2) akka.event.LoggingAdapter
842 # 3) java.util.concurrent.ThreadFactory
843 implementation = akka.actor.LightArrayRevolverScheduler
845 # When shutting down the scheduler, there will typically be a thread which
846 # needs to be stopped, and this timeout determines how long to wait for
847 # that to happen. In case of timeout the shutdown of the actor system will
848 # proceed without running possibly still enqueued tasks.
849 shutdown-timeout = 5s
854 # By default the select loops run on dedicated threads, hence using a
857 type = "PinnedDispatcher"
858 executor = "thread-pool-executor"
859 thread-pool-executor.allow-core-timeout = off
864 # The number of selectors to stripe the served channels over; each of
865 # these will use one select loop on the selector-dispatcher.
868 # Maximum number of open channels supported by this TCP module; there is
869 # no intrinsic general limit, this setting is meant to enable DoS
870 # protection by limiting the number of concurrently connected clients.
871 # Also note that this is a "soft" limit; in certain cases the implementation
872 # will accept a few connections more or a few less than the number configured
873 # here. Must be an integer > 0 or "unlimited".
874 max-channels = 256000
876 # When trying to assign a new connection to a selector and the chosen
877 # selector is at full capacity, retry selector choosing and assignment
878 # this many times before giving up
879 selector-association-retries = 10
881 # The maximum number of connections that are accepted in one go,
882 # higher numbers decrease latency, lower numbers increase fairness on
883 # the worker-dispatcher
884 batch-accept-limit = 10
886 # The number of bytes per direct buffer in the pool used to read or write
887 # network data from the kernel.
888 direct-buffer-size = 128 KiB
890 # The maximal number of direct buffers kept in the direct buffer pool for
892 direct-buffer-pool-limit = 1000
894 # The duration a connection actor waits for a `Register` message from
895 # its commander before aborting the connection.
896 register-timeout = 5s
898 # The maximum number of bytes delivered by a `Received` message. Before
899 # more data is read from the network the connection actor will try to
901 # The purpose of this setting is to impose a smaller limit than the
902 # configured receive buffer size. When using value 'unlimited' it will
903 # try to read all from the receive buffer.
904 max-received-message-size = unlimited
906 # Enable fine grained logging of what goes on inside the implementation.
907 # Be aware that this may log more than once per message sent to the actors
908 # of the tcp implementation.
911 # Fully qualified config path which holds the dispatcher configuration
912 # to be used for running the select() calls in the selectors
913 selector-dispatcher = "akka.io.pinned-dispatcher"
915 # Fully qualified config path which holds the dispatcher configuration
916 # for the read/write worker actors
917 worker-dispatcher = "akka.actor.internal-dispatcher"
919 # Fully qualified config path which holds the dispatcher configuration
920 # for the selector management actors
921 management-dispatcher = "akka.actor.internal-dispatcher"
923 # Fully qualified config path which holds the dispatcher configuration
924 # on which file IO tasks are scheduled
925 file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
927 # The maximum number of bytes (or "unlimited") to transfer in one batch
928 # when using `WriteFile` command which uses `FileChannel.transferTo` to
929 # pipe files to a TCP socket. On some OS like Linux `FileChannel.transferTo`
930 # may block for a long time when network IO is faster than file IO.
931 # Decreasing the value may improve fairness while increasing may improve
933 file-io-transferTo-limit = 512 KiB
935 # The number of times to retry the `finishConnect` call after being notified about
936 # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that
937 # `finishConnect` will succeed, which is the case on Android.
938 finish-connect-retries = 5
940 # On Windows connection aborts are not reliably detected unless an OP_READ is
941 # registered on the selector _after_ the connection has been reset. This
942 # workaround enables an OP_CONNECT which forces the abort to be visible on Windows.
943 # Enabling this setting on other platforms than Windows will cause various failures
944 # and undefined behavior.
945 # Possible values of this key are on, off and auto where auto will enable the
946 # workaround if Windows is detected automatically.
947 windows-connection-abort-workaround-enabled = off
952 # The number of selectors to stripe the served channels over; each of
953 # these will use one select loop on the selector-dispatcher.
956 # Maximum number of open channels supported by this UDP module. Generally
957 # UDP does not require a large number of channels, therefore it is
958 # recommended to keep this setting low.
961 # The select loop can be used in two modes:
962 # - setting "infinite" will select without a timeout, hogging a thread
963 # - setting a positive timeout will do a bounded select call,
964 # enabling sharing of a single thread between multiple selectors
965 # (in this case you will have to use a different configuration for the
966 # selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
967 # - setting it to zero means polling, i.e. calling selectNow()
968 select-timeout = infinite
970 # When trying to assign a new connection to a selector and the chosen
971 # selector is at full capacity, retry selector choosing and assignment
972 # this many times before giving up
973 selector-association-retries = 10
975 # The maximum number of datagrams that are read in one go,
976 # higher numbers decrease latency, lower numbers increase fairness on
977 # the worker-dispatcher
978 receive-throughput = 3
980 # The number of bytes per direct buffer in the pool used to read or write
981 # network data from the kernel.
982 direct-buffer-size = 128 KiB
984 # The maximal number of direct buffers kept in the direct buffer pool for
986 direct-buffer-pool-limit = 1000
988 # Enable fine grained logging of what goes on inside the implementation.
989 # Be aware that this may log more than once per message sent to the actors
990 # of the UDP implementation.
993 # Fully qualified config path which holds the dispatcher configuration
994 # to be used for running the select() calls in the selectors
995 selector-dispatcher = "akka.io.pinned-dispatcher"
997 # Fully qualified config path which holds the dispatcher configuration
998 # for the read/write worker actors
999 worker-dispatcher = "akka.actor.internal-dispatcher"
1001 # Fully qualified config path which holds the dispatcher configuration
1002 # for the selector management actors
1003 management-dispatcher = "akka.actor.internal-dispatcher"
1008 # The number of selectors to stripe the served channels over; each of
1009 # these will use one select loop on the selector-dispatcher.
1012 # Maximum number of open channels supported by this UDP module. Generally
1013 # UDP does not require a large number of channels, therefore it is
1014 # recommended to keep this setting low.
1017 # The select loop can be used in two modes:
1018 # - setting "infinite" will select without a timeout, hogging a thread
1019 # - setting a positive timeout will do a bounded select call,
1020 # enabling sharing of a single thread between multiple selectors
1021 # (in this case you will have to use a different configuration for the
1022 # selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
1023 # - setting it to zero means polling, i.e. calling selectNow()
1024 select-timeout = infinite
1026 # When trying to assign a new connection to a selector and the chosen
1027 # selector is at full capacity, retry selector choosing and assignment
1028 # this many times before giving up
1029 selector-association-retries = 10
1031 # The maximum number of datagrams that are read in one go,
1032 # higher numbers decrease latency, lower numbers increase fairness on
1033 # the worker-dispatcher
1034 receive-throughput = 3
1036 # The number of bytes per direct buffer in the pool used to read or write
1037 # network data from the kernel.
1038 direct-buffer-size = 128 KiB
1040 # The maximal number of direct buffers kept in the direct buffer pool for
1042 direct-buffer-pool-limit = 1000
1044 # Enable fine grained logging of what goes on inside the implementation.
1045 # Be aware that this may log more than once per message sent to the actors
1046 # of the UDP implementation.
1049 # Fully qualified config path which holds the dispatcher configuration
1050 # to be used for running the select() calls in the selectors
1051 selector-dispatcher = "akka.io.pinned-dispatcher"
1053 # Fully qualified config path which holds the dispatcher configuration
1054 # for the read/write worker actors
1055 worker-dispatcher = "akka.actor.internal-dispatcher"
1057 # Fully qualified config path which holds the dispatcher configuration
1058 # for the selector management actors
1059 management-dispatcher = "akka.actor.internal-dispatcher"
1063 # Fully qualified config path which holds the dispatcher configuration
1064 # for the manager and resolver router actors.
1065 # For actual router configuration see akka.actor.deployment./IO-DNS/*
1066 dispatcher = "akka.actor.internal-dispatcher"
1068 # Name of the subconfig at path akka.io.dns, see inet-address below
1070 # Change to `async-dns` to use the new "native" DNS resolver,
1071 # which is also capable of resolving SRV records.
1072 resolver = "inet-address"
1074 # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records.
1075 # To be replaced by `akka.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does)
1077 # Must implement akka.io.DnsProvider
1078 provider-object = "akka.io.InetAddressDnsProvider"
1080 # To set the time to cache name resolutions
1082 # default: sun.net.InetAddressCachePolicy.get() and getNegative()
1083 # forever: cache forever
1085 # n [time unit]: positive timeout with unit, for example 30s
1086 positive-ttl = default
1087 negative-ttl = default
1089 # How often to sweep out expired cache entries.
1090 # Note that this interval has nothing to do with TTLs
1091 cache-cleanup-interval = 120s
1095 provider-object = "akka.io.dns.internal.AsyncDnsProvider"
1097 # Set upper bound for caching successfully resolved dns entries
1098 # if the DNS record has a smaller TTL value than the setting that
1099 # will be used. Default is to use the record TTL with no cap.
1101 # forever: always use the minimum TTL from the found records
1102 # never: never cache
1103 # n [time unit] = cap the caching to this value
1104 positive-ttl = forever
1106 # Set how long the fact that a DNS record could not be found is
1107 # cached. If a new resolution is done while the fact is cached it will
1108 # be failed and not result in an actual DNS resolution. Default is
1111 # never: never cache
1112 # forever: cache a missing DNS record forever (you probably will not want to do this)
1113 # n [time unit] = cache for this long
1114 negative-ttl = never
1116 # Configures nameservers to query during DNS resolution.
1117 # Defaults to the nameservers that would be used by the JVM by default.
1118 # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers
1119 # If multiple are defined then they are tried in order until one responds
1120 nameservers = default
1122 # The time that a request is allowed to live before being discarded
1123 # given no reply. The lower bound of this should always be the amount
1124 # of time to reasonably expect a DNS server to reply within.
1125 # If multiple name servers are provided then each gets this long to respond before trying
1127 resolve-timeout = 5s
1129 # How often to sweep out expired cache entries.
1130 # Note that this interval has nothing to do with TTLs
1131 cache-cleanup-interval = 120s
1133 # Configures the list of search domains.
1134 # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
1135 # other platforms, will not make any attempt to lookup the search domains). Set to a single domain, or
1136 # a list of domains, eg, [ "example.com", "example.net" ].
1137 search-domains = default
1139 # Any hosts that have a number of dots less than this will not be looked up directly, instead, a search on
1140 # the search domains will be tried first. This corresponds to the ndots option in /etc/resolv.conf, see
1141 # https://linux.die.net/man/5/resolver for more info.
1142 # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
1143 # other platforms, will default to 1).
1150 # CoordinatedShutdown is an extension that will perform registered
1151 # tasks in the order that is defined by the phases. It is started
1152 # by calling CoordinatedShutdown(system).run(). This can be triggered
1153 # by different things, for example:
1154 # - JVM shutdown hook will by default run CoordinatedShutdown
1155 # - Cluster node will automatically run CoordinatedShutdown when it
1156 # sees itself as Exiting
1157 # - A management console or other application specific command can
1158 # run CoordinatedShutdown
1159 coordinated-shutdown {
1160 # The timeout that will be used for a phase if not specified with
1161 # 'timeout' in the phase
1162 default-phase-timeout = 5 s
1164 # Terminate the ActorSystem in the last phase actor-system-terminate.
1165 terminate-actor-system = on
1167 # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate
1168 # if this is set to 'on'. It is done after termination of the
1169 # ActorSystem if terminate-actor-system=on, otherwise it is done
1170 # immediately when the last phase is reached.
1173 # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'.
1176 # Run the coordinated shutdown when the JVM process exits, e.g.
1177 # via kill SIGTERM signal (SIGINT ctrl-c doesn't work).
1178 # This property is related to `akka.jvm-shutdown-hooks` above.
1179 run-by-jvm-shutdown-hook = on
1181 # Run the coordinated shutdown when ActorSystem.terminate is called.
1182 # Enabling this and disabling terminate-actor-system is not a supported
1183 # combination (will throw ConfigurationException at startup).
1184 run-by-actor-system-terminate = on
1186 # When Coordinated Shutdown is triggered an instance of `Reason` is
1187 # required. That value can be used to override the default settings.
1188 # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
1189 # overridden depending on the reason.
1191 # Overrides are applied using the `reason.getClass.getName`.
1192 # Overrides the `exit-code` when the `Reason` is a cluster
1193 # Downing or a Cluster Join Unsuccessful event
1194 "akka.actor.CoordinatedShutdown$ClusterDowningReason$" {
1197 "akka.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" {
1202 #//#coordinated-shutdown-phases
1203 # CoordinatedShutdown is enabled by default and will run the tasks that
1204 # are added to these phases by individual Akka modules and user logic.
1206 # The phases are ordered as a DAG by defining the dependencies between the phases
1207 # to make sure shutdown tasks are run in the right order.
1209 # In general user tasks belong in the first few phases, but there may be use
1210 # cases where you would want to hook in new phases or register tasks later in
1213 # Each phase is defined as a named config section with the
1214 # following optional properties:
1215 # - timeout=15s: Override the default-phase-timeout for this phase.
1216 # - recover=off: If the phase fails the shutdown is aborted
1217 # and dependent phases will not be executed.
1218 # - enabled=off: Skip all tasks registered in this phase. DO NOT use
1219 # this to disable phases unless you are absolutely sure what the
1220 # consequences are. Many of the built in tasks depend on other tasks
1221 # having been executed in earlier phases and may break if those are disabled.
1222 # depends-on=[]: Run the phase after the given phases
1225 # The first pre-defined phase that applications can add tasks to.
1226 # Note that more phases can be added in the application's
1227 # configuration by overriding this phase with an additional
1229 before-service-unbind {
1232 # Stop accepting new incoming connections.
1233 # This is where you can register tasks that make a server stop accepting new connections. Already
1234 # established connections should be allowed to continue and complete if possible.
1236 depends-on = [before-service-unbind]
1239 # Wait for requests that are in progress to be completed.
1240 # This is where you register tasks that will wait for already established connections to complete, potentially
1241 # also first telling them that it is time to close down.
1242 service-requests-done {
1243 depends-on = [service-unbind]
1246 # Final shutdown of service endpoints.
1247 # This is where you would add tasks that forcefully kill connections that are still around.
1249 depends-on = [service-requests-done]
1252 # Phase for custom application tasks that are to be run
1253 # after service shutdown and before cluster shutdown.
1254 before-cluster-shutdown {
1255 depends-on = [service-stop]
1258 # Graceful shutdown of the Cluster Sharding regions.
1259 # This phase is not meant for users to add tasks to.
1260 cluster-sharding-shutdown-region {
1262 depends-on = [before-cluster-shutdown]
1265 # Emit the leave command for the node that is shutting down.
1266 # This phase is not meant for users to add tasks to.
1268 depends-on = [cluster-sharding-shutdown-region]
1271 # Shutdown cluster singletons
1272 # This is done as late as possible to allow the shard region shutdown triggered in
1273 # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down.
1274 # This phase is not meant for users to add tasks to.
1277 depends-on = [cluster-leave]
1280 # Wait until exiting has been completed
1281 # This phase is not meant for users to add tasks to.
1282 cluster-exiting-done {
1283 depends-on = [cluster-exiting]
1286 # Shutdown the cluster extension
1287 # This phase is not meant for users to add tasks to.
1289 depends-on = [cluster-exiting-done]
1292 # Phase for custom application tasks that are to be run
1293 # after cluster shutdown and before ActorSystem termination.
1294 before-actor-system-terminate {
1295 depends-on = [cluster-shutdown]
1298 # Last phase. See terminate-actor-system and exit-jvm above.
1299 # Don't add phases that depend on this phase because the
1300 # dispatcher and scheduler of the ActorSystem have been shutdown.
1301 # This phase is not meant for users to add tasks to.
1302 actor-system-terminate {
1304 depends-on = [before-actor-system-terminate]
1307 #//#coordinated-shutdown-phases