1 ##############################################
2 # Akka Distributed Data Reference Config File #
3 ##############################################
5 # This is the reference config file that contains all the default settings.
6 # Make your edits/overrides in your application.conf.
10 # Settings for the DistributedData extension
11 akka.cluster.distributed-data {
12 # Actor name of the Replicator actor, /system/ddataReplicator
13 name = ddataReplicator
15 # Replicas are running on members tagged with this role.
16 # All members are used if undefined or empty.
19 # How often the Replicator should send out gossip information
22 # How often the subscribers will be notified of changes, if any
23 notify-subscribers-interval = 500 ms
25 # Logging of data with payload size in bytes larger than
26 # this value. Maximum detected size per key is logged once,
27 # with an increase threshold of 10%.
28 # It can be disabled by setting the property to off.
29 log-data-size-exceeding = 10 KiB
31 # Maximum number of entries to transfer in one round of gossip exchange when
32 # synchronizing the replicas. Next chunk will be transferred in next round of gossip.
33 # The actual number of data entries in each Gossip message is dynamically
34 # adjusted to not exceed the maximum remote message size (maximum-frame-size).
35 max-delta-elements = 500
37 # The id of the dispatcher to use for Replicator actors.
38 # If specified you need to define the settings of the actual dispatcher.
39 use-dispatcher = "akka.actor.internal-dispatcher"
41 # How often the Replicator checks for pruning of data associated with
42 # removed cluster nodes. If this is set to 'off' the pruning feature will
43 # be completely disabled.
44 pruning-interval = 120 s
46 # How long time it takes to spread the data to all other replica nodes.
47 # This is used when initiating and completing the pruning process of data associated
48 # with removed cluster nodes. The time measurement is stopped when any replica is
49 # unreachable, but it's still recommended to configure this with a certain margin.
50 # It should be in the magnitude of minutes even though typical dissemination time
51 # is shorter (grows logarithmically with the number of nodes). There is no advantage in
52 # setting this too low. Setting it to a large value will delay the pruning process.
53 max-pruning-dissemination = 300 s
55 # The markers that pruning has been performed for a removed node are kept for this
56 # time and thereafter removed. If an old data entry that was never pruned is somehow
57 # injected and merged with existing data after this time the value will not be correct.
58 # This would be possible (although unlikely) in the case of a long network partition.
59 # It should be in the magnitude of hours. For durable data it is configured by
60 # 'akka.cluster.distributed-data.durable.pruning-marker-time-to-live'.
61 pruning-marker-time-to-live = 6 h
63 # Serialized Write and Read messages are cached when they are sent to
64 # several nodes. If no further activity they are removed from the cache
65 # after this duration.
66 serializer-cache-time-to-live = 10 s
68 # Update and Get operations are sent to oldest nodes first.
69 # This is useful together with Cluster Singleton, which is running on oldest nodes.
72 # Settings for delta-CRDT
74 # enable or disable delta-CRDT replication
77 # Some complex deltas grow in size for each update and above this
78 # threshold such deltas are discarded and sent as full state instead.
79 # This is number of elements or similar size hint, not size in bytes.
84 # List of keys that are durable. Prefix matching is supported by using * at the
88 # The markers that pruning has been performed for a removed node are kept for this
89 # time and thereafter removed. If an old data entry that was never pruned is
90 # injected and merged with existing data after this time the value will not be correct.
91 # This would be possible if replica with durable data didn't participate in the pruning
92 # (e.g. it was shut down) and later started after this time. A durable replica should not
93 # be stopped for a longer time than this duration, and if it is joining again after this
94 # duration its data should first be manually removed (from the lmdb directory).
95 # It should be in the magnitude of days. Note that there is a corresponding setting
96 # for non-durable data: 'akka.cluster.distributed-data.pruning-marker-time-to-live'.
97 pruning-marker-time-to-live = 10 d
99 # Fully qualified class name of the durable store actor. It must be a subclass
100 # of akka.actor.Actor and handle the protocol defined in
101 # akka.cluster.ddata.DurableStore. The class must have a constructor with
102 # com.typesafe.config.Config parameter.
103 store-actor-class = akka.cluster.ddata.LmdbDurableStore
105 use-dispatcher = akka.cluster.distributed-data.durable.pinned-store
108 executor = thread-pool-executor
109 type = PinnedDispatcher
112 # Config for the LmdbDurableStore
114 # Directory of LMDB file. There are two options:
115 # 1. A relative or absolute path to a directory that ends with 'ddata'
116 # the full name of the directory will contain name of the ActorSystem
117 # and its remote port.
118 # 2. Otherwise the path is used as is, as a relative or absolute path to
121 # When running in production you may want to configure this to a specific
122 # path (alt 2), since the default directory contains the remote port of the
123 # actor system to make the name unique. If using a dynamically assigned
124 # port (0) it will be different each time and the previously stored data
125 # will not be loaded.
128 # Size in bytes of the memory mapped file.
131 # Accumulate changes before storing improves performance with the
132 # risk of losing the last writes if the JVM crashes.
133 # The interval is by default set to 'off' to write each update immediately.
134 # Enabling write behind by specifying a duration, e.g. 200ms, is especially
135 # efficient when performing many writes to the same key, because it is only
136 # the last value for each key that will be serialized and stored.
137 # write-behind-interval = 200 ms
138 write-behind-interval = off
145 # Protobuf serializer for cluster DistributedData messages
148 akka-data-replication = "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer"
149 akka-replicated-data = "akka.cluster.ddata.protobuf.ReplicatedDataSerializer"
151 serialization-bindings {
152 "akka.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication
153 "akka.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data
155 serialization-identifiers {
156 "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11
157 "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" = 12