# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
# Overall flexibility goes even further, as these overrides can also be combined with per-shard specification,
# so that you can also use:
# <shard-name>.<setting>
# <datastore-name>.<shard-name>.<setting>
# for example:
# topology.shard-election-timeout-factor=2 # override both config/operational for topology shard
# config.topology.shard-election-timeout-factor=5 # override config for topology shard
# operational.topology.shard-election-timeout-factor=7 # override operational for topology shard
#
# The multiplication factor to be used to determine shard election timeout. The shard election timeout
# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
# The interval at which a shard will send a heart beat message to its remote shard.
#shard-heartbeat-interval-in-millis=500
# The amount by which to divide election timeout in case of a candidate. This serves as a counter-balance
# to shard-election-timeout-factor. The default value is 1, i.e. election timeout is the same in all
# situations.
#shard-candidate-election-timeout-divisor=1

# The maximum amount of time to wait for a shard to elect a leader before failing an operation (eg transaction create).
#shard-leader-election-timeout-in-seconds=30
#shard-snapshot-batch-count=20000
# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
# Disabled, if direct threshold is enabled.
#shard-snapshot-data-threshold-percentage=12
# The max size of the in-memory journal (in MB); after reaching the limit, a snapshot will be taken. Should not be less than 1.
# If set to 0, direct threshold is disabled and percentage is used instead.
#shard-snapshot-data-threshold=0

# The interval at which the leader of the shard will check if its majority followers are active and
# term itself as isolated.
#shard-isolated-leader-check-interval-in-millis=5000
#transaction-creation-initial-rate-limit=100
# The maximum thread pool size for each shard's data store data change notification executor.
# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
#max-shard-data-change-executor-pool-size=20
# The maximum queue size for each shard's data store data change notification executor.
# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
#max-shard-data-change-executor-queue-size=1000
# The maximum queue size for each shard's data store data change listener.
# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
#max-shard-data-change-listener-queue-size=1000
# The maximum queue size for each shard's data store executor.
# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
#max-shard-data-store-executor-queue-size=5000
# A fully qualified java class name. The class should implement
# the stack trace of the creator of the Tx when there is an exception when the transaction is submitted
# (e.g. for a failed validation). Defaults to false due to performance impact.
#transaction-debug-context-enabled=true

# Multiplier of shard-leader-election-timeout-in-seconds for the purposes of initial datastore
# convergence. Each frontend datastore instance will wait the specified amount of time before becoming
# exposed as a service. A value of 0 indicates waiting forever. Defaults to 3.
#initial-settle-timeout-multiplier=3

# Interval after which a snapshot should be taken during the recovery process.
#recovery-snapshot-interval-seconds=0

# Option to take a snapshot when the entire DataTree root or top-level container is overwritten
snapshot-on-root-overwrite=false

# Enable lz4 compression for snapshots sent from leader to followers
#use-lz4-compression=true