X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-clustering-config%2Fsrc%2Fmain%2Fresources%2Finitial%2Fdatastore.cfg;h=7d4903e17c9b88ebd9d7986902f1389f461e872f;hp=250a8149ff1e0b77e6f203c275beb473c03d8c2e;hb=e7e69069ae5ecaacc9ea0e47cb40cdf68237d636;hpb=f9bfd1462a1cb57a34a4e567c1931b629df901ea

diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
index 250a8149ff..7d4903e17c 100644
--- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
+++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
@@ -4,6 +4,15 @@
 # the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
 # "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
 # specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+# Overall flexibility goes even further: these overrides can also be combined with a per-shard
+# specification, so that you can also use:
+#   <shard-name>.<setting>
+#   <datastore-type>.<shard-name>.<setting>
+# for example:
+#   topology.shard-election-timeout-factor=2             # override both config/operational for topology shard
+#   config.topology.shard-election-timeout-factor=5      # override config for topology shard
+#   operational.topology.shard-election-timeout-factor=7 # override operational for topology shard
+#
 
 # The multiplication factor to be used to determine shard election timeout. The shard election timeout
 # is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
@@ -64,15 +73,19 @@ operational.persistent=false
 #transaction-creation-initial-rate-limit=100
 
 # The maximum thread pool size for each shard's data store data change notification executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-executor-pool-size=20
 
 # The maximum queue size for each shard's data store data change notification executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-executor-queue-size=1000
 
 # The maximum queue size for each shard's data store data change listener.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-listener-queue-size=1000
 
 # The maximum queue size for each shard's data store executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-store-executor-queue-size=5000
 
 # A fully qualified java class name. The class should implement
@@ -99,3 +112,17 @@ operational.persistent=false
 # the stack trace of the creator of the Tx when there is an exception when the transaction is submitted
 # (e.g. for a failed validation). Defaults to false due to performance impact.
 #transaction-debug-context-enabled=true
+
+# Multiplier applied to shard-leader-election-timeout-in-seconds for the purposes of initial
+# datastore convergence. Each frontend datastore instance will wait the specified amount of time
+# before becoming exposed as a service. A value of 0 indicates waiting forever. Defaults to 3.
+#initial-settle-timeout-multiplier=3
+
+# Interval after which a snapshot should be taken during the recovery process.
+#recovery-snapshot-interval-seconds=0
+
+# Option to take a snapshot when the entire DataTree root or top-level container is overwritten
+snapshot-on-root-overwrite=false
+
+# Enable lz4 compression for snapshots sent from leader to followers
+#use-lz4-compression=true
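
For reference, the fragment below sketches how the options introduced by this patch might be combined in a deployment's datastore.cfg. It is an illustrative sketch only: the "topology" shard name and all values are example choices, and the 500 ms shard-heartbeat-interval-in-millis used in the arithmetic is assumed from the shipped default rather than anything this patch changes.

# Illustrative datastore.cfg fragment (example values, not recommendations):

# Assuming the default shard-heartbeat-interval-in-millis of 500, a factor of
# 20 gives an election timeout of 500 ms * 20 = 10 s for every shard in both
# the config and operational data stores.
shard-election-timeout-factor=20

# Per-shard, per-datastore override from this patch: only the operational
# topology shard uses 500 ms * 30 = 15 s.
operational.topology.shard-election-timeout-factor=30

# Wait up to 3 * shard-leader-election-timeout-in-seconds for the datastore
# to settle before exposing it as a service; 0 would wait forever.
initial-settle-timeout-multiplier=3

# Snapshot every 300 s during recovery, snapshot whenever the DataTree root
# is overwritten, and lz4-compress snapshots sent from leader to followers.
recovery-snapshot-interval-seconds=300
snapshot-on-root-overwrite=true
use-lz4-compression=true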