X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-clustering-config%2Fsrc%2Fmain%2Fresources%2Finitial%2Fdatastore.cfg;h=f6566523b6ce9d03f7016f49065e42b15f6bdab8;hp=d43ed4b01b9d571bb4bb4fbbbf4a2b7fc6d0d85f;hb=687be5ebc851127bc9579a41c74253e2c145d9a9;hpb=b65f82f3c6e6e4c53ea2efa4dba30ddf82d61616

diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
index d43ed4b01b..f6566523b6 100644
--- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
+++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
@@ -4,6 +4,15 @@
 # the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
 # "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
 # specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+# These overrides can also be combined with a per-shard specification, so the following forms are
+# accepted as well:
+# <shard-name>.<setting-name>
+# <datastore-name>.<shard-name>.<setting-name>
+# for example:
+# topology.shard-election-timeout-factor=2             # overrides both config/operational for the topology shard
+# config.topology.shard-election-timeout-factor=5      # overrides config for the topology shard
+# operational.topology.shard-election-timeout-factor=7 # overrides operational for the topology shard
+#
 
 # The multiplication factor to be used to determine shard election timeout. The shard election timeout
 # is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
@@ -44,8 +53,13 @@ operational.persistent=false
 #shard-snapshot-batch-count=20000
 
 # The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+# Ignored when the direct threshold (shard-snapshot-data-threshold) is enabled.
 #shard-snapshot-data-threshold-percentage=12
 
+# The maximum size of the in-memory journal (in MB); once the limit is reached, a snapshot is taken.
+# Must not be less than 1. If set to 0, the direct threshold is disabled and the percentage is used instead.
+#shard-snapshot-data-threshold=0
+
 # The interval at which the leader of the shard checks whether the majority of its followers are
 # active and, if not, deems itself isolated.
 #shard-isolated-leader-check-interval-in-millis=5000
@@ -63,18 +77,6 @@ operational.persistent=false
 # measures the latency for a commit and auto-adjusts the rate limit.
 #transaction-creation-initial-rate-limit=100
 
-# The maximum thread pool size for each shard's data store data change notification executor.
-#max-shard-data-change-executor-pool-size=20
-
-# The maximum queue size for each shard's data store data change notification executor.
-#max-shard-data-change-executor-queue-size=1000
-
-# The maximum queue size for each shard's data store data change listener.
-#max-shard-data-change-listener-queue-size=1000
-
-# The maximum queue size for each shard's data store executor.
-#max-shard-data-store-executor-queue-size=5000
-
 # A fully qualified java class name. The class should implement
 # org.opendaylight.controller.cluster.raft.policy.RaftPolicy. This java class should be
 # accessible to the distributed data store OSGi module so that it can be dynamically loaded via
@@ -103,4 +105,34 @@ operational.persistent=false
 # Multiplier of shard-leader-election-timeout-in-seconds for the purposes of initial datastore
 # convergence. Each frontend datastore instance will wait the specified amount of time before becoming
 # exposed as a service. A value of 0 indicates waiting forever. Defaults to 3.
-initial-settle-timeout-multiplier=3
+#initial-settle-timeout-multiplier=3
+
+# Interval (in seconds) after which a snapshot should be taken during the recovery process.
+#recovery-snapshot-interval-seconds=0
+
+# Option to take a snapshot when the entire DataTree root or a top-level container is overwritten.
+snapshot-on-root-overwrite=false
+
+# Enable lz4 compression for snapshots sent from the leader to followers.
+#use-lz4-compression=true
+
+# Export snapshot and journal content after recovery. Possible modes: off, json
+#
+# Journal JSON structure:
+# Entries : [
+#     Entry : [
+#         Node: [
+#             Path : {},
+#             ModificationType : {},
+#             Data : {}
+#         ]
+#     ]
+# ]
+#
+# Snapshot JSON structure:
+# RootNode : {}
+#
+export-on-recovery=off
+
+# Directory name for export files.
+#recovery-export-base-dir=persistence-export
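
A worked example of how the override forms from the first hunk could combine in a deployed datastore.cfg, using the "topology" shard name from the diff's own examples (the values are illustrative, and the diff does not spell out the precedence between the forms, though the most specific key is presumably the one that takes effect):

    # applies to both data stores, all shards
    shard-election-timeout-factor=2
    # applies to every shard of the config data store
    config.shard-election-timeout-factor=3
    # applies to the topology shard in both data stores
    topology.shard-election-timeout-factor=5
    # applies only to the topology shard of the operational data store
    operational.topology.shard-election-timeout-factor=7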
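The two snapshot-threshold settings introduced in the second hunk are alternatives: while shard-snapshot-data-threshold stays at its default of 0, the percentage-based setting governs; giving it a positive value switches the shard to an absolute limit and the percentage is ignored. A minimal sketch, assuming a 256 MB cap suits the deployment (the value itself is an assumption, not taken from the commit):

    # snapshot once the in-memory journal reaches 256 MB, regardless of heap size
    shard-snapshot-data-threshold=256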
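Likewise, turning on the post-recovery export added in the last hunk only requires switching the mode from off to json; per the comments in the diff, the exported files land under recovery-export-base-dir, which defaults to persistence-export:

    # dump recovered journal and snapshot content as JSON
    export-on-recovery=json
    #recovery-export-base-dir=persistence-export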