X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-clustering-config%2Fsrc%2Fmain%2Fresources%2Finitial%2Fdatastore.cfg;h=608ea328c5e63f58d04ed11499fc729145bd93c2;hp=8e03223a7163c9188c1cdc85fd5a6e75973312d1;hb=33877f41ffc3f8eb36ad8490315419b90817d26e;hpb=6b9787d26aab04acf95fa30601291380f9b1d5c9

diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
index 8e03223a71..608ea328c5 100644
--- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
+++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
@@ -4,14 +4,28 @@
 # the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
 # "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
 # specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+# Overall flexibility goes even further, as these overrides can also be combined with a per-shard
+# specification, so that you can also use:
+# <shard-name>.<setting-name>
+# <data-store-type>.<shard-name>.<setting-name>
+# for example:
+# topology.shard-election-timeout-factor=2 # override both config/operational for topology shard
+# config.topology.shard-election-timeout-factor=5 # override config for topology shard
+# operational.topology.shard-election-timeout-factor=7 # override operational for topology shard
+#
 # The multiplication factor to be used to determine shard election timeout. The shard election timeout
 # is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
-#shard-election-timeout-factor=2
+shard-election-timeout-factor=20

 # The interval at which a shard will send a heart beat message to its remote shard.
 #shard-heartbeat-interval-in-millis=500

+# The amount by which to divide the election timeout when a shard is a candidate. This serves as a
+# counter-balance to shard-election-timeout-factor. The default value is 1, i.e. the election timeout
+# is the same in all situations.
+#shard-candidate-election-timeout-divisor=1
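+#
+# For example, with the default shard-heartbeat-interval-in-millis=500 and the
+# shard-election-timeout-factor=20 set above, the election timeout is 500 ms * 20 = 10000 ms;
+# a candidate with, say, shard-candidate-election-timeout-divisor=2 would instead time out
+# after 10000 ms / 2 = 5000 ms.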
+
 # The maximum amount of time to wait for a shard to elect a leader before failing an operation (eg transaction create).
 #shard-leader-election-timeout-in-seconds=30

@@ -24,52 +38,85 @@ operational.persistent=false
 # The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
 #shard-transaction-idle-timeout-in-minutes=10

-# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the 
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
 # next messages before it aborts the transaction.
 #shard-transaction-commit-timeout-in-seconds=30

 # The maximum allowed capacity for each shard's transaction commit queue.
 #shard-transaction-commit-queue-capacity=20000

-# The maximum amount of time to wait for a shard to initialize from persistence on startup before 
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
 # failing an operation (eg transaction create and change listener registration).
 #shard-initialization-timeout-in-seconds=300

-# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken. 
-#shard-journal-recovery-log-batch-size=5000
-
 # The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
 #shard-snapshot-batch-count=20000

 # The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
 #shard-snapshot-data-threshold-percentage=12

-# The interval at which the leader of the shard will check if its majority followers are active and 
+# The interval at which the leader of the shard will check whether a majority of its followers are active and, if not,
 # term itself as isolated.
 #shard-isolated-leader-check-interval-in-millis=5000

-# The number of transaction modification operations (put, merge, delete) to batch before sending to the 
-# shard transaction actor. Batching improves performance as less modifications messages are sent to the 
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance, as fewer modification messages are sent to the
 # actor and thus lessens the chance that the transaction actor's mailbox queue could get full.
-#shard-batched-modification-count=100
+#shard-batched-modification-count=1000

 # The maximum amount of time for akka operations (remote or local) to complete before failing.
 #operation-timeout-in-seconds=5

-# The initial number of transactions per second that are allowed before the data store should begin 
-# applying back pressure. This number is only used as an initial guidance, subsequently the datastore 
+# The initial number of transactions per second that are allowed before the data store should begin
+# applying back pressure. This number is only used as initial guidance; subsequently the datastore
 # measures the latency for a commit and auto-adjusts the rate limit.
 #transaction-creation-initial-rate-limit=100

 # The maximum thread pool size for each shard's data store data change notification executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-executor-pool-size=20

 # The maximum queue size for each shard's data store data change notification executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-executor-queue-size=1000

 # The maximum queue size for each shard's data store data change listener.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-change-listener-queue-size=1000

 # The maximum queue size for each shard's data store executor.
+# THIS SETTING HAS HAD NO EFFECT FOR A LONG TIME, IS DEPRECATED, AND WILL BE REMOVED IN A FUTURE RELEASE
 #max-shard-data-store-executor-queue-size=5000

+# A fully qualified java class name. The class should implement
+# org.opendaylight.controller.cluster.raft.policy.RaftPolicy. This java class should be
+# accessible to the distributed data store OSGi module so that it can be dynamically loaded via
+# reflection. For now, classes that customize RAFT behavior are assumed to be present in the
+# distributed data store module itself. If this property is set to a class that cannot be found,
+# the default raft policy will be applied.
+#custom-raft-policy-implementation=
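+# for example, assuming the DisableElectionsRaftPolicy class shipped with the RAFT implementation
+# is visible to the data store bundle:
+# custom-raft-policy-implementation=org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy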
+
+# When fragmenting messages through the akka remoting framework, this is the maximum size in bytes
+# for a message slice.
+#maximum-message-slice-size=20480000
+
+# Enable the tell-based protocol between the frontend (applications) and the backend (shards). Using
+# this protocol should avoid AskTimeoutExceptions seen under heavy load. Defaults to false (use the
+# ask-based protocol).
+#use-tell-based-protocol=true
+
+# Tune the maximum number of entries a follower is allowed to lag behind the leader before it is
+# considered out-of-sync. This flag may require tuning in the face of a large number of small
+# transactions.
+#sync-index-threshold=10
+
+# Record the allocation stack trace of new transactions; useful for debugging. This makes the log
+# include the stack trace of the creator of the Tx when an exception occurs as the transaction is
+# submitted (e.g. for a failed validation). Defaults to false due to the performance impact.
+#transaction-debug-context-enabled=true
+
+# Multiplier of shard-leader-election-timeout-in-seconds for the purposes of initial datastore
+# convergence. Each frontend datastore instance will wait the specified multiple of that timeout
+# (with the defaults above, 3 * 30 = 90 seconds) before becoming exposed as a service. A value of 0
+# indicates waiting forever. Defaults to 3.
+#initial-settle-timeout-multiplier=3
+
+# Interval after which a snapshot should be taken during the recovery process.
+#recovery-snapshot-interval-seconds=0
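
As a sketch of what a custom-raft-policy-implementation class could look like, assuming the
org.opendaylight.controller.cluster.raft.policy.RaftPolicy interface exposes the two boolean hooks
automaticElectionsEnabled() and applyModificationToStateBeforeConsensus() as in this commit's
timeframe (the NoElectionsRaftPolicy class name below is hypothetical):

package org.opendaylight.controller.cluster.raft.policy;

// Example policy for a single-node deployment: keep the default handling of
// modifications, but stop shards from starting leader elections on their own.
public class NoElectionsRaftPolicy implements RaftPolicy {
    @Override
    public boolean automaticElectionsEnabled() {
        // false = shards never trigger automatic leader elections
        return false;
    }

    @Override
    public boolean applyModificationToStateBeforeConsensus() {
        // false = apply modifications only after RAFT consensus, as by default
        return false;
    }
}

Such a class would then be enabled with
custom-raft-policy-implementation=org.opendaylight.controller.cluster.raft.policy.NoElectionsRaftPolicy.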