1 // vi: set smarttab et sw=4 tabstop=4:
2 module distributed-datastore-provider {
4 namespace "urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider";
5 prefix "distributed-datastore-provider";
8 "This module contains the base YANG definitions for
9 the distributed datastore provider implementation";
11 revision "2023-12-29" {
12 description "Remove use-tell-based-protocol and shard-snapshot-chunk-size leaves";
15 revision "2014-06-12" {
20 typedef non-zero-uint32-type {
26 typedef operation-timeout-type {
32 typedef heartbeat-interval-type {
44 grouping data-store-properties {
45 leaf shard-transaction-idle-timeout-in-minutes {
47 type non-zero-uint32-type;
48 description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
51 leaf shard-snapshot-batch-count {
53 type non-zero-uint32-type;
54 description "The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.";
57 leaf shard-snapshot-data-threshold-percentage {
60 description "The percentage of Runtime.maxMemory() used by the in-memory journal log before a snapshot is to be taken.
61 Disabled, if direct threshold is enabled.";
64 leaf shard-snapshot-data-threshold {
69 description "The threshold of in-memory journal size before a snapshot is to be taken. If set to 0, direct threshold
70 is disabled and percentage is used instead.";
74 leaf shard-heartbeat-interval-in-millis {
76 type heartbeat-interval-type;
77 description "The interval at which a shard will send a heart beat message to its remote shard.";
80 leaf shard-election-timeout-factor {
82 type non-zero-uint32-type;
83 description "The multiplication factor to be used to determine shard election timeout. The shard election timeout
84 is determined by multiplying shard-heartbeat-interval-in-millis with the shard-election-timeout-factor";
87 leaf operation-timeout-in-seconds {
89 type operation-timeout-type;
90 description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
93 leaf shard-journal-recovery-log-batch-size {
95 type non-zero-uint32-type;
96 description "The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.";
99 leaf shard-transaction-commit-timeout-in-seconds {
101 type non-zero-uint32-type;
102 description "The maximum amount of time a shard transaction three-phase commit can be idle without receiving the next messages before it aborts the transaction";
105 leaf shard-transaction-commit-queue-capacity {
107 type non-zero-uint32-type;
108 description "The maximum allowed capacity for each shard's transaction commit queue.";
111 leaf shard-commit-queue-expiry-timeout-in-seconds {
112 default 120; // 2 minutes
113 type non-zero-uint32-type;
114 description "The maximum amount of time a transaction can remain in a shard's commit queue waiting
115 to begin the CanCommit phase as coordinated by the broker front-end. Normally this should be
116 quick but latencies can occur in between transaction ready and CanCommit or a remote broker
117 could lose connection and CanCommit might never occur. Expiring transactions from the queue
118 allows subsequent pending transaction to be processed.";
121 leaf shard-initialization-timeout-in-seconds {
122 default 300; // 5 minutes
123 type non-zero-uint32-type;
124 description "The maximum amount of time to wait for a shard to initialize from persistence
125 on startup before failing an operation (eg transaction create and change
126 listener registration).";
129 leaf shard-leader-election-timeout-in-seconds {
131 type non-zero-uint32-type;
132 description "The maximum amount of time to wait for a shard to elect a leader before failing
133 an operation (eg transaction create).";
136 leaf initial-settle-timeout-multiplier {
139 description "Multiplier for the maximum amount of time to wait for a shard to elect a leader.
140 Zero value means wait indefinitely (as long as it takes).";
143 leaf recovery-snapshot-interval-seconds {
146 description "Interval after which a snapshot should be taken during the recovery process.";
149 leaf shard-batched-modification-count {
151 type non-zero-uint32-type;
152 description "The number of transaction modification operations (put, merge, delete) to
153 batch before sending to the shard transaction actor. Batching improves
154 performance as less modifications messages are sent to the actor and thus
155 lessens the chance that the transaction actor's mailbox queue could get full.";
158 leaf enable-metric-capture {
161 description "Enable or disable metric capture.";
164 leaf bounded-mailbox-capacity {
166 type non-zero-uint32-type;
167 description "Max queue size that an actor's mailbox can reach";
173 description "Enable or disable data persistence";
176 leaf snapshotOnRootOverwrite {
179 description "Enable or disable capturing snapshots on DataTree root overwrites";
182 leaf shard-isolated-leader-check-interval-in-millis {
184 type heartbeat-interval-type;
185 description "The interval at which the leader of the shard will check if its majority
186 followers are active and term itself as isolated";
189 leaf transaction-creation-initial-rate-limit {
191 type non-zero-uint32-type;
192 description "The initial number of transactions per second that are allowed before the data store
193 should begin applying back pressure. This number is only used as an initial guidance,
194 subsequently the datastore measures the latency for a commit and auto-adjusts the rate limit";
197 leaf transaction-debug-context-enabled {
200 description "Enable or disable transaction context debug. This will log the call site trace for
201 transactions that fail";
204 leaf custom-raft-policy-implementation {
207 description "A fully qualified java class name. The class should implement
208 org.opendaylight.controller.cluster.raft.policy.RaftPolicy. This java class should be
209 accessible to the distributed data store OSGi module so that it can be dynamically loaded via
210 reflection. For now let's assume that these classes to customize raft behaviors should be
211 present in the distributed data store module itself. If this property is set to a class which
212 cannot be found then the default raft behavior will be applied";
215 leaf maximum-message-slice-size {
217 type non-zero-uint32-type;
218 description "When fragmenting messages thru the akka remoting framework, this is the
219 maximum size in bytes for a message slice.";
222 leaf file-backed-streaming-threshold-in-megabytes {
224 type non-zero-uint32-type;
225 description "When streaming large amounts of data, eg when sending a snapshot to a follower, this
226 is the threshold in terms of number of megabytes before it should switch from storing in memory to
227 buffering to a file.";
230 leaf sync-index-threshold {
232 type non-zero-uint32-type;
233 description "Permitted synchronization lag, expressed in terms of RAFT entry count. If a follower's
234 commitIndex trails the leader's journal by more than this amount of entries the follower
235 is considered to be out-of-sync.";
238 leaf backend-aliveness-timer-interval-in-seconds {
240 type non-zero-uint32-type;
241 description "The timer interval whereby, on expiration after response inactivity from the back-end,
242 the connection to the back-end is torn down and reconnection is attempted.";
245 leaf frontend-request-timeout-in-seconds {
246 default 120; // 2 minutes
247 type non-zero-uint32-type;
248 description "The timeout interval whereby client frontend transaction requests are failed.";
251 leaf frontend-no-progress-timeout-in-seconds {
252 default 900; // 15 minutes
253 type non-zero-uint32-type;
254 description "The timeout interval whereby the client front-end hasn't made progress with the
255 back-end on any request and terminates.";
258 leaf initial-payload-serialized-buffer-capacity {
260 type non-zero-uint32-type;
261 description "The initial buffer capacity, in bytes, to use when serializing message payloads.";
264 leaf use-lz4-compression {
267 description "Use lz4 compression for snapshots, sent from leader to follower, for snapshots stored
268 by LocalSnapshotStore, use akka.conf configuration.";
271 leaf export-on-recovery {
277 description "Export snapshot and journal during recovery. Possible modes: off(default),
278 json(export to json files). Note that in case of large snapshot,
279 export will take a lot of time.";
282 leaf recovery-export-base-dir {
283 default persistence-export;
285 description "Directory name for snapshot and journal dumps.";
289 container data-store-properties-container {
290 uses data-store-properties;