namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:cluster:admin";
prefix "cluster-admin";
"This module contains YANG RPC definitions for administering a cluster.";
revision "2025-01-31" {
description "A number of modeling updates:
- split out 'shard-name' typedef
- use 'member-name' from odl-controller-cds-types
- 'member-voting-state' now expresses what it means
- 'local' is now a presence container, a better modeling practice
- choice/case statements are spelled out for codegen ergonomics
- 'shard-result' error reporting has been cleaned up for clarity and ergonomics";
revision "2015-10-13" {
description "Initial revision.";
import odl-controller-cds-types { prefix cds; }
typedef data-store-type {
description "A valid name for a shard.";
error-app-tag "odl-named-shards";
error-message "Shard name must not be empty";
grouping datastore-shard-id {
"Grouping holding combined identifiers of a shard -- its name and datastore type";
description "The name of the shard.";
leaf data-store-type {
description "The type of the data store to which the shard belongs";
grouping shard-result-output {
key "shard-name data-store-type";
description "The list of results, one per shard";
uses datastore-shard-id;
presence "Indicates the operation was successful";
presence "Indicates the operation was unsuccessful";
description "The message describing why the operation failed; it should be as descriptive as possible.";
grouping member-voting-states-input {
list member-voting-state {
description "The list of member voting states";
rpc add-shard-replica {
description "Adds a replica of a shard to this node and joins it to an existing cluster. The shard must
already have a module configuration defined for it and there must already be a shard existing on
another node with a leader. This RPC first contacts peer member seed nodes searching for the shard.
When found, an AddServer message is sent to the shard leader and applied as described in the Raft
paper.";
uses datastore-shard-id;
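// Illustrative only: a minimal RESTCONF invocation sketch for this RPC, encoded per RFC 8040/7951.
// The "/rests/operations" base path, the "default" shard name and the "config" datastore value are
// assumptions about a typical OpenDaylight deployment, not part of this model.
//
//   POST /rests/operations/cluster-admin:add-shard-replica
//   {
//     "cluster-admin:input": {
//       "shard-name": "default",
//       "data-store-type": "config"
//     }
//   }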
rpc remove-shard-replica {
description "Removes an existing replica of a shard from this node via the RemoveServer mechanism as
described in the Raft paper.";
uses datastore-shard-id;
type cds:member-name;
description "The cluster member from which the shard replica should be removed";
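// Illustrative only: a sketch of the input for this RPC; "member-2" is a hypothetical cluster
// member and "default"/"config" are assumed shard and datastore values.
//
//   POST /rests/operations/cluster-admin:remove-shard-replica
//   {
//     "cluster-admin:input": {
//       "shard-name": "default",
//       "data-store-type": "config",
//       "member-name": "member-2"
//     }
//   }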
rpc make-leader-local {
description "Attempts to move the leader of the given module-based shard to the local node.
The RPC returns a response after handling of the underlying MakeLeaderLocal message completes.
This operation fails if there is no current shard leader due to lack of network connectivity or
a cluster majority. In addition, if the local node is not up to date with the current leader,
an attempt is made to first sync the local node with the leader. If this cannot be achieved
within two election timeout periods the operation fails.";
uses datastore-shard-id;
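// Illustrative only: this RPC takes the same datastore-shard-id input as add-shard-replica; the
// shard name and datastore type below are hypothetical values.
//
//   POST /rests/operations/cluster-admin:make-leader-local
//   {
//     "cluster-admin:input": {
//       "shard-name": "inventory",
//       "data-store-type": "operational"
//     }
//   }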
rpc add-replicas-for-all-shards {
description "Adds replicas on this node for all currently defined shards. This is equivalent to issuing
an add-shard-replica RPC for all shards.";
uses shard-result-output;
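// Illustrative only: a sketch of a possible reply, assuming two shards named "default" and
// "topology" and a failure leaf named "message"; per RFC 7951 the choice/case levels of
// shard-result-output do not appear in the JSON encoding.
//
//   {
//     "cluster-admin:output": {
//       "shard-result": [
//         { "shard-name": "default", "data-store-type": "config", "success": {} },
//         { "shard-name": "topology", "data-store-type": "config",
//           "failure": { "message": "No module configuration exists for shard topology" } }
//       ]
//     }
//   }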
rpc remove-all-shard-replicas {
description "Removes replicas for all shards on this node. This is equivalent to issuing
a remove-shard-replica for all shards and essentially removes this node from a cluster.";
type cds:member-name;
description "The cluster member from which the shard replicas should be removed";
uses shard-result-output;
rpc change-member-voting-states-for-shard {
description "Changes the voting states, either voting or non-voting, of cluster members for a shard.
Non-voting members will no longer participate in leader elections and consensus but will be
replicated. This is useful for having a set of members serve as a backup cluster in case the
primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
and will be forwarded to the leader.";
uses datastore-shard-id;
uses member-voting-states-input;
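// Illustrative only: a sketch of the input, assuming hypothetical members "member-2" and
// "member-3" and member-voting-state entries keyed by "member-name" with a boolean "voting" leaf.
//
//   POST /rests/operations/cluster-admin:change-member-voting-states-for-shard
//   {
//     "cluster-admin:input": {
//       "shard-name": "default",
//       "data-store-type": "config",
//       "member-voting-state": [
//         { "member-name": "member-2", "voting": false },
//         { "member-name": "member-3", "voting": true }
//       ]
//     }
//   }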
rpc change-member-voting-states-for-all-shards {
description "Changes the voting states, either voting or non-voting, of cluster members for all shards.
Non-voting members will no longer participate in leader elections and consensus but will be
replicated. This is useful for having a set of members serve as a backup cluster in case the
primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
and will be forwarded to the leader.";
uses member-voting-states-input;
uses shard-result-output;
rpc flip-member-voting-states-for-all-shards {
description "Flips the voting states of all cluster members for all shards, such that if a member
was voting it becomes non-voting and vice versa.";
uses shard-result-output;
rpc backup-datastore {
description "Creates a backup file of the datastore state";
description "The path and name of the file in which to store the backup.";
description "Optional timeout in seconds for the backup operation which, if specified, overrides the
various timeouts used on the backend.";
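// Illustrative only: a sketch of the input, assuming the input leaves are named "file-path" and
// "timeout"; the path and the 60-second value are hypothetical.
//
//   POST /rests/operations/cluster-admin:backup-datastore
//   {
//     "cluster-admin:input": {
//       "file-path": "/tmp/datastore-backup.bin",
//       "timeout": 60
//     }
//   }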
description "Returns the current role for the requested module shard.";
uses datastore-shard-id;
description "Current role for the given shard. If not present, the shard does not currently have a role.";
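// Illustrative only: a request/reply sketch for get-shard-role, assuming the output leaf is named
// "role" and the shard currently reports the hypothetical "Leader" role.
//
//   POST /rests/operations/cluster-admin:get-shard-role
//   { "cluster-admin:input": { "shard-name": "default", "data-store-type": "config" } }
//
//   reply:
//   { "cluster-admin:output": { "role": "Leader" } }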
description "Returns the transport-level information about where a shard has its home.";
uses datastore-shard-id;
description "Location of the cluster member node that is the shard's home, expressed in terms of the
input parameters and the transport protocol.";
presence "The local node is the best node to talk to from the efficiency perspective of the underlying
implementation. The requester of this RPC is advised to access any services related to the specified
shard via the channel on which this RPC was invoked.";
case leader-actor-ref-case {
leaf leader-actor-ref {
description "Actor reference to the actor which is currently acting as the leader.";
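// Illustrative only: a sketch of a possible locate-shard reply when a remote leader is found; the
// Akka-style actor path is a hypothetical value. When the local node is the best choice, the reply
// instead carries an empty "local" presence container.
//
//   {
//     "cluster-admin:output": {
//       "leader-actor-ref": "akka://opendaylight-cluster-data@10.0.0.2:2550/user/shardmanager-config/member-2-shard-default-config"
//     }
//   }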
rpc get-known-clients-for-all-shards {
description "Request all shards to report their known frontend clients. This is useful for determining what
generation a resurrected member node should use.";
uses shard-result-output {
augment shard-result/result/success-case/success {
uses cds:client-identifier;
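// Illustrative only: this RPC takes no input, so a bare POST suffices; each successful entry in the
// reply's shard-result list carries the client identifiers contributed by cds:client-identifier.
//
//   POST /rests/operations/cluster-admin:get-known-clients-for-all-shards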
rpc activate-eos-datacenter {
description "Activates the datacenter containing the node on which this RPC is called. The caller must maintain
only a single active datacenter at a time, as the singleton components will otherwise interfere with
each other. This only needs to be used when configuring multiple datacenters or when not using the
default datacenter.";
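// Illustrative only: this RPC takes no input; a bare POST against the RESTCONF operations resource
// (deployment-specific base path assumed) activates the local node's datacenter.
//
//   POST /rests/operations/cluster-admin:activate-eos-datacenter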
rpc deactivate-eos-datacenter {
description "Deactivates the datacenter containing the node on which this RPC is called. The caller must maintain
only a single active datacenter at a time, as the singleton components will otherwise interfere with
each other. This only needs to be used when configuring multiple datacenters or when not using the
default datacenter.";