3 namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:cluster:admin";
4 prefix "cluster-admin";
7 "This module contains YANG RPC definitions for administering a cluster.";
9 revision "2015-10-13" {
10 description "Initial revision.";
13 import odl-controller-cds-types { prefix cds; }
// Enumerates the kinds of datastore a shard can belong to.
// NOTE(review): the enum body and closing brace are elided from this listing
// (see the gap in the embedded original line numbers).
15 typedef data-store-type {
// Reusable identifier of a shard: its name plus the datastore it belongs to.
26 grouping datastore-shard-id {
27 description "Grouping holding combined identifiers of a shard -- its name and datastore type";
// Shard-name leaf (leaf header elided in this listing); the error-app-tag /
// error-message below report a violation of its non-empty constraint.
30 description "The name of the shard.";
34 error-app-tag "odl-named-shards";
35 error-message "Shard name must not be empty";
// Which datastore the shard belongs to (typed via data-store-type above).
40 leaf data-store-type {
43 description "The type of the data store to which the shard belongs";
// Per-shard operation result; identifies the shard the result applies to.
// NOTE(review): additional result leaves (e.g. a success flag referenced by the
// "../succeeded" when-expression later in the file) are elided from this listing.
47 grouping shard-operation-result {
48 uses datastore-shard-id;
// Output wrapper: a list of per-shard operation results.
59 grouping shard-result-output {
// Composite list key: one entry per (shard-name, data-store-type) pair.
61 key "shard-name data-store-type";
62 uses shard-operation-result;
64 description "The list of results, one per shard";
// Input grouping: the desired voting state for each listed cluster member.
// NOTE(review): the member-voting-state list's leaves are elided from this listing.
68 grouping member-voting-states-input {
69 list member-voting-state {
78 description "The list of member voting states";
// RPC: create a local replica of an existing module-based shard and join it to
// the cluster (Raft AddServer, per the description below).
82 rpc add-shard-replica {
// Input identifies the target shard by name and datastore type.
84 uses datastore-shard-id;
87 description "Adds a replica of a shard to this node and joins it to an existing cluster. The shard must
88 already have a module configuration defined for it and there must already be a shard existing on
89 another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
90 When found, an AddServer message is sent to the shard leader and applied as described in the Raft
// RPC: remove a shard replica from a named member via Raft RemoveServer.
94 rpc remove-shard-replica {
96 uses datastore-shard-id;
// member-name leaf (header elided in this listing) selects the node to remove from.
101 description "The cluster member from which the shard replica should be removed";
105 description "Removes an existing replica of a shard from this node via the RemoveServer mechanism as
106 described in the Raft paper.";
// RPC: request that leadership of the given module-based shard move to the
// local node; may first sync the local replica with the current leader.
109 rpc make-leader-local {
111 uses datastore-shard-id;
114 description "Attempts to move the shard leader of the given module based shard to the local node.
115 The rpc returns a response after handling of the underlying MakeLeaderLocal message completes.
116 This operation fails if there is no current shard leader due to lack of network connectivity or
117 a cluster majority. In addition, if the local node is not up to date with the current leader,
118 an attempt is made to first sync the local node with the leader. If this cannot be achieved
119 within two election timeout periods the operation fails.";
// RPC: add a local replica of a prefix-based shard, identified by an
// instance-identifier rather than a shard name.
122 rpc add-prefix-shard-replica {
// Prefix of the shard to replicate (leaf header elided in this listing).
126 type instance-identifier;
// Which datastore the replica belongs to.
129 leaf data-store-type {
131 type data-store-type;
132 description "The type of the data store to which the replica belongs";
136 description "Adds a replica of a shard to this node and joins it to an existing cluster. There must already be
137 a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
138 searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
139 described in the Raft paper.";
// RPC: remove a prefix-based shard replica from a named member via Raft RemoveServer.
142 rpc remove-prefix-shard-replica {
// Prefix of the shard (leaf header elided in this listing).
146 type instance-identifier;
// member-name leaf (header elided in this listing) selects the node to remove from.
151 description "The cluster member from which the shard replica should be removed";
154 leaf data-store-type {
156 type data-store-type;
157 description "The type of the data store to which the replica belongs";
161 description "Removes an existing replica of a prefix shard from this node via the RemoveServer mechanism as
162 described in the Raft paper.";
// RPC: bulk form of add-shard-replica covering every currently defined shard;
// output reports one result per shard.
165 rpc add-replicas-for-all-shards {
167 uses shard-result-output;
170 description "Adds replicas on this node for all currently defined shards. This is equivalent to issuing
171 an add-shard-replica RPC for all shards.";
// RPC: bulk form of remove-shard-replica; effectively detaches the named node
// from the cluster.
174 rpc remove-all-shard-replicas {
// member-name leaf (header elided in this listing) selects the node to remove from.
179 description "The cluster member from which the shard replicas should be removed";
// Output reports one result per shard.
184 uses shard-result-output;
187 description "Removes replicas for all shards on this node. This is equivalent to issuing
188 a remove-shard-replica for all shards and essentially removes this node from a cluster.";
// RPC: set the voting/non-voting state of the listed members for one shard.
// May be issued to any member; forwarded to the leader (per description).
191 rpc change-member-voting-states-for-shard {
193 uses datastore-shard-id;
194 uses member-voting-states-input;
197 description "Changes the voting states, either voting or non-voting, of cluster members for a shard.
198 Non-voting members will no longer participate in leader elections and consensus but will be
199 replicated. This is useful for having a set of members serve as a backup cluster in case the
200 primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
201 and will be forwarded to the leader.";
// RPC: same as change-member-voting-states-for-shard, applied to all shards;
// output reports one result per shard.
204 rpc change-member-voting-states-for-all-shards {
206 uses member-voting-states-input;
210 uses shard-result-output;
213 description "Changes the voting states, either voting or non-voting, of cluster members for all shards.
214 Non-voting members will no longer participate in leader elections and consensus but will be
215 replicated. This is useful for having a set of members serve as a backup cluster in case the
216 primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
217 and will be forwarded to the leader.";
// RPC: invert every member's voting state on every shard (voting <-> non-voting);
// output reports one result per shard.
220 rpc flip-member-voting-states-for-all-shards {
222 uses shard-result-output;
225 description "Flips the voting states of all cluster members for all shards, such that if a member
226 was voting it becomes non-voting and vice versa.";
// RPC: write a snapshot of the datastore state to a file.
229 rpc backup-datastore {
// file-path leaf (header elided in this listing).
233 description "The path and name of the file in which to store the backup.";
// Optional timeout leaf (header elided in this listing); overrides backend timeouts.
241 description "Optional timeout in seconds for the backup operation which will override all the different
242 timeouts that are being hit on the backend.";
246 description "Creates a backup file of the datastore state";
// NOTE(review): the enclosing rpc header is elided from this listing; per the
// descriptions below this rpc reports the current role of a module shard.
251 uses datastore-shard-id;
// Role output is optional: absent means the shard currently has no role.
257 description "Current role for the given shard, if not present the shard currently does not have a role";
261 description "Returns the current role for the requested module shard.";
// NOTE(review): the enclosing rpc header is elided from this listing; per the
// descriptions below this rpc resolves where a shard is homed.
265 description "Return the transport-level information about where a shard has a home.";
267 uses datastore-shard-id;
// Output: either the local node is the best contact point, or a reference to
// the remote leader actor is returned (leader-actor-ref below).
272 description "Location of the hypothetical cluster member node. Relationship to the input parameters
273 and the transport protocol.";
276 description "Local node is the best node to talk to when it comes from efficiency perspective
277 of underlying implementation. The requester of this RPC is advised to contact
278 any services to the specified shard via the channel on which this RPC was invoked.";
282 leaf leader-actor-ref {
283 description "Actor reference to the actor which is currently acting as the leader.";
// RPC: report the current role of a prefix-based shard.
290 rpc get-prefix-shard-role {
// Prefix of the shard (leaf header elided in this listing).
294 type instance-identifier;
297 leaf data-store-type {
299 type data-store-type;
300 description "The type of the data store to which the replica belongs";
// Role output is optional: absent means the shard currently has no role.
307 description "Current role for the given shard, if not present the shard currently does not have a role";
311 description "Returns the current role for the requested module shard.";
314 rpc get-known-clients-for-all-shards {
315 description "Request all shards to report their known frontend clients. This is useful for determining what
316 generation should a resurrected member node should use.";
319 uses shard-result-output {
320 augment shard-result {
322 when "../succeeded = true";
324 uses cds:client-identifier;