module cluster-admin {
    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:cluster:admin";
    prefix "cluster-admin";

    import odl-controller-cds-types { prefix cds; }

    description
        "This module contains YANG RPC definitions for administering a cluster.";

    revision "2015-10-13" {
        description "Initial revision.";
    }

    typedef data-store-type {
        type enumeration {
            enum config {
                value 1;
            }
            enum operational {
                value 2;
            }
        }
    }

    grouping datastore-shard-id {
        description "Grouping holding the combined identifiers of a shard -- its name and datastore type.";

        leaf shard-name {
            mandatory true;
            type string {
                length "1..max" {
                    error-app-tag "odl-named-shards";
                    error-message "Shard name must not be empty";
                }
            }
            description "The name of the shard.";
        }

        leaf data-store-type {
            mandatory true;
            type data-store-type;
            description "The type of the data store to which the shard belongs";
        }
    }

    grouping shard-operation-result {
        uses datastore-shard-id;

        leaf succeeded {
            type boolean;
            description "Whether the operation on this shard succeeded.";
        }

        leaf error-message {
            type string;
            description "Textual cause of the failure, present when the operation did not succeed.";
        }
    }

    grouping shard-result-output {
        list shard-result {
            key "shard-name data-store-type";
            uses shard-operation-result;

            description "The list of results, one per shard";
        }
    }

    grouping member-voting-states-input {
        list member-voting-state {
            leaf member-name {
                type string;
            }

            leaf voting {
                type boolean;
            }

            description "The list of member voting states";
        }
    }

    rpc add-shard-replica {
        input {
            uses datastore-shard-id;
        }

        description "Adds a replica of a shard to this node and joins it to an existing cluster. The shard must
            already have a module configuration defined for it and there must already be a shard existing on
            another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
            When found, an AddServer message is sent to the shard leader and applied as described in the Raft
            paper.";
    }

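    // Example (illustrative): invoking this RPC via RESTCONF, assuming the
    // standard OpenDaylight RFC 8040 endpoint; the shard name is hypothetical.
    //
    //   POST /rests/operations/cluster-admin:add-shard-replica
    //   { "input": { "shard-name": "default", "data-store-type": "config" } }
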
    rpc remove-shard-replica {
        input {
            uses datastore-shard-id;

            leaf member-name {
                mandatory true;
                type string;
                description "The cluster member from which the shard replica should be removed";
            }
        }

        description "Removes an existing replica of a shard from this node via the RemoveServer mechanism as
            described in the Raft paper.";
    }

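    // Example (illustrative): removing the replica hosted on a hypothetical
    // member "member-2", again assuming the RFC 8040 RESTCONF endpoint.
    //
    //   POST /rests/operations/cluster-admin:remove-shard-replica
    //   { "input": { "shard-name": "default", "data-store-type": "config",
    //                "member-name": "member-2" } }
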
    rpc make-leader-local {
        input {
            uses datastore-shard-id;
        }

        description "Attempts to move the leader of the given module-based shard to the local node.
            The RPC returns a response after handling of the underlying MakeLeaderLocal message completes.
            This operation fails if there is no current shard leader due to lack of network connectivity or
            a cluster majority. In addition, if the local node is not up to date with the current leader,
            an attempt is made to first sync the local node with the leader. If this cannot be achieved
            within two election timeout periods, the operation fails.";
    }

    rpc add-replicas-for-all-shards {
        output {
            uses shard-result-output;
        }

        description "Adds replicas on this node for all currently defined shards. This is equivalent to issuing
            an add-shard-replica RPC for each shard.";
    }

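    // Example (illustrative): this RPC takes no input; a sketch of the reply,
    // with hypothetical shard names, showing one shard-result entry per shard.
    //
    //   POST /rests/operations/cluster-admin:add-replicas-for-all-shards
    //   -> { "output": { "shard-result": [
    //        { "shard-name": "default", "data-store-type": "config", "succeeded": true },
    //        { "shard-name": "default", "data-store-type": "operational", "succeeded": true } ] } }
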
    rpc remove-all-shard-replicas {
        input {
            leaf member-name {
                mandatory true;
                type string;
                description "The cluster member from which the shard replicas should be removed";
            }
        }

        output {
            uses shard-result-output;
        }

        description "Removes replicas for all shards on this node. This is equivalent to issuing
            a remove-shard-replica RPC for each shard and essentially removes this node from the cluster.";
    }

    rpc change-member-voting-states-for-shard {
        input {
            uses datastore-shard-id;
            uses member-voting-states-input;
        }

        description "Changes the voting states, either voting or non-voting, of cluster members for a shard.
            Non-voting members will no longer participate in leader elections and consensus but will still be
            replicated. This is useful for having a set of members serve as a backup cluster in case the
            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
            and will be forwarded to the leader.";
    }

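    // Example (illustrative): demoting a hypothetical member "member-3" to
    // non-voting for the config "default" shard.
    //
    //   POST /rests/operations/cluster-admin:change-member-voting-states-for-shard
    //   { "input": { "shard-name": "default", "data-store-type": "config",
    //                "member-voting-state": [
    //                  { "member-name": "member-3", "voting": false } ] } }
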
    rpc change-member-voting-states-for-all-shards {
        input {
            uses member-voting-states-input;
        }

        output {
            uses shard-result-output;
        }

        description "Changes the voting states, either voting or non-voting, of cluster members for all shards.
            Non-voting members will no longer participate in leader elections and consensus but will still be
            replicated. This is useful for having a set of members serve as a backup cluster in case the
            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
            and will be forwarded to the leader.";
    }

    rpc flip-member-voting-states-for-all-shards {
        output {
            uses shard-result-output;
        }

        description "Flips the voting states of all cluster members for all shards, such that if a member
            was voting it becomes non-voting and vice versa.";
    }

    rpc backup-datastore {
        input {
            leaf file-path {
                type string;
                description "The path and name of the file in which to store the backup.";
            }

            leaf timeout {
                type uint32 {
                    range "1..max";
                }
                units "seconds";
                description "Optional timeout in seconds for the backup operation, overriding the various
                    timeouts applied on the backend.";
            }
        }

        description "Creates a backup file of the datastore state.";
    }

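    // Example (illustrative): writing a backup to a hypothetical local path,
    // with a 60-second timeout override.
    //
    //   POST /rests/operations/cluster-admin:backup-datastore
    //   { "input": { "file-path": "/tmp/datastore-backup.bin", "timeout": 60 } }
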
    rpc get-shard-role {
        input {
            uses datastore-shard-id;
        }

        output {
            leaf role {
                type string;
                description "Current role for the given shard; if not present, the shard currently does not have a role.";
            }
        }

        description "Returns the current role for the requested module shard.";
    }

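    // Example (illustrative): querying the role of the operational "default"
    // shard; "Leader" is one possible role string reported by the Raft layer.
    //
    //   POST /rests/operations/cluster-admin:get-shard-role
    //   { "input": { "shard-name": "default", "data-store-type": "operational" } }
    //   -> { "output": { "role": "Leader" } }
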
    rpc locate-shard {
        description "Returns the transport-level information about where a shard has its home.";

        input {
            uses datastore-shard-id;
        }

        output {
            choice member-node {
                description "Location of the cluster member node hosting the shard, expressed in terms of
                    the input parameters and the transport protocol.";

                leaf local {
                    type empty;
                    description "The local node is the most efficient node to talk to, from the perspective of
                        the underlying implementation. The requester of this RPC is advised to contact
                        any services attached to the specified shard via the channel on which this RPC
                        was invoked.";
                }

                leaf leader-actor-ref {
                    type string;
                    description "Actor reference to the actor which is currently acting as the leader.";
                }
            }
        }
    }

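    // Example (illustrative): a reply for a remotely-homed shard; the actor
    // reference below is a made-up Akka address for illustration only.
    //
    //   POST /rests/operations/cluster-admin:locate-shard
    //   { "input": { "shard-name": "default", "data-store-type": "config" } }
    //   -> { "output": { "leader-actor-ref":
    //        "akka://opendaylight-cluster-data@10.0.0.2:2550/user/..." } }
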
    rpc get-known-clients-for-all-shards {
        description "Requests all shards to report their known frontend clients. This is useful for determining
            what generation a resurrected member node should use.";

        output {
            uses shard-result-output {
                augment shard-result {
                    list known-clients {
                        when "../succeeded = true";

                        uses cds:client-identifier;
                        key "member type";
                    }
                }
            }
        }
    }

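    // Example (illustrative): a sketch of a successful reply; the client
    // identifier fields come from the cds:client-identifier grouping and the
    // values below are hypothetical.
    //
    //   POST /rests/operations/cluster-admin:get-known-clients-for-all-shards
    //   -> { "output": { "shard-result": [
    //        { "shard-name": "default", "data-store-type": "config", "succeeded": true,
    //          "known-clients": [
    //            { "member": "member-1", "type": "datastore-config", "generation": 0 } ] } ] } }
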
    rpc activate-eos-datacenter {
        description "Activates the datacenter that the node this RPC is called on belongs to. The caller must
            maintain only a single active datacenter at a time, as the singleton components will otherwise
            interfere with each other. This only needs to be used when configuring multiple datacenters or
            when not using the default datacenter.";
    }

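    // Example (illustrative): this RPC and deactivate-eos-datacenter below take
    // no input; they simply act on the datacenter of the node that is invoked.
    //
    //   POST /rests/operations/cluster-admin:activate-eos-datacenter
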
    rpc deactivate-eos-datacenter {
        description "Deactivates the datacenter that the node this RPC is called on belongs to. The caller must
            maintain only a single active datacenter at a time, as the singleton components will otherwise
            interfere with each other. This only needs to be used when configuring multiple datacenters or
            when not using the default datacenter.";
    }
}