X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-cluster-admin-api%2Fsrc%2Fmain%2Fyang%2Fcluster-admin.yang;h=260623f487f115e904fdccf3ba6d6c4414d5fad1;hp=8a3c58a16279f25d7e4a8d2375ea52c898ef4e40;hb=6ef0b898f2117a4bb3a510c0df7af340f4fc8eca;hpb=be338c9e1dab83e2a5ff21819b92b934ef32faee

diff --git a/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang b/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
index 8a3c58a162..260623f487 100644
--- a/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
+++ b/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
@@ -10,6 +10,8 @@ module cluster-admin {
         description "Initial revision.";
     }
 
+    import odl-controller-cds-types { prefix cds; }
+
     typedef data-store-type {
         type enumeration {
             enum config {
@@ -21,14 +23,29 @@ module cluster-admin {
         }
     }
 
-    grouping shard-operation-result {
+    grouping datastore-shard-id {
+        description "Grouping holding combined identifiers of a shard -- its name and datastore type";
+
         leaf shard-name {
-            type string;
+            description "The name of the shard.";
+            mandatory true;
+            type string {
+                length "1..max" {
+                    error-app-tag "odl-named-shards";
+                    error-message "Shard name must not be empty";
+                }
+            }
         }
 
         leaf data-store-type {
+            mandatory true;
             type data-store-type;
+            description "The type of the data store to which the shard belongs";
         }
+    }
+
+    grouping shard-operation-result {
+        uses datastore-shard-id;
 
         leaf succeeded {
             type boolean;
@@ -64,49 +81,42 @@ module cluster-admin {
 
     rpc add-shard-replica {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to create a replica.";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
+            uses datastore-shard-id;
         }
 
         description "Adds a replica of a shard to this node and joins it to an existing cluster. The shard must
-            already have a module configuration defined for it and there must already be a shard existing on
-            another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
-            When found, an AddServer message is sent to the shard leader and applied as described in the Raft
-            paper.";
+                     already have a module configuration defined for it and there must already be a shard existing on
+                     another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
+                     When found, an AddServer message is sent to the shard leader and applied as described in the Raft
+                     paper.";
     }
 
     rpc remove-shard-replica {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to remove the replica.";
-            }
+            uses datastore-shard-id;
 
             leaf member-name {
                 mandatory true;
                 type string;
                 description "The cluster member from which the shard replica should be removed";
             }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
         }
 
         description "Removes an existing replica of a shard from this node via the RemoveServer mechanism as
-            described in the Raft paper.";
+                     described in the Raft paper.";
+    }
+
+    rpc make-leader-local {
+        input {
+            uses datastore-shard-id;
+        }
+
+        description "Attempts to move the shard leader of the given module-based shard to the local node.
+                     The RPC returns a response after handling of the underlying MakeLeaderLocal message completes.
+                     This operation fails if there is no current shard leader due to lack of network connectivity or
+                     a cluster majority. In addition, if the local node is not up to date with the current leader,
+                     an attempt is made to first sync the local node with the leader. If this cannot be achieved
+                     within two election timeout periods the operation fails.";
     }
 
     rpc add-prefix-shard-replica {
@@ -124,9 +134,9 @@ module cluster-admin {
         }
 
         description "Adds a replica of a shard to this node and joins it to an existing cluster. There must already be
-            a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
-            searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
-            described in the Raft paper.";
+                     a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
+                     searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
+                     described in the Raft paper.";
     }
 
     rpc remove-prefix-shard-replica {
@@ -149,7 +159,7 @@ module cluster-admin {
         }
 
         description "Removes an existing replica of a prefix shard from this node via the RemoveServer mechanism as
-            described in the Raft paper.";
+                     described in the Raft paper.";
     }
 
     rpc add-replicas-for-all-shards {
@@ -158,7 +168,7 @@ module cluster-admin {
         }
 
         description "Adds replicas on this node for all currently defined shards. This is equivalent to issuing
-            an add-shard-replica RPC for all shards.";
+                     an add-shard-replica RPC for all shards.";
     }
 
     rpc remove-all-shard-replicas {
@@ -175,31 +185,20 @@ module cluster-admin {
         }
 
         description "Removes replicas for all shards on this node. This is equivalent to issuing
-            a remove-shard-replica for all shards and essentially removes this node from a cluster.";
+                     a remove-shard-replica for all shards and essentially removes this node from a cluster.";
     }
 
     rpc change-member-voting-states-for-shard {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to change voting state.";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the shard belongs";
-            }
-
+            uses datastore-shard-id;
             uses member-voting-states-input;
         }
 
         description "Changes the voting states, either voting or non-voting, of cluster members for a shard.
-            Non-voting members will no longer participate in leader elections and consensus but will be
-            replicated. This is useful for having a set of members serve as a backup cluster in case the
-            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
-            and will be forwarded to the leader.";
+                     Non-voting members will no longer participate in leader elections and consensus but will be
+                     replicated. This is useful for having a set of members serve as a backup cluster in case the
+                     primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
+                     and will be forwarded to the leader.";
     }
 
     rpc change-member-voting-states-for-all-shards {
@@ -212,10 +211,10 @@ module cluster-admin {
         }
 
         description "Changes the voting states, either voting or non-voting, of cluster members for all shards.
-            Non-voting members will no longer participate in leader elections and consensus but will be
-            replicated. This is useful for having a set of members serve as a backup cluster in case the
-            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
-            and will be forwarded to the leader.";
+                     Non-voting members will no longer participate in leader elections and consensus but will be
+                     replicated. This is useful for having a set of members serve as a backup cluster in case the
+                     primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
+                     and will be forwarded to the leader.";
     }
 
     rpc flip-member-voting-states-for-all-shards {
@@ -224,7 +223,7 @@ module cluster-admin {
         }
 
         description "Flips the voting states of all cluster members for all shards, such that if a member
-            was voting it becomes non-voting and vice versa.";
+                     was voting it becomes non-voting and vice versa.";
     }
 
     rpc backup-datastore {
@@ -233,8 +232,100 @@ module cluster-admin {
                 type string;
                 description "The path and name of the file in which to store the backup.";
             }
+
+            leaf timeout {
+                type uint32 {
+                    range 1..max;
+                }
+                units "seconds";
+                description "Optional timeout in seconds for the backup operation, which overrides the various
+                             timeouts applied on the backend.";
+            }
         }
 
         description "Creates a backup file of the datastore state";
     }
-}
\ No newline at end of file
+
+    rpc get-shard-role {
+        input {
+            uses datastore-shard-id;
+        }
+
+        output {
+            leaf role {
+                type string;
+                description "Current role for the given shard; if not present, the shard currently does not have a role";
+            }
+        }
+
+        description "Returns the current role for the requested module shard.";
+    }
+
+    rpc locate-shard {
+        description "Returns the transport-level information about where a shard is homed.";
+
+        input {
+            uses datastore-shard-id;
+        }
+
+        output {
+            choice member-node {
+                description "Location of the cluster member node hosting the shard, in relation to the input parameters
+                             and the transport protocol.";
+
+                leaf local {
+                    description "The local node is the most efficient node to talk to, as far as the underlying
+                                 implementation is concerned. The requester of this RPC is advised to contact any
+                                 services related to the specified shard via the channel on which this RPC was invoked.";
+                    type empty;
+                }
+
+                leaf leader-actor-ref {
+                    description "Actor reference to the actor which is currently acting as the leader.";
+                    type string;
+                }
+            }
+        }
+    }
+
+    rpc get-prefix-shard-role {
+        input {
+            leaf shard-prefix {
+                mandatory true;
+                type instance-identifier;
+            }
+
+            leaf data-store-type {
+                mandatory true;
+                type data-store-type;
+                description "The type of the data store to which the replica belongs";
+            }
+        }
+
+        output {
+            leaf role {
+                type string;
+                description "Current role for the given shard; if not present, the shard currently does not have a role";
+            }
+        }
+
+        description "Returns the current role for the requested prefix shard.";
+    }
+
+    rpc get-known-clients-for-all-shards {
+        description "Request all shards to report their known frontend clients. This is useful for determining which
+                     generation a resurrected member node should use.";
+
+        output {
+            uses shard-result-output {
+                augment shard-result {
+                    list known-clients {
+                        when "../succeeded = true";
+
+                        uses cds:client-identifier;
+                        key "member type";
+                    }
+                }
+            }
+        }
+    }
+}
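For reference, the RPCs defined in this module are typically invoked over RESTCONF. The following is a minimal sketch, not part of the patch, showing how cluster-admin:add-shard-replica could be called from Python; the controller address, port, RESTCONF operations root and credentials are assumptions that depend on the deployment.

# Minimal sketch (not part of the patch): invoking cluster-admin:add-shard-replica
# over RESTCONF. The base URL, RESTCONF operations root and credentials below are
# assumptions -- adjust them to the actual deployment.
import requests

BASE_URL = "http://localhost:8181/rests/operations"  # assumed RESTCONF operations root
AUTH = ("admin", "admin")                             # assumed credentials


def add_shard_replica(shard_name: str, data_store_type: str) -> dict:
    """Invoke cluster-admin:add-shard-replica for the given shard.

    The input leaves mirror the datastore-shard-id grouping defined in the
    module: a non-empty shard-name and a data-store-type of either
    'config' or 'operational'.
    """
    body = {"input": {"shard-name": shard_name, "data-store-type": data_store_type}}
    resp = requests.post(
        f"{BASE_URL}/cluster-admin:add-shard-replica",
        json=body,
        auth=AUTH,
        timeout=30,
    )
    resp.raise_for_status()
    # Some of these RPCs return an empty body on success.
    return resp.json() if resp.content else {}


if __name__ == "__main__":
    # Example: add a replica of the "default" config shard to this node.
    add_shard_replica("default", "config")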