X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-cluster-admin-api%2Fsrc%2Fmain%2Fyang%2Fcluster-admin.yang;h=25c88475f6cfdff0f72bc45e83339137ddcd3d7c;hb=HEAD;hp=bb0eed38b28e59f45a1f5fe3985d4fddca4b82fc;hpb=1d98accc8edc5b9884f86be655863a14e52258e4;p=controller.git

diff --git a/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang b/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
index bb0eed38b2..25c88475f6 100644
--- a/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
+++ b/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
@@ -10,6 +10,8 @@ module cluster-admin {
         description "Initial revision.";
     }
 
+    import odl-controller-cds-types { prefix cds; }
+
     typedef data-store-type {
         type enumeration {
             enum config {
@@ -117,49 +119,6 @@ module cluster-admin {
                      within two election timeout periods the operation fails.";
     }
 
-    rpc add-prefix-shard-replica {
-        input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
-        }
-
-        description "Adds a replica of a shard to this node and joins it to an existing cluster. There must already be
-                     a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
-                     searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
-                     described in the Raft paper.";
-    }
-
-    rpc remove-prefix-shard-replica {
-        input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-            leaf member-name {
-                mandatory true;
-                type string;
-                description "The cluster member from which the shard replica should be removed";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
-        }
-
-        description "Removes an existing replica of a prefix shard from this node via the RemoveServer mechanism as
-                     described in the Raft paper.";
-    }
-
     rpc add-replicas-for-all-shards {
         output {
             uses shard-result-output;
@@ -230,6 +189,15 @@ module cluster-admin {
                 type string;
                 description "The path and name of the file in which to store the backup.";
             }
+
+            leaf timeout {
+                type uint32 {
+                    range 1..max;
+                }
+                units "seconds";
+                description "Optional timeout in seconds for the backup operation, overriding any timeouts that would
+                             otherwise apply on the backend.";
+            }
         }
 
         description "Creates a backup file of the datastore state";
@@ -276,27 +244,35 @@ module cluster-admin {
         }
     }
 
-    rpc get-prefix-shard-role {
-        input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
-        }
+    rpc get-known-clients-for-all-shards {
+        description "Request all shards to report their known frontend clients. This is useful for determining what
+                     generation a resurrected member node should use.";
 
         output {
-            leaf role {
-                type string;
-                description "Current role for the given shard, if not present the shard currently does not have a role";
+            uses shard-result-output {
+                augment shard-result {
+                    list known-clients {
+                        when "../succeeded = true";
+
+                        uses cds:client-identifier;
+                        key "member type";
+                    }
+                }
             }
         }
+    }
 
-        description "Returns the current role for the requested module shard.";
+    rpc activate-eos-datacenter {
+        description "Activates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+                     only a single active datacenter at a time, as the singleton components will otherwise interfere
+                     with each other. This only needs to be used when multiple datacenters are configured or when not
+                     using the default datacenter.";
+    }
+
+    rpc deactivate-eos-datacenter {
+        description "Deactivates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+                     only a single active datacenter at a time, as the singleton components will otherwise interfere
+                     with each other. This only needs to be used when multiple datacenters are configured or when not
+                     using the default datacenter.";
     }
 }
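
For reference, a minimal sketch of how the new optional timeout leaf might be supplied when invoking
backup-datastore over RESTCONF (RFC 8040). The host, credentials, the /rests/operations base path and
the example values are assumptions about a default OpenDaylight deployment, not part of this change;
file-path is the pre-existing input leaf whose description appears in the hunk above.

    POST /rests/operations/cluster-admin:backup-datastore
    Content-Type: application/json

    {
      "input": {
        "file-path": "/tmp/datastore-backup",
        "timeout": 60
      }
    }

Since the new leaf is not marked mandatory, omitting timeout leaves the backend's own default
timeouts in effect, as before this change.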
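Similarly, a hedged sketch of calling the new get-known-clients-for-all-shards RPC; the endpoint base
path is the same assumption as above, and the RPC defines no input:

    POST /rests/operations/cluster-admin:get-known-clients-for-all-shards

In the reply, each shard-result entry with succeeded = true carries the augmented known-clients list,
keyed by member and type via the cds:client-identifier grouping, which is the information needed when
deciding what generation a resurrected member node should come back with.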