Cluster setup in one click - Docker & Vagrant 28/51528/2
author Alexis de Talhouët <adetalhouet89@gmail.com>
Tue, 7 Feb 2017 18:12:09 +0000 (13:12 -0500)
committer Jamo Luhrsen <jluhrsen@redhat.com>
Thu, 9 Feb 2017 05:27:22 +0000 (21:27 -0800)
TODO:
- add the ability to customize the IPs of the spawned containers through
the config.properties file

Change-Id: I450a596a5201c8a379080a82effe5c1d58f063d6
Signed-off-by: Alexis de Talhouët <adetalhouet89@gmail.com>
Signed-off-by: Jamo Luhrsen <jluhrsen@redhat.com>
tutorials/cluster-nodes/Dockerfile [new file with mode: 0644]
tutorials/cluster-nodes/ODL_clustering.postman_collection.json [new file with mode: 0644]
tutorials/cluster-nodes/README.md [new file with mode: 0644]
tutorials/cluster-nodes/Vagrantfile [new file with mode: 0644]
tutorials/cluster-nodes/docker-compose.yml [new file with mode: 0644]
tutorials/cluster-nodes/scripts/config.properties [new file with mode: 0755]
tutorials/cluster-nodes/scripts/setup_cluster.sh [new file with mode: 0755]
tutorials/cluster-nodes/scripts/setup_odl.sh [new file with mode: 0755]

diff --git a/tutorials/cluster-nodes/Dockerfile b/tutorials/cluster-nodes/Dockerfile
new file mode 100644 (file)
index 0000000..1bdf91f
--- /dev/null
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+FROM ubuntu:trusty
+
+# Since we can't mount folders through docker-compose without
+# keeping them in sync with the host, we COPY them into the
+# image here to bypass this limitation
+
+COPY opendaylight /root/opendaylight
+COPY scripts /root/scripts
\ No newline at end of file
diff --git a/tutorials/cluster-nodes/ODL_clustering.postman_collection.json b/tutorials/cluster-nodes/ODL_clustering.postman_collection.json
new file mode 100644 (file)
index 0000000..30e50a8
--- /dev/null
@@ -0,0 +1,511 @@
+{
+    "id": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+    "name": "ODL Clustering",
+    "description": "",
+    "order": [],
+    "folders": [
+        {
+            "id": "27881d9d-f4fe-0fbf-861c-974b230d347b",
+            "name": "cluster-admin",
+            "description": "",
+            "order": [
+                "e0910063-966e-1b89-043f-409fb028883a",
+                "d30bb5b1-2076-13a5-9b4f-1a25799b1a3c",
+                "cca65185-0068-6ca7-b05f-ab3aefefce9c",
+                "7f7c3194-8451-8afd-5276-0599eb7a84ca",
+                "1fb7c71b-c8b8-bcae-7800-16b12c2d40d5",
+                "7307e998-b3b6-134a-6294-12bd30dcf906"
+            ],
+            "owner": "79838"
+        },
+        {
+            "id": "e23de96f-af25-d497-bd13-6fa2bf22b62f",
+            "name": "odl-1",
+            "description": "",
+            "order": [
+                "3f2253a6-0086-cd04-afff-894d461a7b8a",
+                "8a7e0ff5-c969-267a-bd9b-d720324493b1",
+                "eda3e8f3-9421-9357-cf3c-dbada5667c86",
+                "78eed1f6-2b32-b5a7-b8ac-d5b37ae9cddc",
+                "ed36e4ad-5819-c957-0dda-f82d79d5a0fd",
+                "eba3a877-fa9e-0c8d-8b66-9b2e3ff2e19e",
+                "01b654e2-d3e1-63d7-89e5-d209436364a1",
+                "6327aa5f-1b46-12bc-8a6e-c23eeedb1e50"
+            ],
+            "owner": "79838"
+        },
+        {
+            "id": "8bfef314-8c90-9496-8a56-e7bd8926ad29",
+            "name": "odl-2",
+            "description": "",
+            "order": [
+                "a67f1978-bab2-c29e-ec8a-d661be476696",
+                "d9696eae-0131-6592-3298-48df437c4090",
+                "31f11bd3-6b01-5e06-8680-b7f4660344e6",
+                "c8f88472-655b-14eb-167c-fbdadd2dc715",
+                "8ed4aa47-3328-f8b6-8242-3b75b47e24ea"
+            ],
+            "owner": "79838",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646"
+        },
+        {
+            "id": "173f932e-d45c-20b2-ba66-0b8306cc2846",
+            "name": "odl-3",
+            "description": "",
+            "order": [
+                "827363de-7498-7bfa-11f4-b6f661a94831",
+                "40eac8de-9d42-fc99-c62b-16fbba51761c",
+                "96888f3e-a702-8b64-a3e3-cc73edbf9039"
+            ],
+            "owner": "79838",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646"
+        }
+    ],
+    "timestamp": 0,
+    "owner": "79838",
+    "public": false,
+    "requests": [
+        {
+            "id": "01b654e2-d3e1-63d7-89e5-d209436364a1",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:type=DistributedOperationalDatastore,Category=Shards,name=member-1-shard-topology-operational",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "GET",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "params",
+            "name": "GET shard-topology-operational",
+            "description": "",
+            "descriptionFormat": "html",
+            "time": 1485205131255,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "1fb7c71b-c8b8-bcae-7800-16b12c2d40d5",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:add-replicas-for-all-shards",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "POST",
+            "data": [],
+            "dataMode": "raw",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485290676181,
+            "name": "RPC add-replicas-for-all-shards",
+            "description": "Adds replicas on this node for all currently defined shards. This is equivalent to issuing an add-shard-replica RPC for all shards.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": [],
+            "rawModeData": ""
+        },
+        {
+            "id": "31f11bd3-6b01-5e06-8680-b7f4660344e6",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.152:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-2-shard-topology-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485206134489,
+            "name": "Fetch Shared details-Topology",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "3f2253a6-0086-cd04-afff-894d461a7b8a",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/akka:type=Cluster",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485204305095,
+            "name": "Fetch Cluster details",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nMembers field should list all the cluster nodes added to the cluster.\nAlso the Unreachable fields should be empty.\n",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "40eac8de-9d42-fc99-c62b-16fbba51761c",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.153:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-3-shard-inventory-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485206145867,
+            "name": "Fetch Shared details-Inventory",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "6327aa5f-1b46-12bc-8a6e-c23eeedb1e50",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:type=DistributedOperationalDatastore,Category=Shards,name=member-1-shard-entity-ownership-operational",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485207730541,
+            "name": "GET entity-ownership-operational",
+            "description": "",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "7307e998-b3b6-134a-6294-12bd30dcf906",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:backup-datastore",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "POST",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "raw",
+            "name": "RPC backup-datastore",
+            "description": "Creates a backup file of the datastore state.\n\nTo restore the backup on the target node the file needs to be placed into the $KARAF_HOME/clustered-datastore-restore directory, and then the node restarted. If the directory does not exist (which is quite likely if this is a first-time restore) it needs to be created. On startup, ODL checks if the journal and snapshots directories in $KARAF_HOME are empty, and only then tries to read the contents of the clustered-datastore-restore directory, if it exists. So for a successful restore, those two directories should be empty. The backup file name itself does not matter, and the startup process will delete it after a successful restore.",
+            "descriptionFormat": "html",
+            "time": 1485290717090,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "27881d9d-f4fe-0fbf-861c-974b230d347b",
+            "rawModeData": "{\n  \"input\": {\n    \"file-path\": \"/tmp/datastore_backup\"\n  }\n}"
+        },
+        {
+            "id": "78eed1f6-2b32-b5a7-b8ac-d5b37ae9cddc",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/list",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "GET",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "params",
+            "name": "List schemas",
+            "description": "",
+            "descriptionFormat": "html",
+            "time": 1485204893249,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "7f7c3194-8451-8afd-5276-0599eb7a84ca",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:remove-shard-replica",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "POST",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "raw",
+            "name": "RPC remove-shard-replica",
+            "description": "Removes an existing replica of a shard from this node via the RemoveServer mechanism as described in the Raft paper.",
+            "descriptionFormat": "html",
+            "time": 1485290364020,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "27881d9d-f4fe-0fbf-861c-974b230d347b",
+            "rawModeData": "{\n  \"input\": {\n    \"shard-name\": \"default\",\n    \"member-name\": \"member-2\",\n    \"data-store-type\": \"config\"\n  }\n}"
+        },
+        {
+            "id": "827363de-7498-7bfa-11f4-b6f661a94831",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.153:8181/jolokia/read/akka:type=Cluster",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485204541025,
+            "name": "Fetch Cluster details",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nMembers field should list all the cluster nodes added to the cluster.\nAlso the Unreachable fields should be empty.\n",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646"
+        },
+        {
+            "id": "8a7e0ff5-c969-267a-bd9b-d720324493b1",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-inventory-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485204322745,
+            "name": "Fetch Shared details-Inventory",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": [],
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "8ed4aa47-3328-f8b6-8242-3b75b47e24ea",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.152:8181/jolokia/read/org.opendaylight.controller:type=DistributedConfigDatastore,Category=ShardManager,name=shard-manager-config",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485206201789,
+            "name": "Get config local shards",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "96888f3e-a702-8b64-a3e3-cc73edbf9039",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.153:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-3-shard-topology-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485206152420,
+            "name": "Fetch Shared details-Topology",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "a67f1978-bab2-c29e-ec8a-d661be476696",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.152:8181/jolokia/read/akka:type=Cluster",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485204496035,
+            "name": "Fetch Cluster details",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nMembers field should list all the cluster nodes added to the cluster.\nAlso the Unreachable fields should be empty.\n",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646"
+        },
+        {
+            "id": "c8f88472-655b-14eb-167c-fbdadd2dc715",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.152:8181/jolokia/list",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "GET",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "params",
+            "name": "List schemas",
+            "description": "",
+            "descriptionFormat": "html",
+            "time": 1485204911082,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "8bfef314-8c90-9496-8a56-e7bd8926ad29"
+        },
+        {
+            "id": "cca65185-0068-6ca7-b05f-ab3aefefce9c",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:remove-all-shard-replicas",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "POST",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "raw",
+            "name": "RPC remove-all-shard-replicas",
+            "description": "Removes replicas for all shards on this node. This is equivalent to issuing a remove-shard-replica for all shards and essentially removes this node from a cluster.",
+            "descriptionFormat": "html",
+            "time": 1485290286817,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "27881d9d-f4fe-0fbf-861c-974b230d347b",
+            "rawModeData": "{\n  \"input\": {\n    \"member-name\": \"member-1\"\n  }\n}"
+        },
+        {
+            "id": "d30bb5b1-2076-13a5-9b4f-1a25799b1a3c",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:flip-member-voting-states-for-all-shards",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "POST",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "raw",
+            "name": "RPC flip-member-voting-states-for-all-shards",
+            "description": "Flips the voting states of all cluster members for all shards, such that if a member was voting it becomes non-voting and vice versa.",
+            "descriptionFormat": "html",
+            "time": 1485289993429,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "27881d9d-f4fe-0fbf-861c-974b230d347b",
+            "rawModeData": ""
+        },
+        {
+            "id": "d9696eae-0131-6592-3298-48df437c4090",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.152:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-2-shard-inventory-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485206120320,
+            "name": "Fetch Shared details-Inventory",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": []
+        },
+        {
+            "id": "e0910063-966e-1b89-043f-409fb028883a",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "POST",
+            "data": [],
+            "dataMode": "raw",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485289910941,
+            "name": "RPC change-member-voting-states-for-all-shards",
+            "description": "Changes the voting states, either voting or non-voting, of cluster members for all shards. Non-voting members will no longer participate in leader elections and consensus but will be replicated. This is useful for having a set of members serve as a backup cluster in case the primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member and will be forwarded to the leader.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": [],
+            "rawModeData": "{\n  \"input\": {\n    \"member-voting-state\": [\n      {\n        \"member-name\": \"member-1\",\n        \"voting\": false\n      },\n      {\n        \"member-name\": \"member-2\",\n        \"voting\": false\n      },\n      {\n        \"member-name\": \"member-3\",\n        \"voting\": true\n      }\n    ]\n  }\n}"
+        },
+        {
+            "id": "eba3a877-fa9e-0c8d-8b66-9b2e3ff2e19e",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:type=DistributedOperationalDatastore,Category=ShardManager,name=shard-manager-operational",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "GET",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "params",
+            "name": "Get operational local shards",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "descriptionFormat": "html",
+            "time": 1485205009023,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "ed36e4ad-5819-c957-0dda-f82d79d5a0fd",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:type=DistributedConfigDatastore,Category=ShardManager,name=shard-manager-config",
+            "pathVariables": {},
+            "preRequestScript": "",
+            "method": "GET",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "data": [],
+            "dataMode": "params",
+            "name": "Get config local shards",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "descriptionFormat": "html",
+            "time": 1485204979563,
+            "version": 2,
+            "responses": [],
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        },
+        {
+            "id": "eda3e8f3-9421-9357-cf3c-dbada5667c86",
+            "headers": "Authorization: Basic YWRtaW46YWRtaW4=\nContent-Type: application/json\nAccept: application/json\n",
+            "url": "http://192.168.50.151:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-topology-config,type=DistributedConfigDatastore",
+            "preRequestScript": "",
+            "pathVariables": {},
+            "method": "GET",
+            "data": [],
+            "dataMode": "params",
+            "version": 2,
+            "tests": "",
+            "currentHelper": "normal",
+            "helperAttributes": {},
+            "time": 1485204359414,
+            "name": "Fetch Shared details-Topology",
+            "description": "Fetch clustering related data and look for following details to make sure that cluster is up and running.\nUser should direct this request to the controller where jolokia agent is installed.",
+            "collectionId": "3560a667-bc27-fe5e-47b0-ae6b69959646",
+            "responses": [],
+            "folder": "e23de96f-af25-d497-bd13-6fa2bf22b62f"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/tutorials/cluster-nodes/README.md b/tutorials/cluster-nodes/README.md
new file mode 100644 (file)
index 0000000..8939f1b
--- /dev/null
@@ -0,0 +1,93 @@
+# Deploy an OpenDaylight Cluster
+This repo provides a Vagrantfile and a docker-compose.yml file so you can deploy an OpenDaylight cluster using either VMs or containers.
+## Configuration
+The VMs / Containers are configured through the _config.properties_ file.
+### config.properties
+* How many nodes to deploy?
+    Default is `3`.
+* What OpenDaylight release to use?
+    Default is `Boron-SR2`.
+* What features to install on startup?
+    Defaults are `odl-jolokia, odl-restconf, odl-mdsal-clustering`; a sample of the file is shown after this list.
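+
+For reference, these defaults correspond to the following excerpt of `scripts/config.properties`:
+```
+NUM_OF_NODES=3
+ODL_VERSION="0.5.2-Boron-SR2"
+USER_FEATURES="odl-mdsal-clustering,odl-jolokia,odl-restconf"
+```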
+
+### Vagrant
+Virtual machines are configured as follows:
+* Image: Trusty
+* RAM: 4096
+* CPUs: 4
+* Network:
+    * Bridge "en0: Wi-Fi (AirPort)"
+    * Static IP address: 192.168.50.15#{node_index}
+
+To change the network configuration, edit the Vagrantfile.
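+The relevant line, quoted from the Vagrantfile, looks like this (the IP suffix is derived from the node index):
+```
+node.vm.network "private_network", :adapter=>2, ip: "192.168.50.15#{n+1}", bridge: "en0: Wi-Fi (AirPort)"
+```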
+
+Useful commands:
+```
+# from the root folder containing the Vagrantfile, access a virtual machine
+vagrant ssh odl-1
+
+# if you can't access odl-2 or odl-3, export the configured number of nodes and retry
+export NUM_OF_NODES=3
+
+# destroy VMs (force)
+vagrant destroy -f
+
+```
+### Docker
+Containers are configured as follows:
+* Image: Trusty
+* Network:
+    * Static IP address: 192.168.50.15#{node_index}
+
+A dedicated network is created to keep the cluster networking in its own subnet (the setup script creates it with the command shown after the list below). Run the following command to see how it's configured:
+```
+docker network inspect odl-cluster-network
+```
+
+* name: odl-cluster-network
+* com.docker.network.bridge.enable_icc=true
+* com.docker.network.bridge.enable_ip_masquerade=true
+* subnet 192.168.50.0/24
+* gateway 192.168.50.1
+
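+The setup script creates this network with the following command (see `scripts/setup_cluster.sh`):
+```
+docker network create \
+  -o com.docker.network.bridge.enable_icc=true \
+  -o com.docker.network.bridge.enable_ip_masquerade=true \
+  --subnet 192.168.50.0/24 --gateway 192.168.50.1 odl-cluster-network
+```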
+
+To check on the setup progress, run the command below. If the OpenDaylight CLI is shown, the node is ready.
+```
+docker exec odl-1 tail -f nohup.out
+```
+
+Useful commands:
+```
+# list all running containers
+docker ps
+
+# remove all containers
+docker rm -f $(docker ps -q)
+
+# delete odl-cluster-network
+docker network rm odl-cluster-network
+
+# execute a command in a container
+docker exec <container_id|container_name> <command>
+
+# list odl image
+docker images odl-node
+
+# remove odl image
+docker rmi odl-node
+
+```
+# Usage
+Execute the `setup_cluster.sh` script; everything should be ready within about half an hour, depending on your network speed.
+
+```
+./scripts/setup_cluster.sh -p <docker|vagrant>
+```
+## Example
+To create a cluster using containers, run the following:
+```
+./scripts/setup_cluster.sh -p docker
+```
+# Resources
+A Postman collection is provided with useful requests to verify the cluster is correctly set up and to gather information from it through Jolokia.
+This Postman collection also provides requests to manage the cluster, leveraging the [cluster-admin.yang](https://github.com/opendaylight/controller/blob/master/opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang) RPCs.
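+
+If you prefer the command line, the same requests can be issued with curl. For example, the following mirror the collection's "Fetch Cluster details" and "RPC add-replicas-for-all-shards" requests against the first node (default credentials admin/admin):
+```
+# read the Akka cluster state through Jolokia
+curl -u admin:admin http://192.168.50.151:8181/jolokia/read/akka:type=Cluster
+
+# add replicas on this node for all shards via the cluster-admin RPC
+curl -u admin:admin -X POST \
+  http://192.168.50.151:8181/restconf/operations/cluster-admin:add-replicas-for-all-shards
+```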
\ No newline at end of file
diff --git a/tutorials/cluster-nodes/Vagrantfile b/tutorials/cluster-nodes/Vagrantfile
new file mode 100644 (file)
index 0000000..2d27e6d
--- /dev/null
@@ -0,0 +1,40 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+$install_odl = <<SCRIPT
+HOME=/home/vagrant/
+mkdir opendaylight
+cp -r /vagrant/opendaylight/* opendaylight/
+nohup /vagrant/scripts/setup_odl.sh > setup_odl.log 2>&1 &
+SCRIPT
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+  num_nodes = (ENV['NUM_OF_NODES'] || 1).to_i
+
+  config.vm.box = "trusty-server-cloudimg-amd64"
+  config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
+
+  num_nodes.times do |n|
+     config.vm.define "odl-#{n+1}" do | node |
+        node.vm.host_name = "odl-#{n+1}"
+        node.vm.network "private_network", :adapter=>2, ip: "192.168.50.15#{n+1}", bridge: "en0: Wi-Fi (AirPort)"
+        node.vm.provider :virtualbox do |v|
+          v.customize ["modifyvm", :id, "--memory", 4096]
+          v.customize ["modifyvm", :id, "--cpus", 4]
+        end
+        node.vm.provision "install_odl", type: "shell", inline: $install_odl
+      end
+  end
+end
diff --git a/tutorials/cluster-nodes/docker-compose.yml b/tutorials/cluster-nodes/docker-compose.yml
new file mode 100644 (file)
index 0000000..cff91f8
--- /dev/null
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+version: '2'
+
+services:
+  odl-node:
+    build: .
+    image: odl-node
+    container_name: odl-${NODE_NUMBER}
+    hostname: odl-${NODE_NUMBER}
+    privileged: true
+    stdin_open: true
+    tty: true
+    networks:
+      default:
+          ipv4_address: 192.168.50.15${NODE_NUMBER}
+    command: nohup /root/scripts/setup_odl.sh
+
+networks:
+    default:
+        external:
+            name: odl-cluster-network
diff --git a/tutorials/cluster-nodes/scripts/config.properties b/tutorials/cluster-nodes/scripts/config.properties
new file mode 100755 (executable)
index 0000000..e012aa0
--- /dev/null
@@ -0,0 +1,24 @@
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+# define the number of nodes in your cluster.
+NUM_OF_NODES=3
+
+# define the OpenDaylight version to use.
+ODL_VERSION="0.5.2-Boron-SR2"
+
+# define the features to install by default.
+# odl-mdsal-clustering enables cluster-singleton-service
+# odl-jolokia allows access to MBeans through the REST API
+# odl-restconf enables the RESTCONF server
+# add other features to install here, separated by a comma
+USER_FEATURES="odl-mdsal-clustering,odl-jolokia,odl-restconf"
+
+# DO NOT MODIFY
+# URL to retrieve the OpenDaylight distribution based on the defined version.
+DISTRO_URL=https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/$ODL_VERSION/distribution-karaf-$ODL_VERSION.tar.gz
diff --git a/tutorials/cluster-nodes/scripts/setup_cluster.sh b/tutorials/cluster-nodes/scripts/setup_cluster.sh
new file mode 100755 (executable)
index 0000000..d309cf2
--- /dev/null
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+# This is the main script: it ensures an OpenDaylight distribution is available
+# to deploy on the nodes (downloading it if needed), then triggers Vagrant or
+# Docker to build the cluster.
+
+SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT="$( cd "$SCRIPTS" && cd .. && pwd)"
+
+function setup_env {
+    source $SCRIPTS/config.properties
+    export NUM_OF_NODES=$NUM_OF_NODES
+    echo "Cluster will be deployed using $ODL_VERSION."
+    echo "The cluster will have $NUM_OF_NODES nodes."
+}
+
+function download_odl {
+    if [ ! -d "$ROOT/opendaylight" ]; then
+        echo "Download OpenDaylight distribution"
+        mkdir opendaylight
+        curl $DISTRO_URL | tar xvz -C opendaylight --strip-components=1
+    else
+        echo "OpenDaylight distribution $ODL_VERSION already dowloaded."
+    fi
+}
+
+function setup_odl {
+    env_banner
+    cd $ROOT/opendaylight
+
+    # Note for macOS users: BSD sed does not behave like GNU sed, so this command
+    # won't work as-is (see http://stackoverflow.com/a/27834828/6937994);
+    # it is intended for the GNU sed binary.
+    sed -i "/^featuresBoot[ ]*=/ s/management.*/management,$USER_FEATURES/" etc/org.apache.karaf.features.cfg
+    echo "These features will be installed: $USER_FEATURES"
+
+    # to configure (add/modify/remove) shards that will be
+    # spawned and shared within the cluster, please refer to
+    # the custom_shard_config.txt located under /bin
+}
+
+function spawn_vms {
+    env_banner
+    cd $ROOT
+    vagrant destroy -f
+    vagrant up
+}
+
+function spawn_containers {
+    env_banner
+    # create docker network specific to ODL cluster
+    if [ `docker network ls | grep -w odl-cluster-network | wc -l | xargs echo ` == 0 ]; then
+        echo "Docker network for OpenDaylight don't exist - creating ..."
+        docker network create -o com.docker.network.bridge.enable_icc=true -o com.docker.network.bridge.enable_ip_masquerade=true --subnet 192.168.50.0/24 --gateway 192.168.50.1  odl-cluster-network
+    fi
+
+    # create all the containers
+    MAX=$NUM_OF_NODES
+    for ((i=1; i<=MAX; i++))
+    do
+        export NODE_NUMBER=$i
+        docker rm -f odl-$i
+        docker-compose -p odl-$i up -d
+    done
+}
+
+function prerequisites {
+    cat <<EOF
+################################################
+##              Setup environment             ##
+################################################
+EOF
+    setup_env
+
+    cat <<EOF
+################################################
+##                 Download ODL               ##
+################################################
+EOF
+    download_odl
+
+    cat <<EOF
+################################################
+##              Configure cluster             ##
+################################################
+EOF
+    setup_odl
+}
+
+function env_banner {
+cat <<EOF
+################################################
+##             Spawn cluster nodes            ##
+################################################
+EOF
+}
+
+function end_banner {
+cat <<EOF
+################################################
+##          Your environment is setup         ##
+################################################
+EOF
+}
+
+usage() { echo "Usage: $0 -p <docker|vagrant>" 1>&2; exit 1; }
+
+while getopts ":p:" opt; do
+    case $opt in
+        p)
+            p=$OPTARG
+            ;;
+        \?)
+            echo "Invalid option -$OPTARG" >&2
+            usage
+            exit 1
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z $p ]; then
+    echo "Option -p requires an argument." >&2
+    usage
+fi
+
+if [ $p == "docker" ]; then
+    prerequisites
+    spawn_containers
+elif [ $p == "vagrant" ]; then
+    prerequisites
+    spawn_vms
+else
+    echo "Invalid argument $p for option -p"
+    usage
+fi
+
+end_banner
+
diff --git a/tutorials/cluster-nodes/scripts/setup_odl.sh b/tutorials/cluster-nodes/scripts/setup_odl.sh
new file mode 100755 (executable)
index 0000000..db5c155
--- /dev/null
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2017 Alexis de Talhouët.  All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Eclipse Public License v1.0 which accompanies this distribution,
+# and is available at http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+
+# This script provisions the nodes by installing JDK 8. It installs the OpenDaylight distribution
+# under `$HOME/opendaylight`. It also configures the shards of the OpenDaylight instance.
+# Shard configuration:
+# see [configure-cluster-ipdetect.sh](https://github.com/opendaylight/integration-distribution/blob/release/boron-sr2/distribution-karaf/src/main/assembly/bin/configure-cluster-ipdetect.sh)
+
+SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $SCRIPTS/config.properties
+
+function install_packages {
+    # required when running in Docker; otherwise it can be commented out
+    sudo apt-get update -y
+    sudo apt-get -y install software-properties-common
+    # install Java 8
+    echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
+    sudo add-apt-repository ppa:webupd8team/java -y
+    sudo apt-get update
+    sudo apt-get install oracle-java8-installer -y
+    sudo update-java-alternatives -s java-8-oracle
+    sudo apt-get install oracle-java8-set-default -y
+    export JAVA_HOME=/usr/lib/jvm/java-8-oracle
+}
+
+function start_odl {
+    cd $HOME/opendaylight
+
+    seed_nodes=""
+    for i in $(seq $NUM_OF_NODES)
+    do
+        seed_nodes+="192.168.50.15$i "
+    done
+
+    # set up the cluster with all the nodes' IP addresses
+    ./bin/configure-cluster-ipdetect.sh $seed_nodes
+
+    rm -rf journal snapshots
+    JAVA_MAX_MEM=4G JAVA_MAX_PERM_MEM=512m bin/karaf clean
+}
+
+echo "Install required packages" > $HOME/setup.prog
+install_packages
+
+echo "Starting OpenDaylight" > $HOME/setup.prog
+start_odl
+
+# For the Docker container, we have to keep the container
+# alive, otherwise it will stop once the setup is done
+while [ 1 ];
+do
+  sleep 10
+done