Add cluster deployer template for lispflowmapping project. 36/42136/6
authorJozef Gloncak <jgloncak@cisco.com>
Wed, 20 Jul 2016 11:04:11 +0000 (13:04 +0200)
committerVratko Polák <vrpolak@cisco.com>
Wed, 10 Aug 2016 13:20:51 +0000 (13:20 +0000)
Change-Id: I46b611d40d8e2bde272a56dc27a2f8bea64a6b26
Signed-off-by: Jozef Gloncak <jgloncak@cisco.com>
tools/clustering/cluster-deployer/templates/lispflowmapping/akka.conf.template [new file with mode: 0644]
tools/clustering/cluster-deployer/templates/lispflowmapping/module-shards.conf.template [new file with mode: 0644]
tools/clustering/cluster-deployer/templates/lispflowmapping/modules.conf.template [new file with mode: 0644]
tools/clustering/cluster-deployer/templates/lispflowmapping/org.apache.karaf.features.cfg.template [new file with mode: 0644]

diff --git a/tools/clustering/cluster-deployer/templates/lispflowmapping/akka.conf.template b/tools/clustering/cluster-deployer/templates/lispflowmapping/akka.conf.template
new file mode 100644 (file)
index 0000000..976a42c
--- /dev/null
@@ -0,0 +1,12 @@
+
+odl-cluster-data {
+  akka {
+    cluster {
+      seed-nodes = {{{DS_SEED_NODES}}}
+      auto-down-unreachable-after = 30s
+      roles = [
+        "{{MEMBER_NAME}}"
+      ]
+    }
+  }
+}
diff --git a/tools/clustering/cluster-deployer/templates/lispflowmapping/module-shards.conf.template b/tools/clustering/cluster-deployer/templates/lispflowmapping/module-shards.conf.template
new file mode 100644 (file)
index 0000000..83c2a88
--- /dev/null
@@ -0,0 +1,89 @@
+# This file describes which shards live on which members
+# The format for module-shards is as follows:
+# {
+#    name = "<friendly_name_of_the_module>"
+#    shards = [
+#        {
+#            name="<any_name_that_is_unique_for_the_module>"
+#            replicas = [
+#                "<name_of_member_on_which_to_run>"
+#            ]
+#        }
+#     ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than one shard per module.
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store,
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default"
+                replicas = {{{REPLICAS_1}}}
+            }
+        ]
+    },
+    {
+        name = "topology"
+        shards = [
+            {
+                name="topology"
+                replicas = {{{REPLICAS_2}}}
+            }
+        ]
+    },
+    {
+        name = "inventory"
+        shards = [
+            {
+                name="inventory"
+                replicas = {{{REPLICAS_3}}}
+            }
+        ]
+    },
+    {
+        name = "toaster"
+        shards = [
+            {
+                name="toaster"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car"
+        shards = [
+            {
+                name="car"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "people"
+        shards = [
+            {
+                name="people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car-people"
+        shards = [
+            {
+                name="car-people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    }
+
+]
diff --git a/tools/clustering/cluster-deployer/templates/lispflowmapping/modules.conf.template b/tools/clustering/cluster-deployer/templates/lispflowmapping/modules.conf.template
new file mode 100644 (file)
index 0000000..a557c36
--- /dev/null
@@ -0,0 +1,50 @@
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows:
+# {
+#    name = "<friendly_name_of_module>"
+#    namespace = "<the yang namespace of the module>"
+#    shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is module which basically
+# will put all the data of a single module in two shards (one for config and one for
+# operational data)
+
+modules = [
+    {
+        name = "inventory"
+        namespace = "urn:opendaylight:inventory"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "topology"
+        namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "toaster"
+        namespace = "http://netconfcentral.org/ns/toaster"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car-people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+        shard-strategy = "module"
+    }
+
+]
diff --git a/tools/clustering/cluster-deployer/templates/lispflowmapping/org.apache.karaf.features.cfg.template b/tools/clustering/cluster-deployer/templates/lispflowmapping/org.apache.karaf.features.cfg.template
new file mode 100644 (file)
index 0000000..45f597b
--- /dev/null
@@ -0,0 +1,49 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.6/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.6/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.2.6/xml/features,mvn:org.apache.karaf.features/spring/3.0.6/xml/features,mvn:org.opendaylight.lispflowmapping/features-lispflowmapping/1.4.0-SNAPSHOT/xml/features
+
+#
+# Comma separated list of features to install at startup
+#
+featuresBoot = config,standard,region,package,kar,ssh,management,odl-jolokia,odl-lispflowmapping-msmr
+
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false

©2013 OpenDaylight, A Linux Foundation Collaborative Project. All Rights Reserved.
OpenDaylight is a registered trademark of The OpenDaylight Project, Inc.
Linux Foundation and OpenDaylight are registered trademarks of the Linux Foundation.
Linux is a registered trademark of Linus Torvalds.