Modifications to the deployer to install a dsbenchmark distribution
author    Moiz Raja <moraja@cisco.com>
Fri, 12 Jun 2015 21:14:32 +0000 (14:14 -0700)
committer Gerrit Code Review <gerrit@opendaylight.org>
Mon, 15 Jun 2015 02:55:21 +0000 (02:55 +0000)
- Made a number of changes to deploy.py to make it PEP8 compliant
- Modified the template renderer so that it returns nothing when a
  template does not exist. This allows some of the templates to be
  copied only if they are available
- Added a new template for dsbenchmark tests

Change-Id: I9f3b274533c191d6e42b438dac75bb7830c3986a
Signed-off-by: Moiz Raja <moraja@cisco.com>
test/tools/clustering/cluster-deployer/deploy.py
test/tools/clustering/cluster-deployer/remote_host.py
test/tools/clustering/cluster-deployer/templates/dsbenchmark/akka.conf.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/jolokia.xml.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/module-shards.conf.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/modules.conf.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.features.cfg.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.management.cfg.template [new file with mode: 0644]
test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.opendaylight.controller.cluster.datastore.cfg.template [new file with mode: 0644]
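
The net effect of the renderer and copy_file changes below is that optional templates degrade gracefully: render() returns None when a template file is absent, and copy_file() skips None (or missing) sources instead of failing. A condensed sketch of the combined flow, with the paths simplified for illustration:

    import os

    def render(template_root, template_path):
        # New guard: a missing template renders nothing.
        if not os.path.exists(template_root + template_path):
            return None
        # ... substitute variables and write the rendered file ...
        return "/tmp/" + template_path

    def copy_file(src, dest):
        # New guard: nothing rendered means nothing to copy.
        if src is None or not os.path.exists(src):
            return
        # ... scp src to the remote dest ...

This is what lets the dsbenchmark template set ship an extra org.opendaylight.controller.cluster.datastore.cfg template that other template sets do not have to provide.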

index bb62160cc8cdee309d9bc8a6dbeeeaf715aabec8..102742f03512e04da1bfed2790dc4cd4b1c92ef0 100755 (executable)
 #
 # The input that this script will take is as follows,
 #
-# - A comma separated list of ip addresses/hostnames for each host on which the distribution needs to be deployed
+# - A comma separated list of ip addresses/hostnames for each host on which
+#   the distribution needs to be deployed
 # - The replication factor to be used
-# - The ssh username/password of the remote host(s). Note that this should be the same for each host
+# - The ssh username/password of the remote host(s). Note that this should be
+#   the same for each host
 # - The name of the template to be used.
-#   Note that this template name should match the name of a template folder in the templates directory.
+#   Note that this template name should match the name of a template folder in
+#   the templates directory.
 #   The templates directory can be found in the same directory as this script.
 #
 # Here are the things it will do,
 # - Copy over a distribution of opendaylight to the remote host
 # - Create a timestamped directory on the remote host
 # - Unzip the distribution to the timestamped directory
-# - Copy over the template substituted configuration files to the appropriate location on the remote host
+# - Copy over the template substituted configuration files to the appropriate
+#   location on the remote host
 # - Create a symlink to the timestamped directory
 # - Start karaf
 #
-# -------------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 
 import argparse
 import time
@@ -40,22 +44,31 @@ from remote_host import RemoteHost
 
 parser = argparse.ArgumentParser(description='Cluster Deployer')
 parser.add_argument("--distribution", default="",
-                    help="the absolute path of the distribution on the local host that needs to be deployed. "
-                    "(Must contain version in the form: \"<#>.<#>.<#>-<name>\", e.g. 0.2.0-SNAPSHOT)",
+                    help="the absolute path of the distribution on the local "
+                         "host that needs to be deployed. (Must contain "
+                         "version in the form: \"<#>.<#>.<#>-<name>\", e.g. "
+                         "0.2.0-SNAPSHOT)",
                     required=True)
 parser.add_argument("--rootdir", default="/root",
-                    help="the root directory on the remote host where the distribution is to be deployed",
+                    help="the root directory on the remote host where the "
+                         "distribution is to be deployed",
                     required=True)
-parser.add_argument("--hosts", default="", help="a comma separated list of host names or ip addresses",
+parser.add_argument("--hosts", default="", help="a comma separated list of "
+                                                "host names or ip addresses",
                     required=True)
-parser.add_argument("--clean", action="store_true", default=False, help="clean the deployment on the remote host")
+parser.add_argument("--clean", action="store_true", default=False,
+                    help="clean the deployment on the remote host")
 parser.add_argument("--template", default="openflow",
                     help="the name of the template to be used. "
-                    "This name should match a folder in the templates directory.")
+                    "This name should match a folder in the templates "
+                         "directory.")
 parser.add_argument("--rf", default=3, type=int,
-                    help="replication factor. This is the number of replicas that should be created for each shard.")
-parser.add_argument("--user", default="root", help="the SSH username for the remote host(s)")
-parser.add_argument("--password", default="Ecp123", help="the SSH password for the remote host(s)")
+                    help="replication factor. This is the number of replicas "
+                         "that should be created for each shard.")
+parser.add_argument("--user", default="root", help="the SSH username for the "
+                                                   "remote host(s)")
+parser.add_argument("--password", default="Ecp123",
+                    help="the SSH password for the remote host(s)")
 args = parser.parse_args()
 
 
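For reference, a hypothetical invocation of the script with these arguments; the distribution path and host addresses below are placeholders, not values from this change:

    # python deploy.py \
    #     --distribution /tmp/distribution-karaf-0.3.0-SNAPSHOT.zip \
    #     --rootdir /root \
    #     --hosts 10.0.0.1,10.0.0.2,10.0.0.3 \
    #     --template dsbenchmark \
    #     --rf 3 --user root --password Ecp123
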
@@ -71,6 +84,9 @@ class TemplateRenderer:
         if variables is None:
             variables = {}
 
+        if os.path.exists(self.template_root + template_path) is False:
+            return
+
         with open(self.template_root + template_path, "r") as myfile:
             data = myfile.read()
 
@@ -85,8 +101,8 @@ class TemplateRenderer:
 
 
 #
-# The array_str method takes an array of strings and formats it into a string such that
-# it can be used in an akka configuration file
+# The array_str method takes an array of strings and formats it into a
+#  string such that it can be used in an akka configuration file
 #
 def array_str(arr):
     s = "["
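The hunk cuts the body off here, but the surrounding usage makes the contract clear: array_str() joins quoted member names into a bracketed list. Illustrative input and output, assuming the usual implementation:

    # array_str(["member-1", "member-2", "member-3"])
    # => '["member-1","member-2","member-3"]'
    # i.e. the literal list form akka.conf expects for seed-nodes and roles.
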
@@ -102,8 +118,9 @@ def array_str(arr):
 # The Deployer deploys the controller to one host and configures it
 #
 class Deployer:
-    def __init__(self, host, member_no, template, user, password, rootdir, distribution,
-                 dir_name, hosts, ds_seed_nodes, rpc_seed_nodes, replicas, clean=False):
+    def __init__(self, host, member_no, template, user, password, rootdir,
+                 distribution, dir_name, hosts, ds_seed_nodes, rpc_seed_nodes,
+                 replicas, clean=False):
         self.host = host
         self.member_no = member_no
         self.template = template
@@ -119,7 +136,8 @@ class Deployer:
         self.replicas = replicas
 
         # Connect to the remote host and start doing operations
-        self.remote = RemoteHost(self.host, self.user, self.password, self.rootdir)
+        self.remote = RemoteHost(self.host, self.user, self.password,
+                                 self.rootdir)
 
     def kill_controller(self):
         self.remote.copy_file("kill_controller.sh",  self.rootdir + "/")
@@ -127,11 +145,19 @@ class Deployer:
 
     def deploy(self):
         # Determine distribution version
-        distribution_name = os.path.splitext(os.path.basename(self.distribution))[0]
-        distribution_ver = re.search('(\d+\.\d+\.\d+-\w+\Z)|(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)', distribution_name)  # noqa
+        distribution_name \
+            = os.path.splitext(os.path.basename(self.distribution))[0]
+        distribution_ver = re.search('(\d+\.\d+\.\d+-\w+\Z)|'
+                                     '(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|'
+                                     '(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)',
+                                     distribution_name)  # noqa
 
         if distribution_ver is None:
-            print distribution_name + " is not a valid distribution version. (Must contain version in the form: \"<#>.<#>.<#>-<name>\" or \"<#>.<#>.<#>-<name>-SR<#>\" or \"<#>.<#>.<#>-<name>\", e.g. 0.2.0-SNAPSHOT)"  # noqa
+            print distribution_name + " is not a valid distribution version." \
+                                      " (Must contain version in the form: " \
+                                      "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>." \
+                                      "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>" \
+                                      "-<name>\", e.g. 0.2.0-SNAPSHOT)"  # noqa
             sys.exit(1)
         distribution_ver = distribution_ver.group()
 
@@ -145,17 +171,27 @@ class Deployer:
                 "DS_SEED_NODES": array_str(self.ds_seed_nodes),
                 "RPC_SEED_NODES": array_str(self.rpc_seed_nodes)
             })
-        module_shards_conf = renderer.render("module-shards.conf.template", "module-shards.conf", self.replicas)
-        modules_conf = renderer.render("modules.conf.template", "modules.conf")
-        features_cfg = renderer.render("org.apache.karaf.features.cfg.template",
-                                       "org.apache.karaf.features.cfg",
-                                       {"ODL_DISTRIBUTION": distribution_ver})
+        module_shards_conf = renderer.render("module-shards.conf.template",
+                                             "module-shards.conf",
+                                             self.replicas)
+        modules_conf = renderer.render("modules.conf.template",
+                                       "modules.conf")
+        features_cfg = \
+            renderer.render("org.apache.karaf.features.cfg.template",
+                            "org.apache.karaf.features.cfg",
+                            {"ODL_DISTRIBUTION": distribution_ver})
         jolokia_xml = renderer.render("jolokia.xml.template", "jolokia.xml")
-        management_cfg = renderer.render("org.apache.karaf.management.cfg.template",
-                                         "org.apache.karaf.management.cfg",
-                                         {"HOST": self.host})
-
-        # Delete all the sub-directories under the deploy directory if the --clean flag is used
+        management_cfg = \
+            renderer.render("org.apache.karaf.management.cfg.template",
+                            "org.apache.karaf.management.cfg",
+                            {"HOST": self.host})
+        datastore_cfg = \
+            renderer.render(
+                "org.opendaylight.controller.cluster.datastore.cfg.template",
+                "org.opendaylight.controller.cluster.datastore.cfg")
+
+        # Delete all the sub-directories under the deploy directory if
+        # the --clean flag is used
         if self.clean is True:
             self.remote.exec_cmd("rm -rf " + self.rootdir + "/deploy/*")
 
@@ -168,22 +204,35 @@ class Deployer:
         # Copy the distribution to the host and unzip it
         odl_file_path = self.dir_name + "/odl.zip"
         self.remote.copy_file(self.distribution, odl_file_path)
-        self.remote.exec_cmd("unzip " + odl_file_path + " -d " + self.dir_name + "/")
+        self.remote.exec_cmd("unzip " + odl_file_path + " -d " +
+                             self.dir_name + "/")
 
         # Rename the distribution directory to odl
-        self.remote.exec_cmd("mv " + self.dir_name + "/" + distribution_name + " " + self.dir_name + "/odl")
+        self.remote.exec_cmd("mv " + self.dir_name + "/" +
+                             distribution_name + " " + self.dir_name + "/odl")
 
         # Copy all the generated files to the server
-        self.remote.mkdir(self.dir_name + "/odl/configuration/initial")
-        self.remote.copy_file(akka_conf, self.dir_name + "/odl/configuration/initial/")
-        self.remote.copy_file(module_shards_conf, self.dir_name + "/odl/configuration/initial/")
-        self.remote.copy_file(modules_conf, self.dir_name + "/odl/configuration/initial/")
-        self.remote.copy_file(features_cfg, self.dir_name + "/odl/etc/")
-        self.remote.copy_file(jolokia_xml, self.dir_name + "/odl/deploy/")
-        self.remote.copy_file(management_cfg, self.dir_name + "/odl/etc/")
+        self.remote.mkdir(self.dir_name
+                          + "/odl/configuration/initial")
+        self.remote.copy_file(akka_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(module_shards_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(modules_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(features_cfg, self.dir_name
+                              + "/odl/etc/")
+        self.remote.copy_file(jolokia_xml, self.dir_name
+                              + "/odl/deploy/")
+        self.remote.copy_file(management_cfg, self.dir_name
+                              + "/odl/etc/")
+
+        if datastore_cfg is not None:
+            self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/")
 
         # Add symlink
-        self.remote.exec_cmd("ln -sfn " + self.dir_name + " " + args.rootdir + "/deploy/current")
+        self.remote.exec_cmd("ln -sfn " + self.dir_name + " "
+                             + args.rootdir + "/deploy/current")
 
         # Run karaf
         self.remote.start_controller(self.dir_name)
@@ -209,22 +258,27 @@ def main():
     replicas = {}
 
     for x in range(0, len(hosts)):
-        ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" + hosts[x] + ":2550")
-        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" + hosts[x] + ":2551")
+        ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@"
+                             + hosts[x] + ":2550")
+        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@"
+                              + hosts[x] + ":2551")
         all_replicas.append("member-" + str(x + 1))
 
     for x in range(0, 10):
         if len(all_replicas) > args.rf:
-            replicas["REPLICAS_" + str(x+1)] = array_str(random.sample(all_replicas, args.rf))
+            replicas["REPLICAS_" + str(x+1)] \
+                = array_str(random.sample(all_replicas, args.rf))
         else:
             replicas["REPLICAS_" + str(x+1)] = array_str(all_replicas)
 
     deployers = []
 
     for x in range(0, len(hosts)):
-        deployers.append(Deployer(hosts[x], x + 1, args.template, args.user, args.password,
-                                  args.rootdir, args.distribution, dir_name, hosts, ds_seed_nodes,
-                                  rpc_seed_nodes, replicas, args.clean))
+        deployers.append(Deployer(hosts[x], x + 1, args.template, args.user,
+                                  args.password, args.rootdir,
+                                  args.distribution, dir_name, hosts,
+                                  ds_seed_nodes, rpc_seed_nodes, replicas,
+                                  args.clean))
 
     for x in range(0, len(hosts)):
         deployers[x].kill_controller()
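
To illustrate the replica-placement loop above: ten REPLICAS_<n> keys are always generated, and each gets a random --rf-sized sample only when there are more members than the replication factor. One possible outcome for five hosts and --rf 3 (the samples are random per run):

    # all_replicas = ["member-1", "member-2", "member-3", "member-4", "member-5"]
    # replicas["REPLICAS_1"] = '["member-2","member-4","member-5"]'
    # replicas["REPLICAS_4"] = '["member-1","member-3","member-5"]'
    # With rf >= number of hosts, every shard is replicated on all members instead.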
index 34c9f1f065cb496016f5a40ee027f05e264a5e84..bae137b82a6fcf9f067ebaf09a4d636443df17d1 100644 (file)
@@ -6,6 +6,8 @@
 
 from SSHLibrary import SSHLibrary
 
+import os
+
 
 class RemoteHost:
     def __init__(self, host, user, password, rootdir):
@@ -22,12 +24,21 @@ class RemoteHost:
         rc = lib.execute_command(command, return_rc=True)
         lib.close_connection()
         if rc[1] != 0:
-            raise Exception('remote command failed [{0}] with exit code {1}'.format(command, rc))
+            raise Exception('remote command failed [{0}] with exit code {1}'
+                            .format(command, rc))
 
     def mkdir(self, dir_name):
         self.exec_cmd("mkdir -p " + dir_name)
 
     def copy_file(self, src, dest):
+        if src is None:
+            print "src is None; not copying anything to " + dest
+            return
+
+        if os.path.exists(src) is False:
+            print "Src file " + src + " was not found"
+            return
+
         lib = SSHLibrary()
         lib.open_connection(self.host)
         lib.login(username=self.user, password=self.password)
@@ -36,7 +47,8 @@ class RemoteHost:
         lib.close_connection()
 
     def kill_controller(self):
-        self.exec_cmd("sudo ps axf | grep karaf | grep -v grep | awk '{print \"kill -9 \" $1}' | sudo sh")
+        self.exec_cmd("sudo ps axf | grep karaf | grep -v grep "
+                      "| awk '{print \"kill -9 \" $1}' | sudo sh")
 
     def start_controller(self, dir_name):
         self.exec_cmd(dir_name + "/odl/bin/start")
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/akka.conf.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/akka.conf.template
new file mode 100644 (file)
index 0000000..5e69c14
--- /dev/null
@@ -0,0 +1,99 @@
+
+odl-cluster-data {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
+    actor {
+
+      provider = "akka.cluster.ClusterActorRefProvider"
+      serializers {
+                java = "akka.serialization.JavaSerializer"
+                proto = "akka.remote.serialization.ProtobufSerializer"
+              }
+
+              serialization-bindings {
+                  "com.google.protobuf.Message" = proto
+
+              }
+
+      default-dispatcher {
+        # Setting throughput to 1 makes the dispatcher fair. It processes 1 message from
+        # the mailbox before moving on to the next mailbox
+        throughput = 1
+      }
+
+      default-mailbox {
+        # When not using a BalancingDispatcher it is recommended that we use the SingleConsumerOnlyUnboundedMailbox
+        # as it is the most efficient for multiple producer/single consumer use cases
+        mailbox-type="akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
+      }
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{HOST}}"
+        port = 2550
+        maximum-frame-size = 419430400
+        send-buffer-size = 52428800
+        receive-buffer-size = 52428800
+      }
+    }
+
+    cluster {
+      seed-nodes = {{{DS_SEED_NODES}}}
+
+      auto-down-unreachable-after = 300s
+
+      roles = [
+        "{{MEMBER_NAME}}"
+      ]
+
+    }
+  }
+}
+
+odl-cluster-rpc {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+    log-dead-letters = 10000
+
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{HOST}}"
+        port = 2551
+        maximum-frame-size = 419430400
+        send-buffer-size = 52428800
+        receive-buffer-size = 52428800
+      }
+    }
+
+    cluster {
+      seed-nodes = {{{RPC_SEED_NODES}}}
+
+      auto-down-unreachable-after = 300s
+    }
+  }
+}
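
When deploy.py renders this template, the mustache placeholders are replaced with concrete values. For a hypothetical three-node cluster, member one's data-store section would contain something like (seed-node list wrapped here for readability):

    hostname = "10.0.0.1"
    roles = ["member-1"]
    seed-nodes = ["akka.tcp://opendaylight-cluster-data@10.0.0.1:2550",
                  "akka.tcp://opendaylight-cluster-data@10.0.0.2:2550",
                  "akka.tcp://opendaylight-cluster-data@10.0.0.3:2550"]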
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/jolokia.xml.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/jolokia.xml.template
new file mode 100644 (file)
index 0000000..be150b0
--- /dev/null
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<features name="jolokia-1.1.5" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
+
+    <feature name='feature-jolokia' version='1.1.5' install="auto">
+       <bundle>mvn:org.jolokia/jolokia-osgi/1.1.5</bundle>
+    </feature>
+
+</features>
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/module-shards.conf.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/module-shards.conf.template
new file mode 100644 (file)
index 0000000..6a39724
--- /dev/null
@@ -0,0 +1,92 @@
+# This file describes which shards live on which members
+# The format for a module-shards entry is as follows,
+# {
+#    name = "<friendly_name_of_the_module>"
+#    shards = [
+#        {
+#            name="<any_name_that_is_unique_for_the_module>"
+#            replicas = [
+#                "<name_of_member_on_which_to_run>"
+#            ]
+#        }
+#    ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than one.
+# The replicas section is a collection of member names. This information
+# is used to decide which members will host the replicas of a particular
+# shard. Once replication is integrated with the distributed data store,
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default"
+                replicas = {{{REPLICAS_1}}}
+                
+            }
+        ]
+    },
+    {
+        name = "topology"
+        shards = [
+            {
+                name="topology"
+                replicas = {{{REPLICAS_2}}}
+                
+            }
+        ]
+    },
+    {
+        name = "inventory"
+        shards = [
+            {
+                name="inventory"
+                replicas = {{{REPLICAS_3}}}
+                
+            }
+        ]
+    },
+    {
+        name = "toaster"
+        shards = [
+            {
+                name="toaster"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car"
+        shards = [
+            {
+                name="car"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "people"
+        shards = [
+            {
+                name="people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car-people"
+        shards = [
+            {
+                name="car-people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    }
+
+]
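
After deploy.py substitutes the {{{REPLICAS_n}}} placeholders, each stanza carries a concrete member list. For example, assuming REPLICAS_1 was rendered from a three-member sample:

    module-shards = [
        {
            name = "default"
            shards = [
                {
                    name="default"
                    replicas = ["member-1","member-2","member-3"]
                }
            ]
        },
        ...
    ]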
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/modules.conf.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/modules.conf.template
new file mode 100644 (file)
index 0000000..71c12d2
--- /dev/null
@@ -0,0 +1,50 @@
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+#    name = "<friendly_name_of_module>"
+#    namespace = "<the yang namespace of the module>"
+#    shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is "module",
+# which puts all the data of a single module in two shards (one for config
+# and one for operational data).
+
+modules = [
+    {
+        name = "inventory"
+        namespace = "urn:opendaylight:inventory"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "topology"
+        namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "toaster"
+        namespace = "http://netconfcentral.org/ns/toaster"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+        shard-strategy = "module"
+    },
+    
+    {
+        name = "car-people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+        shard-strategy = "module"
+    }    
+
+]
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.features.cfg.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.features.cfg.template
new file mode 100644 (file)
index 0000000..5799fea
--- /dev/null
@@ -0,0 +1,47 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.coretutorials/dsbenchmark-features/1.1.0-SNAPSHOT/xml/features
+
+#
+# Comma separated list of features to install at startup
+#
+featuresBoot = config,standard,region,package,kar,ssh,management,odl-dsbenchmark-impl-ui
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.management.cfg.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.apache.karaf.management.cfg.template
new file mode 100644 (file)
index 0000000..510eebd
--- /dev/null
@@ -0,0 +1,63 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# The properties in this file define the configuration of Apache Karaf's JMX Management
+#
+
+#
+# Port number for RMI registry connection
+#
+rmiRegistryPort = 1099
+
+#
+# Port number for RMI server connection
+#
+rmiServerPort = 44444
+
+#
+# Name of the JAAS realm used for authentication
+#
+jmxRealm = karaf
+
+#
+# The service URL for the JMXConnectorServer
+#
+serviceUrl = service:jmx:rmi://{{HOST}}:${rmiServerPort}/jndi/rmi://{{HOST}}:${rmiRegistryPort}/karaf-${karaf.name}
+
+#
+# Whether any threads started for the JMXConnectorServer should be started as daemon threads
+#
+daemon = true
+
+#
+# Whether the JMXConnectorServer should be started in a separate thread
+#
+threaded = true
+
+#
+# The ObjectName used to register the JMXConnectorServer
+#
+objectName = connector:name=rmi
+
+#
+# Role name used for JMX access authorization
+# If not set, this defaults to the ${karaf.admin.role} configured in etc/system.properties
+#
+# jmxRole=admin
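
Once deploy.py substitutes {{HOST}} and Karaf resolves the ${...} properties at runtime from the defaults defined above, the effective URL for a hypothetical host 10.0.0.1 would be (karaf.name remains a runtime variable):

    serviceUrl = service:jmx:rmi://10.0.0.1:44444/jndi/rmi://10.0.0.1:1099/karaf-${karaf.name}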
diff --git a/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.opendaylight.controller.cluster.datastore.cfg.template b/test/tools/clustering/cluster-deployer/templates/dsbenchmark/org.opendaylight.controller.cluster.datastore.cfg.template
new file mode 100644 (file)
index 0000000..60c4656
--- /dev/null
@@ -0,0 +1,75 @@
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# differently for each data store type by prefixing the data store type + '.'. For example, specifying
+# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
+# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
+# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+
+# The multiplication factor to be used to determine shard election timeout. The shard election timeout
+# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
+shard-election-timeout-factor=20
+
+# The interval at which a shard will send a heart beat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (eg transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+persistent=false
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the 
+# next messages before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before 
+# failing an operation (eg transaction create and change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.
+#shard-journal-recovery-log-batch-size=1000
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the shard leader checks whether a majority of its
+# followers are active; if not, it deems itself isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance as fewer modification messages are sent to the
+# actor, which lessens the chance that the transaction actor's mailbox queue fills up.
+#shard-batched-modification-count=1000
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store should begin 
+# applying back pressure. This number is only used as an initial guidance, subsequently the datastore 
+# measures the latency for a commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+