#
# The input that this script will take is as follows,
#
-# - A comma separated list of ip addresses/hostnames for each host on which the distribution needs to be deployed
+# - A comma separated list of ip addresses/hostnames for each host on which
+# the distribution needs to be deployed
# - The replication factor to be used
-# - The ssh username/password of the remote host(s). Note that this should be the same for each host
+# - The ssh username/password of the remote host(s). Note that this should be
+# the same for each host
# - The name of the template to be used.
-# Note that this template name should match the name of a template folder in the templates directory.
+# Note that this template name should match the name of a template folder in
+# the templates directory.
# The templates directory can be found in the same directory as this script.
#
# Here are the things it will do,
# - Copy over a distribution of opendaylight to the remote host
# - Create a timestamped directory on the remote host
# - Unzip the distribution to the timestamped directory
-# - Copy over the template substituted configuration files to the appropriate location on the remote host
+# - Copy over the template substituted configuration files to the appropriate
+# location on the remote host
# - Create a symlink to the timestamped directory
# - Start karaf
#
-# -------------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
import argparse
import time
parser = argparse.ArgumentParser(description='Cluster Deployer')
parser.add_argument("--distribution", default="",
- help="the absolute path of the distribution on the local host that needs to be deployed. "
- "(Must contain version in the form: \"<#>.<#>.<#>-<name>\", e.g. 0.2.0-SNAPSHOT)",
+ help="the absolute path of the distribution on the local "
+ "host that needs to be deployed. (Must contain "
+ "version in the form: \"<#>.<#>.<#>-<name>\", e.g. "
+ "0.2.0-SNAPSHOT)",
required=True)
parser.add_argument("--rootdir", default="/root",
- help="the root directory on the remote host where the distribution is to be deployed",
+ help="the root directory on the remote host where the "
+ "distribution is to be deployed",
required=True)
-parser.add_argument("--hosts", default="", help="a comma separated list of host names or ip addresses",
+parser.add_argument("--hosts", default="", help="a comma separated list of "
+ "host names or ip addresses",
required=True)
-parser.add_argument("--clean", action="store_true", default=False, help="clean the deployment on the remote host")
+parser.add_argument("--clean", action="store_true", default=False,
+ help="clean the deployment on the remote host")
parser.add_argument("--template", default="openflow",
help="the name of the template to be used. "
- "This name should match a folder in the templates directory.")
+ "This name should match a folder in the templates "
+ "directory.")
parser.add_argument("--rf", default=3, type=int,
- help="replication factor. This is the number of replicas that should be created for each shard.")
-parser.add_argument("--user", default="root", help="the SSH username for the remote host(s)")
-parser.add_argument("--password", default="Ecp123", help="the SSH password for the remote host(s)")
+ help="replication factor. This is the number of replicas "
+ "that should be created for each shard.")
+parser.add_argument("--user", default="root", help="the SSH username for the "
+ "remote host(s)")
+parser.add_argument("--password", default="Ecp123",
+ help="the SSH password for the remote host(s)")
args = parser.parse_args()
if variables is None:
variables = {}
+ if os.path.exists(self.template_root + template_path) is False:
+ return
+
with open(self.template_root + template_path, "r") as myfile:
data = myfile.read()
#
-# The array_str method takes an array of strings and formats it into a string such that
-# it can be used in an akka configuration file
+# The array_str method takes an array of strings and formats it into a
+# string such that it can be used in an akka configuration file
#
def array_str(arr):
s = "["
# The Deployer deploys the controller to one host and configures it
#
class Deployer:
- def __init__(self, host, member_no, template, user, password, rootdir, distribution,
- dir_name, hosts, ds_seed_nodes, rpc_seed_nodes, replicas, clean=False):
+ def __init__(self, host, member_no, template, user, password, rootdir,
+ distribution, dir_name, hosts, ds_seed_nodes, rpc_seed_nodes,
+ replicas, clean=False):
self.host = host
self.member_no = member_no
self.template = template
self.replicas = replicas
# Connect to the remote host and start doing operations
- self.remote = RemoteHost(self.host, self.user, self.password, self.rootdir)
+ self.remote = RemoteHost(self.host, self.user, self.password,
+ self.rootdir)
def kill_controller(self):
self.remote.copy_file("kill_controller.sh", self.rootdir + "/")
def deploy(self):
# Determine distribution version
- distribution_name = os.path.splitext(os.path.basename(self.distribution))[0]
- distribution_ver = re.search('(\d+\.\d+\.\d+-\w+\Z)|(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)', distribution_name) # noqa
+ distribution_name \
+ = os.path.splitext(os.path.basename(self.distribution))[0]
+ distribution_ver = re.search('(\d+\.\d+\.\d+-\w+\Z)|'
+ '(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|'
+ '(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)',
+ distribution_name) # noqa
if distribution_ver is None:
- print distribution_name + " is not a valid distribution version. (Must contain version in the form: \"<#>.<#>.<#>-<name>\" or \"<#>.<#>.<#>-<name>-SR<#>\" or \"<#>.<#>.<#>-<name>\", e.g. 0.2.0-SNAPSHOT)" # noqa
+ print distribution_name + " is not a valid distribution version." \
+ " (Must contain version in the form: " \
+ "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>." \
+ "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>" \
+ "-<name>\", e.g. 0.2.0-SNAPSHOT)" # noqa
sys.exit(1)
distribution_ver = distribution_ver.group()
"DS_SEED_NODES": array_str(self.ds_seed_nodes),
"RPC_SEED_NODES": array_str(self.rpc_seed_nodes)
})
- module_shards_conf = renderer.render("module-shards.conf.template", "module-shards.conf", self.replicas)
- modules_conf = renderer.render("modules.conf.template", "modules.conf")
- features_cfg = renderer.render("org.apache.karaf.features.cfg.template",
- "org.apache.karaf.features.cfg",
- {"ODL_DISTRIBUTION": distribution_ver})
+ module_shards_conf = renderer.render("module-shards.conf.template",
+ "module-shards.conf",
+ self.replicas)
+ modules_conf = renderer.render("modules.conf.template",
+ "modules.conf")
+ features_cfg = \
+ renderer.render("org.apache.karaf.features.cfg.template",
+ "org.apache.karaf.features.cfg",
+ {"ODL_DISTRIBUTION": distribution_ver})
jolokia_xml = renderer.render("jolokia.xml.template", "jolokia.xml")
- management_cfg = renderer.render("org.apache.karaf.management.cfg.template",
- "org.apache.karaf.management.cfg",
- {"HOST": self.host})
-
- # Delete all the sub-directories under the deploy directory if the --clean flag is used
+ management_cfg = \
+ renderer.render("org.apache.karaf.management.cfg.template",
+ "org.apache.karaf.management.cfg",
+ {"HOST": self.host})
+ datastore_cfg = \
+ renderer.render(
+ "org.opendaylight.controller.cluster.datastore.cfg.template",
+ "org.opendaylight.controller.cluster.datastore.cfg")
+
+ # Delete all the sub-directories under the deploy directory if
+ # the --clean flag is used
if self.clean is True:
self.remote.exec_cmd("rm -rf " + self.rootdir + "/deploy/*")
# Copy the distribution to the host and unzip it
odl_file_path = self.dir_name + "/odl.zip"
self.remote.copy_file(self.distribution, odl_file_path)
- self.remote.exec_cmd("unzip " + odl_file_path + " -d " + self.dir_name + "/")
+ self.remote.exec_cmd("unzip " + odl_file_path + " -d " +
+ self.dir_name + "/")
# Rename the distribution directory to odl
- self.remote.exec_cmd("mv " + self.dir_name + "/" + distribution_name + " " + self.dir_name + "/odl")
+ self.remote.exec_cmd("mv " + self.dir_name + "/" +
+ distribution_name + " " + self.dir_name + "/odl")
# Copy all the generated files to the server
- self.remote.mkdir(self.dir_name + "/odl/configuration/initial")
- self.remote.copy_file(akka_conf, self.dir_name + "/odl/configuration/initial/")
- self.remote.copy_file(module_shards_conf, self.dir_name + "/odl/configuration/initial/")
- self.remote.copy_file(modules_conf, self.dir_name + "/odl/configuration/initial/")
- self.remote.copy_file(features_cfg, self.dir_name + "/odl/etc/")
- self.remote.copy_file(jolokia_xml, self.dir_name + "/odl/deploy/")
- self.remote.copy_file(management_cfg, self.dir_name + "/odl/etc/")
+ self.remote.mkdir(self.dir_name
+ + "/odl/configuration/initial")
+ self.remote.copy_file(akka_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(module_shards_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(modules_conf, self.dir_name
+ + "/odl/configuration/initial/")
+ self.remote.copy_file(features_cfg, self.dir_name
+ + "/odl/etc/")
+ self.remote.copy_file(jolokia_xml, self.dir_name
+ + "/odl/deploy/")
+ self.remote.copy_file(management_cfg, self.dir_name
+ + "/odl/etc/")
+
+ if datastore_cfg is not None:
+ self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/")
# Add symlink
- self.remote.exec_cmd("ln -sfn " + self.dir_name + " " + args.rootdir + "/deploy/current")
+ self.remote.exec_cmd("ln -sfn " + self.dir_name + " "
+ + args.rootdir + "/deploy/current")
# Run karaf
self.remote.start_controller(self.dir_name)
replicas = {}
for x in range(0, len(hosts)):
- ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" + hosts[x] + ":2550")
- rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" + hosts[x] + ":2551")
+ ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@"
+ + hosts[x] + ":2550")
+ rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@"
+ + hosts[x] + ":2551")
all_replicas.append("member-" + str(x + 1))
for x in range(0, 10):
if len(all_replicas) > args.rf:
- replicas["REPLICAS_" + str(x+1)] = array_str(random.sample(all_replicas, args.rf))
+ replicas["REPLICAS_" + str(x+1)] \
+ = array_str(random.sample(all_replicas, args.rf))
else:
replicas["REPLICAS_" + str(x+1)] = array_str(all_replicas)
deployers = []
for x in range(0, len(hosts)):
- deployers.append(Deployer(hosts[x], x + 1, args.template, args.user, args.password,
- args.rootdir, args.distribution, dir_name, hosts, ds_seed_nodes,
- rpc_seed_nodes, replicas, args.clean))
+ deployers.append(Deployer(hosts[x], x + 1, args.template, args.user,
+ args.password, args.rootdir,
+ args.distribution, dir_name, hosts,
+ ds_seed_nodes, rpc_seed_nodes, replicas,
+ args.clean))
for x in range(0, len(hosts)):
deployers[x].kill_controller()
--- /dev/null
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# differently for each data store type by prefixing the data store type + '.'. For example, specifying
+# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
+# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
+# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+
+# The multiplication factor to be used to determine shard election timeout. The shard election timeout
+# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
+shard-election-timeout-factor=20
+
+# The interval at which a shard will send a heartbeat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (e.g. transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+persistent=false
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
+# next messages before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
+# failing an operation (e.g. transaction create and change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.
+#shard-journal-recovery-log-batch-size=1000
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the shard leader checks whether a majority of its followers are active and,
+# if not, terms itself as isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance because fewer modification messages are sent to
+# the actor, which lessens the chance that the transaction actor's mailbox queue could get full.
+#shard-batched-modification-count=1000
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store should begin
+# applying back pressure. This number is only used as an initial guidance, subsequently the datastore
+# measures the latency for a commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+