BUG 2344 : Add the multi-node-test template for clustering integration tests release/helium-sr1
author Moiz Raja <moraja@cisco.com>
Tue, 11 Nov 2014 01:35:04 +0000 (17:35 -0800)
committer Moiz Raja <moraja@cisco.com>
Tue, 11 Nov 2014 02:21:45 +0000 (18:21 -0800)
The multi-node-test template installs the odl-clustering-test-app feature and ensures
that modules.conf and module-shards.conf are generated appropriately for testing
the app in a multi-node cluster.

This patch depends on the following controller patch:

https://git.opendaylight.org/gerrit/#/c/12705/
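
For reference, a minimal sketch of how the {{...}} / {{{...}}} placeholders in
these templates could be filled in. It assumes a Mustache-style renderer such as
pystache and purely illustrative host, member and version values; it is not the
actual cluster-deployer code:

    # Sketch only: render one template with illustrative substitution values.
    import pystache

    context = {
        "HOST": "10.0.0.1",                        # hypothetical member IP
        "MEMBER_NAME": "member-1",
        "ODL_DISTRIBUTION": "0.2.1-SNAPSHOT",      # hypothetical version string
        "DS_SEED_NODES": '["akka.tcp://opendaylight-cluster-data@10.0.0.1:2550"]',
        "RPC_SEED_NODES": '["akka.tcp://odl-cluster-rpc@10.0.0.1:2551"]',
        "REPLICAS_1": '["member-1", "member-2", "member-3"]',
    }

    with open("akka.conf.template") as f:          # illustrative path
        rendered = pystache.render(f.read(), context)
    with open("akka.conf", "w") as f:
        f.write(rendered)

The triple-brace form ({{{DS_SEED_NODES}}} and friends) is Mustache's unescaped
substitution, so list-valued settings can be passed in as pre-formatted strings.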

Change-Id: I58b2938c3125068ef895733bcec8e26050190b39
Signed-off-by: Moiz Raja <moraja@cisco.com>
test/tools/cluster-deployer/templates/multi-node-test/akka.conf.template [new file with mode: 0644]
test/tools/cluster-deployer/templates/multi-node-test/jolokia.xml.template [new file with mode: 0644]
test/tools/cluster-deployer/templates/multi-node-test/module-shards.conf.template [new file with mode: 0644]
test/tools/cluster-deployer/templates/multi-node-test/modules.conf.template [new file with mode: 0644]
test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.features.cfg.template [new file with mode: 0644]
test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.management.cfg.template [new file with mode: 0644]

diff --git a/test/tools/cluster-deployer/templates/multi-node-test/akka.conf.template b/test/tools/cluster-deployer/templates/multi-node-test/akka.conf.template
new file mode 100644 (file)
index 0000000..738d538
--- /dev/null
@@ -0,0 +1,83 @@
+
+odl-cluster-data {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
+    actor {
+
+      provider = "akka.cluster.ClusterActorRefProvider"
+      serializers {
+        java = "akka.serialization.JavaSerializer"
+        proto = "akka.remote.serialization.ProtobufSerializer"
+      }
+
+      serialization-bindings {
+        "com.google.protobuf.Message" = proto
+
+      }
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{HOST}}"
+        port = 2550
+        maximum-frame-size = 419430400
+        send-buffer-size = 52428800
+        receive-buffer-size = 52428800
+      }
+    }
+
+    cluster {
+      seed-nodes = {{{DS_SEED_NODES}}}
+
+      auto-down-unreachable-after = 10s
+
+      roles = [
+        "{{MEMBER_NAME}}"
+      ]
+
+    }
+  }
+}
+
+odl-cluster-rpc {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{HOST}}"
+        port = 2551
+      }
+    }
+
+    cluster {
+      seed-nodes = {{{RPC_SEED_NODES}}}
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
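
The {{{DS_SEED_NODES}}} and {{{RPC_SEED_NODES}}} placeholders above expand to
Akka address lists, one entry per seed member. A hedged sketch of how such lists
could be built from the member IPs; the ports match the template, while the
helper, the actor-system names and the IPs are illustrative assumptions:

    # Sketch only: build the seed-node list strings the template expects.
    def seed_nodes(system_name, hosts, port):
        addrs = ['"akka.tcp://%s@%s:%d"' % (system_name, h, port) for h in hosts]
        return "[" + ", ".join(addrs) + "]"

    hosts = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]   # hypothetical member IPs
    ds_seed_nodes = seed_nodes("opendaylight-cluster-data", hosts, 2550)
    rpc_seed_nodes = seed_nodes("odl-cluster-rpc", hosts, 2551)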
diff --git a/test/tools/cluster-deployer/templates/multi-node-test/jolokia.xml.template b/test/tools/cluster-deployer/templates/multi-node-test/jolokia.xml.template
new file mode 100644 (file)
index 0000000..be150b0
--- /dev/null
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<features name="jolokia-1.1.5" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
+
+    <feature name='feature-jolokia' version='1.1.5' install="auto">
+       <bundle>mvn:org.jolokia/jolokia-osgi/1.1.5</bundle>
+    </feature>
+
+</features>
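
The feature above pulls in Jolokia's HTTP-to-JMX bridge, which the clustering
tests can use to inspect shard state on each member. A minimal sketch of such a
query; the 8181 port, the MBean name and the RaftState attribute are assumptions
about the controller's shard MBeans, not something this patch defines:

    # Sketch only: read one shard's raft state through Jolokia (names assumed).
    import json
    from urllib.request import urlopen

    host = "10.0.0.1"                              # hypothetical member IP
    mbean = ("org.opendaylight.controller:type=DistributedConfigDatastore,"
             "Category=Shards,name=member-1-shard-default-config")
    with urlopen("http://%s:8181/jolokia/read/%s" % (host, mbean)) as resp:
        reply = json.loads(resp.read().decode("utf-8"))
    print(reply["value"]["RaftState"])             # e.g. "Leader" or "Follower"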
diff --git a/test/tools/cluster-deployer/templates/multi-node-test/module-shards.conf.template b/test/tools/cluster-deployer/templates/multi-node-test/module-shards.conf.template
new file mode 100644 (file)
index 0000000..6a39724
--- /dev/null
@@ -0,0 +1,92 @@
+# This file describes which shards live on which members
+# The format of a module-shards entry is as follows:
+# {
+#    name = "<friendly_name_of_the_module>"
+#    shards = [
+#        {
+#            name="<any_name_that_is_unique_for_the_module>"
+#            replicas = [
+#                "<name_of_member_on_which_to_run>"
+#            ]
+#        }
+#    ]
+# }
+#
+# For Helium only one shard per module is supported; releases beyond Helium
+# will support more than one.
+# The replicas section is a collection of member names. It determines on
+# which members the replicas of a particular shard will be located. Once
+# replication is integrated with the distributed data store this section
+# can have multiple entries.
+#
+#
+
+
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default"
+                replicas = {{{REPLICAS_1}}}
+                
+            }
+        ]
+    },
+    {
+        name = "topology"
+        shards = [
+            {
+                name="topology"
+                replicas = {{{REPLICAS_2}}}
+                
+            }
+        ]
+    },
+    {
+        name = "inventory"
+        shards = [
+            {
+                name="inventory"
+                replicas = {{{REPLICAS_3}}}
+                
+            }
+        ]
+    },
+    {
+        name = "toaster"
+        shards = [
+            {
+                name="toaster"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car"
+        shards = [
+            {
+                name="car"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "people"
+        shards = [
+            {
+                name="people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    },
+    {
+        name = "car-people"
+        shards = [
+            {
+                name="car-people"
+                replicas = {{{REPLICAS_4}}}
+            }
+        ]
+    }
+
+]
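
Each {{{REPLICAS_N}}} placeholder is expected to expand to a list of member
names, e.g. ["member-1", "member-2", "member-3"], and every name listed there
should also appear as a MEMBER_NAME role in akka.conf. A small consistency check
one could run against the rendered file (the file name and member names are
illustrative):

    # Sketch only: check that every replica refers to a known cluster member.
    import re

    members = {"member-1", "member-2", "member-3"}   # hypothetical member names
    with open("module-shards.conf") as f:
        text = f.read()

    replicas = set(re.findall(r'"(member-[^"]+)"', text))
    unknown = replicas - members
    if unknown:
        raise SystemExit("replicas refer to unknown members: %s" % sorted(unknown))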
diff --git a/test/tools/cluster-deployer/templates/multi-node-test/modules.conf.template b/test/tools/cluster-deployer/templates/multi-node-test/modules.conf.template
new file mode 100644 (file)
index 0000000..71c12d2
--- /dev/null
@@ -0,0 +1,50 @@
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+#    name = "<friendly_name_of_module>"
+#    namespace = "<the yang namespace of the module>"
+#    shard-strategy = "module"
+# }
+#
+# Note that at this time the only supported shard-strategy is "module", which places
+# all the data of a single module in two shards (one for config and one for
+# operational data).
+
+modules = [
+    {
+        name = "inventory"
+        namespace = "urn:opendaylight:inventory"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "topology"
+        namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "toaster"
+        namespace = "http://netconfcentral.org/ns/toaster"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+        shard-strategy = "module"
+    },
+    
+    {
+        name = "car-people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+        shard-strategy = "module"
+    }    
+
+]
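
Every module declared here has a matching entry in module-shards.conf above. A
quick cross-check sketch over the rendered files (the regex-based parsing and
the file names are assumptions; the rendered files are plain HOCON):

    # Sketch only: ensure modules.conf and module-shards.conf agree on names.
    import re

    def names(path):
        # Collect every name = "..." occurrence in the file.
        with open(path) as f:
            return set(re.findall(r'name\s*=\s*"([^"]+)"', f.read()))

    missing = names("modules.conf") - names("module-shards.conf")
    if missing:
        raise SystemExit("modules with no shard configuration: %s" % sorted(missing))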
diff --git a/test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.features.cfg.template b/test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.features.cfg.template
new file mode 100644 (file)
index 0000000..aaac53c
--- /dev/null
@@ -0,0 +1,48 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.1/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.1/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.0/xml/features,mvn:org.apache.karaf.features/spring/3.0.1/xml/features,mvn:org.opendaylight.integration/features-integration/{{ODL_DISTRIBUTION}}/xml/features
+
+#
+# Comma separated list of features to install at startup
+#
+featuresBoot=config,standard,region,package,kar,ssh,management,odl-clustering-test-app,odl-restconf-noauth,odl-mdsal-clustering
+
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false
diff --git a/test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.management.cfg.template b/test/tools/cluster-deployer/templates/multi-node-test/org.apache.karaf.management.cfg.template
new file mode 100644 (file)
index 0000000..510eebd
--- /dev/null
@@ -0,0 +1,63 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# The properties in this file define the configuration of Apache Karaf's JMX Management
+#
+
+#
+# Port number for RMI registry connection
+#
+rmiRegistryPort = 1099
+
+#
+# Port number for RMI server connection
+#
+rmiServerPort = 44444
+
+#
+# Name of the JAAS realm used for authentication
+#
+jmxRealm = karaf
+
+#
+# The service URL for the JMXConnectorServer
+#
+serviceUrl = service:jmx:rmi://{{HOST}}:${rmiServerPort}/jndi/rmi://{{HOST}}:${rmiRegistryPort}/karaf-${karaf.name}
+
+#
+# Whether any threads started for the JMXConnectorServer should be started as daemon threads
+#
+daemon = true
+
+#
+# Whether the JMXConnectorServer should be started in a separate thread
+#
+threaded = true
+
+#
+# The ObjectName used to register the JMXConnectorServer
+#
+objectName = connector:name=rmi
+
+#
+# Role name used for JMX access authorization
+# If not set, this defaults to the ${karaf.admin.role} configured in etc/system.properties
+#
+# jmxRole=admin