Merge "Move detnet Sonar scan to Sonarcloud"
[releng/builder.git] / jjb / integration / common-functions.sh
index bae69e425b164cce3fdd82ebf1cfd1403302c927..f16a93a0ea52f4deb90cbb5c73434de55386a045 100644 (file)
@@ -10,7 +10,7 @@ export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
-export CONTROLLERMEM="2048m"
+export CONTROLLERMEM=${CONTROLLERMAXMEM}
 
 # Cluster specific configuration settings
 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
@@ -21,6 +21,7 @@ function print_common_env() {
     cat << EOF
 common-functions environment:
 MAVENCONF: ${MAVENCONF}
+ACTUALFEATURES: ${ACTUALFEATURES}
 FEATURESCONF: ${FEATURESCONF}
 CUSTOMPROP: ${CUSTOMPROP}
 LOGCONF: ${LOGCONF}
@@ -29,6 +30,7 @@ CONTROLLERMEM: ${CONTROLLERMEM}
 AKKACONF: ${AKKACONF}
 MODULESCONF: ${MODULESCONF}
 MODULESHARDSCONF: ${MODULESHARDSCONF}
+SUITES: ${SUITES}
 
 EOF
 }
@@ -40,16 +42,21 @@ function set_java_vars() {
     local -r controllermem=$2
     local -r memconf=$3
 
-    echo "Configure\n    java home: ${java_home}\n    max memory: ${controllermem}\n    memconf: ${memconf}"
+    echo "Configure"
+    echo "    java home: ${java_home}"
+    echo "    max memory: ${controllermem}"
+    echo "    memconf: ${memconf}"
 
-    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
-    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
+    # We do not want expressions to expand here.
+    # shellcheck disable=SC2016
+    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
+    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
     echo "cat ${memconf}"
-    cat ${memconf}
+    cat "${memconf}"
 
     echo "Set Java version"
-    sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
-    sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
+    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
+    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
     echo "JDK default version ..."
     java -version
 
@@ -69,29 +76,30 @@ function configure_karaf_log() {
     local logapi=log4j
 
     # Check what the logging.cfg file is using for the logging api: log4j or log4j2
-    grep "log4j2" ${LOGCONF}
-    if [ $? -eq 0 ]; then
+    if grep "log4j2" "${LOGCONF}"; then
         logapi=log4j2
     fi
 
     echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
     if [ "${logapi}" == "log4j2" ]; then
         # FIXME: Make log size limit configurable from build parameter.
-        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+        # Increase default log file size to 1GB
+        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
         orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
         orgmodule_="${orgmodule//./_}"
-        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
-        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
+        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
+        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
     else
-        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
         # FIXME: Make log size limit configurable from build parameter.
-        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
-        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
+        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
     fi
 
     # Add custom logging levels
-    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated values like "module:level module2:level2"
-    # where module is abbreviated and does not include "org.opendaylight."
+    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
+    # values like "module:level module2:level2" where module is abbreviated and
+    # does not include "org.opendaylight."
     unset IFS
     echo "controllerdebugmap: ${controllerdebugmap}"
     if [ -n "${controllerdebugmap}" ]; then
@@ -104,19 +112,88 @@ function configure_karaf_log() {
                 orgmodule="org.opendaylight.${module}"
                 if [ "${logapi}" == "log4j2" ]; then
                     orgmodule_="${orgmodule//./_}"
-                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
-                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
+                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                 else
-                    echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                 fi
             fi
         done
     fi
 
     echo "cat ${LOGCONF}"
-    cat ${LOGCONF}
+    cat "${LOGCONF}"
 } # function configure_karaf_log()
 
+function configure_karaf_log_for_apex() {
+    # TODO: add the extra steps to this function to do any extra work
+    # in this apex environment like we do in our standard environment.
+    # EX: log size, rollover, etc.
+
+    # Modify ODL Log Levels, if needed, for new distribution. This will modify
+    # the control nodes hiera data which will be used during the puppet deploy
+    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
+    # values like "module:level module2:level2" where module is abbreviated and
+    # does not include "org.opendaylight."
+
+    local -r controller_ip=$1
+
+    unset IFS
+    # shellcheck disable=SC2153
+    echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
+    if [ -n "${CONTROLLERDEBUGMAP}" ]; then
+        logging_config='\"opendaylight::log_levels\": {'
+        for kv in ${CONTROLLERDEBUGMAP}; do
+            module="${kv%%:*}"
+            level="${kv#*:}"
+            echo "module: $module, level: $level"
+            # shellcheck disable=SC2157
+            if [ -n "${module}" ] && [ -n "${level}" ]; then
+                orgmodule="org.opendaylight.${module}"
+                logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
+            fi
+        done
+        # strip the trailing comma, then append a closing brace and a trailing comma
+        logging_config=${logging_config%,}" },"
+        echo "$logging_config"
+
+        # find a sane line number to inject the custom logging json
+        lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
+        # We purposely want these variables to expand client-side
+        # shellcheck disable=SC2029
+        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
+        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
+    fi
+} # function configure_karaf_log_for_apex()
+
+function configure_odl_features_for_apex() {
+
+    # if the environment variable $ACTUALFEATURES is non-empty, then rewrite
+    # the puppet config file with the features given in that variable, otherwise
+    # this function is a noop
+
+    local -r controller_ip=$1
+    local -r config_file=/etc/puppet/hieradata/service_configs.json
+
+cat > /tmp/set_odl_features.sh << EOF
+sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
+for feature in "\${ACTUALFEATURES//,/ }"; do
+    sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
+done
+echo "Modified puppet-opendaylight service_configs.json..."
+cat $config_file
+EOF
+
+    echo "Feature configuration script..."
+    cat /tmp/set_odl_features.sh
+
+    if [ -n "${ACTUALFEATURES}" ]; then
+        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
+        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
+    fi
+
+} # function configure_odl_features_for_apex()
+
 function get_os_deploy() {
     local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
     case ${num_systems} in
@@ -144,13 +221,18 @@ function get_test_suites() {
         testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
     fi
 
+    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+        add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot"
+        echo "${add_test}" >> "$testplan_filepath"
+    fi
+
     echo "Changing the testplan path..."
-    cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
+    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
     cat testplan.txt
 
     # Use the testplan if specific SUITES are not defined.
     if [ -z "${SUITES}" ]; then
-        suite_list=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
+        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
     else
         suite_list=""
         workpath="${WORKSPACE}/test/csit/suites"
@@ -164,7 +246,7 @@ function get_test_suites() {
         done
     fi
 
-    eval $__suite_list="'$suite_list'"
+    eval "$__suite_list='$suite_list'"
 }
 
 function run_plan() {
@@ -179,7 +261,7 @@ function run_plan() {
         ;;
     esac
 
-    printf "Locating ${type} plan to use...\n"
+    printf "Locating %s plan to use...\\n" "${type}"
     plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
     if [ ! -f "${plan_filepath}" ]; then
         plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
@@ -189,19 +271,139 @@ function run_plan() {
     fi
 
     if [ -f "${plan_filepath}" ]; then
-        printf "${type} plan exists!!!\n"
-        printf "Changing the ${type} plan path...\n"
-        cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
-        cat ${type}plan.txt
-        for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
-            printf "Executing ${line}...\n"
+        printf "%s plan exists!!!\\n" "${type}"
+        printf "Changing the %s plan path...\\n" "${type}"
+        sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
+        cat "${type}plan.txt"
+        # shellcheck disable=SC2013
+        for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
+            printf "Executing %s...\\n" "${line}"
             # shellcheck source=${line} disable=SC1091
-            source ${line}
+            source "${line}"
         done
     fi
-    printf "Finished running ${type} plans\n"
+    printf "Finished running %s plans\\n" "${type}"
 } # function run_plan()
 
+# Run scripts to support JVM monitoring.
+function add_jvm_support()
+{
+    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+        set_elasticsearch_attribute "${ELASTICSEARCHATTRIBUTE}"
+        set_jvm_common_attribute
+    fi
+} # function add_jvm_support()
+
+# Expected input parameter: long/short/a number
+function set_elasticsearch_attribute()
+{
+short=5000
+long=120000
+default=$short
+
+case $1 in
+short)
+  period=$short
+  ;;
+long)
+  period=$long
+  ;;
+*)
+  # shellcheck disable=SC2166
+  if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge $short -a "$1" -le $long ]; then
+      period=$1
+  else
+      period=$default
+  fi
+  ;;
+esac
+
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg <<EOF
+period=$period
+
+EOF
+
+echo "Copying config files to ODL Controller folder"
+
+# shellcheck disable=SC2086
+for i in $(seq 1 ${NUM_ODL_SYSTEM})
+do
+        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+        echo "Set Decanter Polling Period to ${!CONTROLLERIP}"
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "mkdir -p \"/tmp/${BUNDLEFOLDER}/etc/opendaylight/karaf/\""
+        scp "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+done
+} #function set_elasticsearch_attribute
+
+function set_jvm_common_attribute()
+{
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg <<EOF
+type=jmx-local
+url=local
+object.name=java.lang:type=*,name=*
+
+EOF
+
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg <<EOF
+type=jmx-local
+url=local
+object.name=java.lang:type=*
+
+EOF
+
+# shellcheck disable=SC2086
+for i in $(seq 1 ${NUM_ODL_SYSTEM})
+do
+    CONTROLLERIP=ODL_SYSTEM_${i}_IP
+
+    cat > "${WORKSPACE}"/elasticsearch.yml <<EOF
+    discovery.zen.ping.multicast.enabled: false
+
+EOF
+
+    cat > "${WORKSPACE}"/elasticsearch_startup.sh <<EOF
+    cd /tmp/elasticsearch/elasticsearch-1.7.5
+    ls -al
+
+    if [ -d "data" ]; then
+        echo "data directory exists, deleting...."
+        rm -r data
+    else
+        echo "data directory does not exist"
+    fi
+
+    cd /tmp/elasticsearch
+    ls -al
+
+    echo "Starting Elasticsearch node"
+    sudo /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch > /dev/null 2>&1 &
+    ls -al /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch
+
+EOF
+    echo "Setup ODL_SYSTEM_IP specific config files for ${!CONTROLLERIP} "
+    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg
+    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg
+    cat "${WORKSPACE}"/elasticsearch.yml
+
+
+    echo "Copying config files to ${!CONTROLLERIP}"
+    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+    scp "${WORKSPACE}"/elasticsearch.yml "${!CONTROLLERIP}":/tmp/
+
+    ssh "${!CONTROLLERIP}" "sudo ls -al /tmp/elasticsearch/"
+    ssh "${!CONTROLLERIP}" "sudo mv /tmp/elasticsearch.yml /tmp/elasticsearch/elasticsearch-1.7.5/config/"
+    ssh "${!CONTROLLERIP}" "cat /tmp/elasticsearch/elasticsearch-1.7.5/config/elasticsearch.yml"
+
+    echo "Copying the elasticsearch_startup script to ${!CONTROLLERIP}"
+    cat "${WORKSPACE}"/elasticsearch_startup.sh
+    scp "${WORKSPACE}"/elasticsearch_startup.sh "${!CONTROLLERIP}":/tmp
+    ssh "${!CONTROLLERIP}" 'bash /tmp/elasticsearch_startup.sh'
+    ssh "${!CONTROLLERIP}" 'ps aux | grep elasticsearch'
+done
+} #function set_jvm_common_attribute
+
 # Return elapsed time. Usage:
 # - Call first time with no arguments and a new timer is returned.
 # - Next call with the first argument as the timer and the elapsed time is returned.
@@ -209,7 +411,7 @@ function timer()
 {
     if [ $# -eq 0 ]; then
         # return the current time
-        printf "$(date "+%s")"
+        printf "%s" "$(date "+%s")"
     else
         local start_time=$1
         end_time=$(date "+%s")
@@ -231,7 +433,7 @@ function timer()
 function csv2ssv() {
     local csv=$1
     if [ -n "${csv}" ]; then
-        ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
+        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
     fi
 
     echo "${ssv}"
@@ -239,7 +441,7 @@ function csv2ssv() {
 
 function is_openstack_feature_enabled() {
     local feature=$1
-    for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
+    for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
         if [ "${enabled_feature}" == "${feature}" ]; then
            echo 1
            return
@@ -260,6 +462,7 @@ DISTROSTREAM: ${DISTROSTREAM}
 BUNDLE_URL: ${BUNDLE_URL}
 CONTROLLERFEATURES: ${CONTROLLERFEATURES}
 CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
+CONTROLLERMAXMEM: ${CONTROLLERMAXMEM}
 SCRIPTPLAN: ${SCRIPTPLAN}
 CONFIGPLAN: ${CONFIGPLAN}
 STREAMTESTPLAN: ${STREAMTESTPLAN}
@@ -292,6 +495,7 @@ CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
 LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
 ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
 ODL_SNAT_MODE: ${ODL_SNAT_MODE}
+GROUP_ADD_MOD_ENABLED: ${GROUP_ADD_MOD_ENABLED}
 
 EOF
 }
@@ -302,19 +506,21 @@ function tcpdump_start() {
     local -r filter=$3
     filter_=${filter// /_}
 
-    printf "node ${ip}, ${prefix}_${ip}__${filter}: starting tcpdump\n"
-    ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
-    ${SSH} ${ip} "ps -ef | grep tcpdump"
+    printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
+    # $filter needs to be parsed client-side
+    # shellcheck disable=SC2029
+    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump"
 }
 
 function tcpdump_stop() {
     local -r ip=$1
 
-    printf "node $ip: stopping tcpdump\n"
-    ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
-    ${SSH} ${ip} "sudo pkill -f tcpdump"
-    ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
-    ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
+    printf "node %s: stopping tcpdump\\n" "$ip"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
+    ${SSH} "${ip}" "sudo pkill -f tcpdump"
+    ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
+    ${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
     # copy_logs will copy any *.xz files
 }
 
@@ -323,19 +529,19 @@ function collect_files() {
     local -r ip=$1
     local -r folder=$2
     finddir=/tmp/finder
-    ${SSH} ${ip} "mkdir -p ${finddir}"
-    ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
-    ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
-    ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
-    scp ${ip}:/tmp/find.tar.xz ${folder}
-    mkdir -p ${finddir}
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
+    ${SSH} "${ip}" "mkdir -p ${finddir}"
+    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
+    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
+    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
+    scp "${ip}":/tmp/find.tar.xz "${folder}"
+    mkdir -p "${finddir}"
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
     tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
-    cp /tmp/rsync.tar.xz ${folder}
+    cp /tmp/rsync.tar.xz "${folder}"
 }
 
 # List of extra services to extract from journalctl
@@ -366,16 +572,16 @@ function collect_openstack_logs() {
     local -r node_type=${3}
     local oslogs="${folder}/oslogs"
 
-    printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}\n"
-    rm -rf ${oslogs}
-    mkdir -p ${oslogs}
+    printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
+    rm -rf "${oslogs}"
+    mkdir -p "${oslogs}"
     # There are always some logs in /opt/stack/logs and this also covers the
     # pre-queens branches which always use /opt/stack/logs
-    rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links
+    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links
 
     # Starting with queens break out the logs from journalctl
     if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
-        cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
+        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
 extra_services_cntl="${extra_services_cntl}"
 extra_services_cmp="${extra_services_cmp}"
 
@@ -383,7 +589,7 @@ function extract_from_journal() {
     local -r services=\${1}
     local -r folder=\${2}
     local -r node_type=\${3}
-    printf "extract_from_journal folder: \${folder}, services: \${services}\n"
+    printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
     for service in \${services}; do
         # strip anything before @ and anything after .
         # devstack@g-api.service will end as g-api
@@ -406,12 +612,12 @@ fi
 ls -al /tmp/oslogs
 EOF
 # cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
-        printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}, executing script\n"
-        cat ${WORKSPACE}/collect_openstack_logs.sh
-        scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
-        ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
-        rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
-        scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
+        printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
+        cat "${WORKSPACE}"/collect_openstack_logs.sh
+        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
+        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
+        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
+        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
     fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
 }
 
@@ -419,87 +625,90 @@ function collect_netvirt_logs() {
     set +e  # We do not want to create red dot just because something went wrong while fetching logs.
 
     cat > extra_debug.sh << EOF
-echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
+echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
 /usr/sbin/lsmod | /usr/bin/grep openvswitch
-echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
+echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
 sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
-echo -e "\nsudo netstat -punta\n"
+echo -e "\\nsudo netstat -punta\\n"
 sudo netstat -punta
-echo -e "\nsudo getenforce\n"
+echo -e "\\nsudo getenforce\\n"
 sudo getenforce
-echo -e "\nsudo systemctl status httpd\n"
+echo -e "\\nsudo systemctl status httpd\\n"
 sudo systemctl status httpd
-echo -e "\nenv\n"
+echo -e "\\nenv\\n"
 env
 source /opt/stack/devstack/openrc admin admin
-echo -e "\nenv after openrc\n"
+echo -e "\\nenv after openrc\\n"
 env
-echo -e "\nsudo du -hs /opt/stack"
+echo -e "\\nsudo du -hs /opt/stack"
 sudo du -hs /opt/stack
-echo -e "\nsudo mount"
+echo -e "\\nsudo mount"
 sudo mount
-echo -e "\ndmesg -T > /tmp/dmesg.log"
+echo -e "\\ndmesg -T > /tmp/dmesg.log"
 dmesg -T > /tmp/dmesg.log
-echo -e "\njournalctl > /tmp/journalctl.log\n"
+echo -e "\\njournalctl > /tmp/journalctl.log\\n"
 sudo journalctl > /tmp/journalctl.log
-echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
+echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
 ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
 EOF
 
     # Since this log collection work is happening before the archive build macro which also
     # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
     # archives build step will essentially be a noop.
-    mkdir -p ${WORKSPACE}/archives
+    mkdir -p "${WORKSPACE}"/archives
 
-    mv /tmp/changes.txt ${WORKSPACE}/archives
-    mv /tmp/validations.txt ${WORKSPACE}/archives
-    mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
-    mv ${WORKSPACE}/haproxy.cfg ${WORKSPACE}/archives
-    ssh ${OPENSTACK_HAPROXY_1_IP} "sudo journalctl -u haproxy > /tmp/haproxy.log"
-    scp ${OPENSTACK_HAPROXY_1_IP}:/tmp/haproxy.log ${WORKSPACE}/archives/
+    mv /tmp/changes.txt "${WORKSPACE}"/archives
+    mv /tmp/validations.txt "${WORKSPACE}"/archives
+    mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
+    mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
+    ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
+    scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/
 
     sleep 5
     # FIXME: Do not create .tar and gzip before copying.
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
         CONTROLLERIP=ODL_SYSTEM_${i}_IP
         echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
         NODE_FOLDER="odl_${i}"
-        mkdir -p ${NODE_FOLDER}
-        echo "Lets's take the karaf thread dump again..."
-        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
-        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
+        mkdir -p "${NODE_FOLDER}"
+        echo "Let's take the karaf thread dump again..."
+        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
+        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
-        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
+        # $pid needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
         echo "killing karaf process..."
+        # shellcheck disable=SC2016
         ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
-        ${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
-        scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
-        scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
-        scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
-        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
-        scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
-        scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
-        tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
-        grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
-        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
-            ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
+        ${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
+        scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
+        scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
+        scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
+        scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
+        scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
+        tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
+        grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
+        grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
+            "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
         # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
-        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
-        mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
-        rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
-        mv *_threads* ${NODE_FOLDER}
-        mv ps_* ${NODE_FOLDER}
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
+        mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
+        rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
+        mv -- *_threads* "${NODE_FOLDER}"
+        mv ps_* "${NODE_FOLDER}"
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
-    print_job_parameters > ${WORKSPACE}/archives/params.txt
+    print_job_parameters > "${WORKSPACE}"/archives/params.txt
 
     # Control Node
-    for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
+    for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
         OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
             echo "collect_logs: for openstack combo node ip: ${!OSIP}"
@@ -508,115 +717,115 @@ EOF
             echo "collect_logs: for openstack control node ip: ${!OSIP}"
             NODE_FOLDER="control_${i}"
         fi
-        mkdir -p ${NODE_FOLDER}
+        mkdir -p "${NODE_FOLDER}"
         tcpdump_stop "${!OSIP}"
-        scp extra_debug.sh ${!OSIP}:/tmp
+        scp extra_debug.sh "${!OSIP}":/tmp
         # Capture compute logs if this is a combo node
         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
-            scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-            scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-            scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-            rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
+            scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+            scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+            scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+            rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
         fi
-        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
-        scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
-        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
+        scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
+        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
         collect_files "${!OSIP}" "${NODE_FOLDER}"
-        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
-        scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
+        scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
-        mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
+        mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
         # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
         tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
-        mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
     # Compute Nodes
-    for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
-        OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
+    for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
+        OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
         echo "collect_logs: for openstack compute node ip: ${!OSIP}"
         NODE_FOLDER="compute_${i}"
-        mkdir -p ${NODE_FOLDER}
+        mkdir -p "${NODE_FOLDER}"
         tcpdump_stop "${!OSIP}"
-        scp extra_debug.sh ${!OSIP}:/tmp
-        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
-        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
-        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
-        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
+        scp extra_debug.sh "${!OSIP}":/tmp
+        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
+        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
+        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
+        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
         collect_files "${!OSIP}" "${NODE_FOLDER}"
-        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
-        scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
+        scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
-        mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
     # Tempest
     DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
     TESTREPO=".stestr"
-    TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
+    TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
     # Look for tempest test results in the $TESTREPO dir and copy if found
-    if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
-        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
-        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
-        mkdir -p ${TEMPEST_LOGS_DIR}
-        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
-        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
+    if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
+        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
+        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
+        mkdir -p "${TEMPEST_LOGS_DIR}"
+        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
+        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
     else
         echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
     fi
@@ -631,31 +840,41 @@ function join() {
         final=${final}${delim}${str}
     done
 
-    echo ${final}
+    echo "${final}"
 }
 
 function get_nodes_list() {
     # Create the string for nodes
-    for i in `seq 1 ${NUM_ODL_SYSTEM}` ; do
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}") ; do
         CONTROLLERIP=ODL_SYSTEM_${i}_IP
         nodes[$i]=${!CONTROLLERIP}
     done
 
     nodes_list=$(join "${nodes[@]}")
-    echo ${nodes_list}
+    echo "${nodes_list}"
 }
 
 function get_features() {
-    if [ ${CONTROLLERSCOPE} == 'all' ]; then
+    if [ "${CONTROLLERSCOPE}" == 'all' ]; then
         ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
-        export CONTROLLERMEM="3072m"
+        # if CONTROLLERMEM still is the default 2G and was not overridden by a
+        # custom job, then we need to make sure to increase it because "all"
+        # features can be heavy
+        if [ "${CONTROLLERMEM}" == "2048m" ]; then
+            export CONTROLLERMEM="3072m"
+        fi
     else
         ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
     fi
 
+    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+        # Add decanter features to allow JVM monitoring
+        ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch-rest"
+    fi
+
     # Some versions of jenkins job builder result in feature list containing spaces
     # and ending in newline. Remove all that.
-    ACTUALFEATURES=`echo "${ACTUALFEATURES}" | tr -d '\n \r'`
+    ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
     echo "ACTUALFEATURES: ${ACTUALFEATURES}"
 
     # In the case that we want to install features via karaf shell, a space separated list of
@@ -669,7 +888,7 @@ function get_features() {
 
 # Create the configuration script to be run on controllers.
 function create_configuration_script() {
-    cat > ${WORKSPACE}/configuration-script.sh <<EOF
+    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
 set -x
 source /tmp/common-functions.sh ${BUNDLEFOLDER}
 
@@ -688,7 +907,7 @@ cat ${MAVENCONF}
 
 if [[ "$USEFEATURESBOOT" == "True" ]]; then
     echo "Configuring the startup features..."
-    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
+    sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
 fi
 
 FEATURE_TEST_STRING="features-integration-test"
@@ -697,7 +916,7 @@ if [[ "$KARAF_VERSION" == "karaf4" ]]; then
     FEATURE_TEST_STRING="features-test"
 fi
 
-sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
+sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.2.0/xml/features,%g" ${FEATURESCONF}
 if [[ ! -z "${REPO_URL}" ]]; then
    sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
 fi
@@ -734,7 +953,7 @@ EOF
 
 # Create the startup script to be run on controllers.
 function create_startup_script() {
-    cat > ${WORKSPACE}/startup-script.sh <<EOF
+    cat > "${WORKSPACE}"/startup-script.sh <<EOF
 echo "Redirecting karaf console output to karaf_console.log"
 export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
 mkdir -p /tmp/${BUNDLEFOLDER}/data/log
@@ -746,16 +965,25 @@ EOF
 }
 
 function create_post_startup_script() {
-    cat > ${WORKSPACE}/post-startup-script.sh <<EOF
-if [[ "$USEFEATURESBOOT" != "True" ]]; then
+    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
+# wait up to 60s for karaf port 8101 to be opened, polling every 5s
+loop_count=0;
+until [[ \$loop_count -ge 12 ]]; do
+    netstat -na | grep ":::8101" && break;
+    loop_count=\$[\$loop_count+1];
+    sleep 5;
+done
 
-    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
-    loop_count=0;
-    until [[ \$loop_count -ge 12 ]]; do
-        netstat -na | grep 8101 && break;
-        loop_count=\$[\$loop_count+1];
-        sleep 5;
-    done
+# This workaround is required for Karaf decanter to work proper
+# The bundle:refresh command does not fail if the decanter bundles are not present
+echo "ssh to karaf console to do bundle refresh of decanter jmx collector"
+sshpass -p karaf ssh -o StrictHostKeyChecking=no \
+                     -o UserKnownHostsFile=/dev/null \
+                     -o LogLevel=error \
+                     -p 8101 karaf@localhost \
+                     "bundle:refresh org.apache.karaf.decanter.collector.jmx && bundle:refresh org.apache.karaf.decanter.api"
+
+if [[ "$USEFEATURESBOOT" != "True" ]]; then
 
     echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
     for feature in ${SPACE_SEPARATED_FEATURES}; do
@@ -786,7 +1014,7 @@ done;
 
 # if we ended up not finding ready status in the above loop, we can output some debugs
 grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
-if [ $? -ne 0 ]; then
+if [ \$? -ne 0 ]; then
     echo "Timeout Controller DOWN"
     echo "Dumping first 500K bytes of karaf log..."
     head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
@@ -821,47 +1049,51 @@ EOF
 # Copy over the configuration script and configuration files to each controller
 # Execute the configuration script on each controller.
 function copy_and_run_configuration_script() {
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/configuration-script.sh ${!CONTROLLERIP}:/tmp/
-        ssh ${!CONTROLLERIP} "bash /tmp/configuration-script.sh ${i}"
+        scp "${WORKSPACE}"/configuration-script.sh "${!CONTROLLERIP}":/tmp/
+        # $i needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
     done
 }
 
 # Copy over the startup script to each controller and execute it.
 function copy_and_run_startup_script() {
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp/
-        ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
+        scp "${WORKSPACE}"/startup-script.sh "${!CONTROLLERIP}":/tmp/
+        ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
     done
 }
 
 function copy_and_run_post_startup_script() {
     seed_index=1
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Execute the post startup script on controller ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
-        ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
-        if [ $(( $i % ${NUM_ODL_SYSTEM} )) == 0 ]; then
+        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
+        # $seed_index needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
+        if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
             seed_index=1
         fi
     done
 }
 
-function create_controller_variables() {
-    echo "Generating controller variables..."
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
-        odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
-        echo "Lets's take the karaf thread dump"
-        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
-        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
+function dump_controller_threads() {
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
+        echo "Let's take the karaf thread dump"
+        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
+        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
-        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
+        # $i needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
     done
 }
 
@@ -872,7 +1104,7 @@ function build_ovs() {
     local -r rpm_path="$3"
 
     echo "Building OVS ${version} on ${ip} ..."
-    cat > ${WORKSPACE}/build_ovs.sh << EOF
+    cat > "${WORKSPACE}"/build_ovs.sh << EOF
 set -ex -o pipefail
 
 echo '---> Building openvswitch version ${version}'
@@ -927,10 +1159,10 @@ popd
 rm -rf \${TMP}
 EOF
 
-    scp ${WORKSPACE}/build_ovs.sh ${ip}:/tmp
-    ${SSH} ${ip} " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
-    scp -r ${ip}:/tmp/ovs_rpms/* "${rpm_path}/"
-    ${SSH} ${ip} "rm -rf /tmp/ovs_rpms"
+    scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
+    ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
+    scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
+    ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
 }
 
 # Install OVS RPMs from yum repo
@@ -939,7 +1171,7 @@ function install_ovs_from_repo() {
     local -r rpm_repo="$2"
 
     echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
-    cat > ${WORKSPACE}/install_ovs.sh << EOF
+    cat > "${WORKSPACE}"/install_ovs.sh << EOF
 set -ex -o pipefail
 
 echo '---> Installing openvswitch from ${rpm_repo}'
@@ -997,8 +1229,8 @@ NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
 [ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
 EOF
 
-    scp ${WORKSPACE}/install_ovs.sh ${ip}:/tmp
-    ${SSH} ${ip} "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
+    scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
+    ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
 }
 
 # Install OVS RPMS from path
@@ -1007,10 +1239,10 @@ function install_ovs_from_path() {
     local -r rpm_path="$2"
 
     echo "Creating OVS RPM repo on ${ip} ..."
-    ${SSH} ${ip} "mkdir -p /tmp/ovs_rpms"
-    scp -r "${rpm_path}"/* ${ip}:/tmp/ovs_rpms
-    ${SSH} ${ip} "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
-    install_ovs_from_repo ${ip} file:/tmp/ovs_rpms
+    ${SSH} "${ip}" "mkdir -p /tmp/ovs_rpms"
+    scp -r "${rpm_path}"/* "${ip}":/tmp/ovs_rpms
+    ${SSH} "${ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
+    install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms
 }