Merge changes from topic 'shellcheck'
author Jamo Luhrsen <jluhrsen@gmail.com>
Tue, 4 Jun 2019 21:58:16 +0000 (21:58 +0000)
committer Gerrit Code Review <gerrit@opendaylight.org>
Tue, 4 Jun 2019 21:58:16 +0000 (21:58 +0000)
* changes:
  Fix SC2086 Double quote variables
  Fix SC2086 for lines 300 until EOF
  Fix SC2086 for lines 490 until EOF
  Fix SC2086 for lines 575 until EOF
  Fix SC2086 for lines 700 until EOF
  Fix SC2086 for lines 940 until EOF
  Fix SC2004 $/${} unnecessary on arithmetic vars
  Fix SC2006 backtick syntax is legacy

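The three ShellCheck classes being fixed follow the same before/after patterns throughout common-functions.sh. A minimal illustration (sketch only, not taken verbatim from the patch; the file and variable values below are hypothetical):

    # SC2086: quote expansions so values are not word-split or glob-expanded
    memconf="/tmp/karaf folder/bin/setenv"
    cat "${memconf}"                            # was: cat ${memconf}

    # SC2004: $ and ${} are redundant inside arithmetic contexts
    i=4; NUM_ODL_SYSTEM=2
    echo $(( i % NUM_ODL_SYSTEM ))              # was: $(( $i % ${NUM_ODL_SYSTEM} ))

    # SC2006: backticks are legacy command substitution; prefer $(...)
    suite_list=$(grep -E -v '^#' testplan.txt)  # was: suite_list=`egrep -v '^#' testplan.txt`
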
jjb/integration/common-functions.sh

index be80f1323f720e311c0b5c38dba8cafe5d776d9d..fc342a1ed2bde18432c009fb23f245abf0bc097d 100644 (file)
@@ -44,14 +44,14 @@ function set_java_vars() {
 
     echo "Configure\n    java home: ${java_home}\n    max memory: ${controllermem}\n    memconf: ${memconf}"
 
-    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
-    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
+    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
+    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
     echo "cat ${memconf}"
-    cat ${memconf}
+    cat "${memconf}"
 
     echo "Set Java version"
-    sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
-    sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
+    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
+    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
     echo "JDK default version ..."
     java -version
 
@@ -71,7 +71,7 @@ function configure_karaf_log() {
     local logapi=log4j
 
     # Check what the logging.cfg file is using for the logging api: log4j or log4j2
-    grep "log4j2" ${LOGCONF}
+    grep "log4j2" "${LOGCONF}"
     if [ $? -eq 0 ]; then
         logapi=log4j2
     fi
@@ -80,18 +80,18 @@ function configure_karaf_log() {
     if [ "${logapi}" == "log4j2" ]; then
         # FIXME: Make log size limit configurable from build parameter.
         # From Neon the default karaf file size is 64 MB
-        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
         # Fluorine still uses 16 MB
-        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
         orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
         orgmodule_="${orgmodule//./_}"
-        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
-        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
+        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
+        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
     else
-        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
         # FIXME: Make log size limit configurable from build parameter.
-        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
-        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
+        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
     fi
 
     # Add custom logging levels
@@ -110,17 +110,17 @@ function configure_karaf_log() {
                 orgmodule="org.opendaylight.${module}"
                 if [ "${logapi}" == "log4j2" ]; then
                     orgmodule_="${orgmodule//./_}"
-                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
-                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
+                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                 else
-                    echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                 fi
             fi
         done
     fi
 
     echo "cat ${LOGCONF}"
-    cat ${LOGCONF}
+    cat "${LOGCONF}"
 } # function configure_karaf_log()
 
 function configure_karaf_log_for_apex() {
@@ -153,12 +153,12 @@ function configure_karaf_log_for_apex() {
         done
         # replace the trailing comma with a closing brace followed by trailing comma
         logging_config=${logging_config%,}" },"
-        echo $logging_config
+        echo "$logging_config"
 
         # find a sane line number to inject the custom logging json
         lineno=$(ssh $OPENSTACK_CONTROL_NODE_1_IP "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
-        ssh $controller_ip "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
-        ssh $controller_ip "sudo cat /etc/puppet/hieradata/service_configs.json"
+        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
+        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
     fi
 } # function configure_karaf_log_for_apex()
 
@@ -173,7 +173,7 @@ function configure_odl_features_for_apex() {
 
 cat > /tmp/set_odl_features.sh << EOF
 sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
-for feature in $(echo $ACTUALFEATURES | sed "s/,/ /g"); do
+for feature in $(echo "$ACTUALFEATURES" | sed "s/,/ /g"); do
     sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
 done
 echo "Modified puppet-opendaylight service_configs.json..."
@@ -184,8 +184,8 @@ EOF
     cat /tmp/set_odl_features.sh
 
     if [ -n "${ACTUALFEATURES}" ]; then
-        scp /tmp/set_odl_features.sh $controller_ip:/tmp/set_odl_features.sh
-        ssh $controller_ip "sudo bash /tmp/set_odl_features.sh"
+        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
+        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
     fi
 
 } # function configure_odl_features_for_apex()
@@ -223,7 +223,7 @@ function get_test_suites() {
 
     # Use the testplan if specific SUITES are not defined.
     if [ -z "${SUITES}" ]; then
-        suite_list=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
+        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
     else
         suite_list=""
         workpath="${WORKSPACE}/test/csit/suites"
@@ -264,12 +264,12 @@ function run_plan() {
     if [ -f "${plan_filepath}" ]; then
         printf "%s plan exists!!!\n" "${type}"
         printf "Changing the %s plan path...\n" "${type}"
-        cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
-        cat ${type}plan.txt
-        for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
+        cat "${plan_filepath}" | sed "s:integration:${WORKSPACE}:" > "${type}plan.txt"
+        cat "${type}plan.txt"
+        for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
             printf "Executing %s...\n" "${line}"
             # shellcheck source=${line} disable=SC1091
-            source ${line}
+            source "${line}"
         done
     fi
     printf "Finished running %s plans\n" "${type}"
@@ -304,7 +304,7 @@ function timer()
 function csv2ssv() {
     local csv=$1
     if [ -n "${csv}" ]; then
-        ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
+        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
     fi
 
     echo "${ssv}"
@@ -312,7 +312,7 @@ function csv2ssv() {
 
 function is_openstack_feature_enabled() {
     local feature=$1
-    for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
+    for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
         if [ "${enabled_feature}" == "${feature}" ]; then
            echo 1
            return
@@ -376,18 +376,20 @@ function tcpdump_start() {
     filter_=${filter// /_}
 
     printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
-    ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
-    ${SSH} ${ip} "ps -ef | grep tcpdump"
+    # $filter needs to be parsed client-side
+    # shellcheck disable=SC2029
+    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump"
 }
 
 function tcpdump_stop() {
     local -r ip=$1
 
     printf "node %s: stopping tcpdump\n" "$ip"
-    ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
-    ${SSH} ${ip} "sudo pkill -f tcpdump"
-    ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
-    ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
+    ${SSH} "${ip}" "sudo pkill -f tcpdump"
+    ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
+    ${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
     # copy_logs will copy any *.xz files
 }
 
@@ -396,19 +398,19 @@ function collect_files() {
     local -r ip=$1
     local -r folder=$2
     finddir=/tmp/finder
-    ${SSH} ${ip} "mkdir -p ${finddir}"
-    ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
-    ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
-    ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
-    scp ${ip}:/tmp/find.tar.xz ${folder}
-    mkdir -p ${finddir}
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
+    ${SSH} "${ip}" "mkdir -p ${finddir}"
+    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
+    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
+    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
+    scp "${ip}":/tmp/find.tar.xz "${folder}"
+    mkdir -p "${finddir}"
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
     tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
-    cp /tmp/rsync.tar.xz ${folder}
+    cp /tmp/rsync.tar.xz "${folder}"
 }
 
 # List of extra services to extract from journalctl
@@ -440,15 +442,15 @@ function collect_openstack_logs() {
     local oslogs="${folder}/oslogs"
 
     printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
-    rm -rf ${oslogs}
-    mkdir -p ${oslogs}
+    rm -rf "${oslogs}"
+    mkdir -p "${oslogs}"
     # There are always some logs in /opt/stack/logs and this also covers the
     # pre-queens branches which always use /opt/stack/logs
-    rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links
+    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links
 
     # Starting with queens break out the logs from journalctl
     if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
-        cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
+        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
 extra_services_cntl="${extra_services_cntl}"
 extra_services_cmp="${extra_services_cmp}"
 
@@ -480,11 +482,11 @@ ls -al /tmp/oslogs
 EOF
 # cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
         printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
-        cat ${WORKSPACE}/collect_openstack_logs.sh
-        scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
-        ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
-        rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
-        scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
+        cat "${WORKSPACE}"/collect_openstack_logs.sh
+        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
+        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
+        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
+        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
     fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
 }
 
@@ -522,57 +524,60 @@ EOF
     # Since this log collection work is happening before the archive build macro which also
     # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
     # archives build step will essentially be a noop.
-    mkdir -p ${WORKSPACE}/archives
+    mkdir -p "${WORKSPACE}"/archives
 
-    mv /tmp/changes.txt ${WORKSPACE}/archives
-    mv /tmp/validations.txt ${WORKSPACE}/archives
-    mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
-    mv ${WORKSPACE}/haproxy.cfg ${WORKSPACE}/archives
-    ssh ${OPENSTACK_HAPROXY_1_IP} "sudo journalctl -u haproxy > /tmp/haproxy.log"
-    scp ${OPENSTACK_HAPROXY_1_IP}:/tmp/haproxy.log ${WORKSPACE}/archives/
+    mv /tmp/changes.txt "${WORKSPACE}"/archives
+    mv /tmp/validations.txt "${WORKSPACE}"/archives
+    mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
+    mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
+    ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
+    scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/
 
     sleep 5
     # FIXME: Do not create .tar and gzip before copying.
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
         CONTROLLERIP=ODL_SYSTEM_${i}_IP
         echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
         NODE_FOLDER="odl_${i}"
-        mkdir -p ${NODE_FOLDER}
+        mkdir -p "${NODE_FOLDER}"
         echo "Let's take the karaf thread dump again..."
-        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
-        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
+        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
+        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
-        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
+        # $pid needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
         echo "killing karaf process..."
+        # shellcheck disable=SC2016
         ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
-        ${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
-        scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
-        scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
-        scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
-        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
-        scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
-        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
-        scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
-        tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
-        grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
+        ${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
+        scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
+        scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
+        scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
+        scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
+        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
+        scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
+        tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
+        grep "ROBOT MESSAGE\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
         grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
-            ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
+            "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
         # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
-        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
-        mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
-        rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
-        mv *_threads* ${NODE_FOLDER}
-        mv ps_* ${NODE_FOLDER}
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
+        mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
+        rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
+        mv -- *_threads* "${NODE_FOLDER}"
+        mv ps_* "${NODE_FOLDER}"
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
-    print_job_parameters > ${WORKSPACE}/archives/params.txt
+    print_job_parameters > "${WORKSPACE}"/archives/params.txt
 
     # Control Node
-    for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
+    for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
         OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
             echo "collect_logs: for openstack combo node ip: ${!OSIP}"
@@ -581,115 +586,115 @@ EOF
             echo "collect_logs: for openstack control node ip: ${!OSIP}"
             NODE_FOLDER="control_${i}"
         fi
-        mkdir -p ${NODE_FOLDER}
+        mkdir -p "${NODE_FOLDER}"
         tcpdump_stop "${!OSIP}"
-        scp extra_debug.sh ${!OSIP}:/tmp
+        scp extra_debug.sh "${!OSIP}":/tmp
         # Capture compute logs if this is a combo node
         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
-            scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-            scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-            scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-            rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
+            scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+            scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+            scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+            rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
         fi
-        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
-        scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
-        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
+        scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
+        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
         collect_files "${!OSIP}" "${NODE_FOLDER}"
-        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
-        scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
+        scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
-        mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
+        mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
         # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
         tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
-        mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
     # Compute Nodes
-    for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
-        OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
+    for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
+        OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
         echo "collect_logs: for openstack compute node ip: ${!OSIP}"
         NODE_FOLDER="compute_${i}"
-        mkdir -p ${NODE_FOLDER}
+        mkdir -p "${NODE_FOLDER}"
         tcpdump_stop "${!OSIP}"
-        scp extra_debug.sh ${!OSIP}:/tmp
-        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
-        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
-        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
-        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
-        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
-        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
-        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
+        scp extra_debug.sh "${!OSIP}":/tmp
+        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
+        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
+        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
+        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
+        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
+        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
+        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
         collect_files "${!OSIP}" "${NODE_FOLDER}"
-        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
-        scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
-        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
+        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
+        scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
+        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
-        mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
-        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
+        mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
+        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
     done
 
     # Tempest
     DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
     TESTREPO=".stestr"
-    TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
+    TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
     # Look for tempest test results in the $TESTREPO dir and copy if found
-    if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
-        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
-        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
-        mkdir -p ${TEMPEST_LOGS_DIR}
-        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
-        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
+    if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
+        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
+        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
+        mkdir -p "${TEMPEST_LOGS_DIR}"
+        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
+        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
     else
         echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
     fi
@@ -704,22 +709,22 @@ function join() {
         final=${final}${delim}${str}
     done
 
-    echo ${final}
+    echo "${final}"
 }
 
 function get_nodes_list() {
     # Create the string for nodes
-    for i in `seq 1 ${NUM_ODL_SYSTEM}` ; do
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}") ; do
         CONTROLLERIP=ODL_SYSTEM_${i}_IP
         nodes[$i]=${!CONTROLLERIP}
     done
 
     nodes_list=$(join "${nodes[@]}")
-    echo ${nodes_list}
+    echo "${nodes_list}"
 }
 
 function get_features() {
-    if [ ${CONTROLLERSCOPE} == 'all' ]; then
+    if [ "${CONTROLLERSCOPE}" == 'all' ]; then
         ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
         export CONTROLLERMEM="3072m"
     else
@@ -728,7 +733,7 @@ function get_features() {
 
     # Some versions of jenkins job builder result in feature list containing spaces
     # and ending in newline. Remove all that.
-    ACTUALFEATURES=`echo "${ACTUALFEATURES}" | tr -d '\n \r'`
+    ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
     echo "ACTUALFEATURES: ${ACTUALFEATURES}"
 
     # In the case that we want to install features via karaf shell, a space separated list of
@@ -742,7 +747,7 @@ function get_features() {
 
 # Create the configuration script to be run on controllers.
 function create_configuration_script() {
-    cat > ${WORKSPACE}/configuration-script.sh <<EOF
+    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
 set -x
 source /tmp/common-functions.sh ${BUNDLEFOLDER}
 
@@ -807,7 +812,7 @@ EOF
 
 # Create the startup script to be run on controllers.
 function create_startup_script() {
-    cat > ${WORKSPACE}/startup-script.sh <<EOF
+    cat > "${WORKSPACE}"/startup-script.sh <<EOF
 echo "Redirecting karaf console output to karaf_console.log"
 export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
 mkdir -p /tmp/${BUNDLEFOLDER}/data/log
@@ -819,7 +824,7 @@ EOF
 }
 
 function create_post_startup_script() {
-    cat > ${WORKSPACE}/post-startup-script.sh <<EOF
+    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
 if [[ "$USEFEATURESBOOT" != "True" ]]; then
 
     # wait up to 60s for karaf port 8101 to be opened, polling every 5s
@@ -894,32 +899,36 @@ EOF
 # Copy over the configuration script and configuration files to each controller
 # Execute the configuration script on each controller.
 function copy_and_run_configuration_script() {
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/configuration-script.sh ${!CONTROLLERIP}:/tmp/
-        ssh ${!CONTROLLERIP} "bash /tmp/configuration-script.sh ${i}"
+        scp "${WORKSPACE}"/configuration-script.sh "${!CONTROLLERIP}":/tmp/
+        # $i needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
     done
 }
 
 # Copy over the startup script to each controller and execute it.
 function copy_and_run_startup_script() {
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp/
-        ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
+        scp "${WORKSPACE}"/startup-script.sh "${!CONTROLLERIP}":/tmp/
+        ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
     done
 }
 
 function copy_and_run_post_startup_script() {
     seed_index=1
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Execute the post startup script on controller ${!CONTROLLERIP}"
-        scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
-        ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
-        if [ $(( $i % ${NUM_ODL_SYSTEM} )) == 0 ]; then
+        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp
+        # $seed_index needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
+        if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
             seed_index=1
         fi
     done
@@ -927,14 +936,16 @@ function copy_and_run_post_startup_script() {
 
 function create_controller_variables() {
     echo "Generating controller variables..."
-    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
+        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
         echo "Let's take the karaf thread dump"
-        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
-        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
+        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
+        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
-        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
+        # $pid needs to be parsed client-side
+        # shellcheck disable=SC2029
+        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
     done
 }
 
@@ -945,7 +956,7 @@ function build_ovs() {
     local -r rpm_path="$3"
 
     echo "Building OVS ${version} on ${ip} ..."
-    cat > ${WORKSPACE}/build_ovs.sh << EOF
+    cat > "${WORKSPACE}"/build_ovs.sh << EOF
 set -ex -o pipefail
 
 echo '---> Building openvswitch version ${version}'
@@ -1000,10 +1011,10 @@ popd
 rm -rf \${TMP}
 EOF
 
-    scp ${WORKSPACE}/build_ovs.sh ${ip}:/tmp
-    ${SSH} ${ip} " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
-    scp -r ${ip}:/tmp/ovs_rpms/* "${rpm_path}/"
-    ${SSH} ${ip} "rm -rf /tmp/ovs_rpms"
+    scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
+    ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
+    scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
+    ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
 }
 
 # Install OVS RPMs from yum repo
@@ -1012,7 +1023,7 @@ function install_ovs_from_repo() {
     local -r rpm_repo="$2"
 
     echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
-    cat > ${WORKSPACE}/install_ovs.sh << EOF
+    cat > "${WORKSPACE}"/install_ovs.sh << EOF
 set -ex -o pipefail
 
 echo '---> Installing openvswitch from ${rpm_repo}'
@@ -1070,8 +1081,8 @@ NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
 [ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
 EOF
 
-    scp ${WORKSPACE}/install_ovs.sh ${ip}:/tmp
-    ${SSH} ${ip} "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
+    scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
+    ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
 }
 
 # Install OVS RPMS from path
@@ -1080,10 +1091,10 @@ function install_ovs_from_path() {
     local -r rpm_path="$2"
 
     echo "Creating OVS RPM repo on ${ip} ..."
-    ${SSH} ${ip} "mkdir -p /tmp/ovs_rpms"
-    scp -r "${rpm_path}"/* ${ip}:/tmp/ovs_rpms
-    ${SSH} ${ip} "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
-    install_ovs_from_repo ${ip} file:/tmp/ovs_rpms
+    ${SSH} "${ip}" "mkdir -p /tmp/ovs_rpms"
+    scp -r "${rpm_path}"/* "${ip}":/tmp/ovs_rpms
+    ${SSH} "${ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
+    install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms
 }