X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=jjb%2Fintegration%2Fcommon-functions.sh;h=60aeb0e459a81bb3abbc6d020b2535c3332b1d56;hb=244d3967718308ab7309b94a385bfecb48957c47;hp=ccc05e8aeda752f3214efdd20e994a9785c8bd7d;hpb=a5cc46cf7cfe516eb911efb43be9ba50c37cbd44;p=releng%2Fbuilder.git

diff --git a/jjb/integration/common-functions.sh b/jjb/integration/common-functions.sh
index ccc05e8ae..60aeb0e45 100644
--- a/jjb/integration/common-functions.sh
+++ b/jjb/integration/common-functions.sh
@@ -42,16 +42,21 @@ function set_java_vars() {
     local -r controllermem=$2
     local -r memconf=$3
 
-    echo "Configure\n    java home: ${java_home}\n    max memory: ${controllermem}\n    memconf: ${memconf}"
-
-    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
-    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
+    echo "Configure"
+    echo "    java home: ${java_home}"
+    echo "    max memory: ${controllermem}"
+    echo "    memconf: ${memconf}"
+
+    # We do not want expressions to expand here.
+    # shellcheck disable=SC2016
+    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
+    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
 
     echo "cat ${memconf}"
-    cat ${memconf}
+    cat "${memconf}"
 
     echo "Set Java version"
-    sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
-    sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
+    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
+    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
     echo "JDK default version ..."
     java -version
@@ -71,8 +76,7 @@ function configure_karaf_log() {
     local logapi=log4j
 
     # Check what the logging.cfg file is using for the logging api: log4j or log4j2
-    grep "log4j2" ${LOGCONF}
-    if [ $? -eq 0 ]; then
+    if grep "log4j2" "${LOGCONF}"; then
         logapi=log4j2
     fi
 
@@ -80,18 +84,18 @@ function configure_karaf_log() {
     if [ "${logapi}" == "log4j2" ]; then
         # FIXME: Make log size limit configurable from build parameter.
         # From Neon the default karaf file size is 64 MB
-        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
         # Flourine still uses 16 MB
-        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
         orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
         orgmodule_="${orgmodule//./_}"
-        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
-        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
+        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
+        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
     else
-        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
         # FIXME: Make log size limit configurable from build parameter.
-        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
-        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
+        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
+        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
     fi
 
     # Add custom logging levels
@@ -110,17 +114,17 @@ function configure_karaf_log() {
                 orgmodule="org.opendaylight.${module}"
                 if [ "${logapi}" == "log4j2" ]; then
                     orgmodule_="${orgmodule//./_}"
-                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
-                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
+                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                 else
-                    echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
+                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                 fi
             fi
         done
     fi
 
     echo "cat ${LOGCONF}"
-    cat ${LOGCONF}
+    cat "${LOGCONF}"
 } # function configure_karaf_log()
 
 function configure_karaf_log_for_apex() {
@@ -153,12 +157,14 @@ function configure_karaf_log_for_apex() {
         done
         # replace the trailing comma with a closing brace followed by trailing comma
         logging_config=${logging_config%,}" },"
-        echo $logging_config
+        echo "$logging_config"
 
         # fine a sane line number to inject the custom logging json
-        lineno=$(ssh $OPENSTACK_CONTROL_NODE_1_IP "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
-        ssh $controller_ip "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
-        ssh $controller_ip "sudo cat /etc/puppet/hieradata/service_configs.json"
+        lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
+        # We purposely want these variables to expand client-side
+        # shellcheck disable=SC2029
+        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
+        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
     fi
 } # function configure_karaf_log_for_apex()
 
@@ -173,7 +179,7 @@ function configure_odl_features_for_apex() {
 
 cat > /tmp/set_odl_features.sh << EOF
 sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
-for feature in $(echo $ACTUALFEATURES | sed "s/,/ /g"); do
+for feature in "\${ACTUALFEATURES//,/ }"; do
     sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
 done
 echo "Modified puppet-opendaylight service_configs.json..."
@@ -184,8 +190,8 @@ EOF
     cat /tmp/set_odl_features.sh
 
     if [ -n "${ACTUALFEATURES}" ]; then
-        scp /tmp/set_odl_features.sh $controller_ip:/tmp/set_odl_features.sh
-        ssh $controller_ip "sudo bash /tmp/set_odl_features.sh"
+        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
+        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
     fi
 } # function configure_odl_features_for_apex()
 
@@ -218,12 +224,12 @@ function get_test_suites() {
     fi
 
     echo "Changing the testplan path..."
-    cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
+    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
     cat testplan.txt
 
     # Use the testplan if specific SUITES are not defined.
     if [ -z "${SUITES}" ]; then
-        suite_list=$(egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
+        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
     else
         suite_list=""
         workpath="${WORKSPACE}/test/csit/suites"
@@ -237,7 +243,7 @@ function get_test_suites() {
         done
     fi
 
-    eval $__suite_list="'$suite_list'"
+    eval "$__suite_list='$suite_list'"
 }
 
 function run_plan() {
@@ -252,7 +258,7 @@ function run_plan() {
             ;;
     esac
 
-    printf "Locating %s plan to use...\n" "${type}"
+    printf "Locating %s plan to use...\\n" "${type}"
     plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
     if [ ! -f "${plan_filepath}" ]; then
         plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
@@ -262,17 +268,18 @@ function run_plan() {
     fi
 
     if [ -f "${plan_filepath}" ]; then
-        printf "%s plan exists!!!\n" "${type}"
-        printf "Changing the %s plan path...\n" "${type}"
-        cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
-        cat ${type}plan.txt
-        for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
-            printf "Executing %s...\n" "${line}"
+        printf "%s plan exists!!!\\n" "${type}"
+        printf "Changing the %s plan path...\\n" "${type}"
+        sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
+        cat "${type}plan.txt"
+        # shellcheck disable=SC2013
+        for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
+            printf "Executing %s...\\n" "${line}"
             # shellcheck source=${line} disable=SC1091
-            source ${line}
+            source "${line}"
         done
     fi
-    printf "Finished running %s plans\n" "${type}"
+    printf "Finished running %s plans\\n" "${type}"
 } # function run_plan()
 
 # Return elapsed time. Usage:
@@ -304,7 +311,7 @@ function timer()
 function csv2ssv() {
     local csv=$1
     if [ -n "${csv}" ]; then
-        ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
+        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
     fi
 
     echo "${ssv}"
@@ -312,7 +319,7 @@ function csv2ssv() {
 
 function is_openstack_feature_enabled() {
     local feature=$1
-    for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
+    for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
        if [ "${enabled_feature}" == "${feature}" ]; then
            echo 1
            return
@@ -375,19 +382,21 @@ function tcpdump_start() {
     local -r filter=$3
 
     filter_=${filter// /_}
-    printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
-    ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
-    ${SSH} ${ip} "ps -ef | grep tcpdump"
+    printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
+    # $fileter needs to be parsed client-side
+    # shellcheck disable=SC2029
+    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump"
 }
 
 function tcpdump_stop() {
     local -r ip=$1
 
-    printf "node %s: stopping tcpdump\n" "$ip"
-    ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
-    ${SSH} ${ip} "sudo pkill -f tcpdump"
-    ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
-    ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
+    printf "node %s: stopping tcpdump\\n" "$ip"
+    ${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
+    ${SSH} "${ip}" "sudo pkill -f tcpdump"
+    ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
+    ${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
     # copy_logs will copy any *.xz files
 }
 
@@ -396,19 +405,19 @@ function collect_files() {
     local -r ip=$1
     local -r folder=$2
     finddir=/tmp/finder
-    ${SSH} ${ip} "mkdir -p ${finddir}"
-    ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
-    ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
-    ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
-    ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
-    scp ${ip}:/tmp/find.tar.xz ${folder}
-    mkdir -p ${finddir}
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
-    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
+    ${SSH} "${ip}" "mkdir -p ${finddir}"
+    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
+    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
+    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
+    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
+    scp "${ip}":/tmp/find.tar.xz "${folder}"
+    mkdir -p "${finddir}"
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
+    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
     tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
-    cp /tmp/rsync.tar.xz ${folder}
+    cp /tmp/rsync.tar.xz "${folder}"
 }
 
 # List of extra services to extract from journalctl
@@ -439,16 +448,16 @@ function collect_openstack_logs() {
     local -r node_type=${3}
     local oslogs="${folder}/oslogs"
 
-    printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
-    rm -rf ${oslogs}
-    mkdir -p ${oslogs}
+    printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
+    rm -rf "${oslogs}"
+    mkdir -p "${oslogs}"
 
     # There are always some logs in /opt/stack/logs and this also covers the
     # pre-queens branches which always use /opt/stack/logs
-    rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links
+    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links
 
     # Starting with queens break out the logs from journalctl
     if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
-        cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
+        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
 extra_services_cntl="${extra_services_cntl}"
 extra_services_cmp="${extra_services_cmp}"
@@ -456,7 +465,7 @@ function extract_from_journal() {
     local -r services=\${1}
     local -r folder=\${2}
     local -r node_type=\${3}
-    printf "extract_from_journal folder: \${folder}, services: \${services}\n"
+    printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
     for service in \${services}; do
         # strip anything before @ and anything after .
         # devstack@g-api.service will end as g-api
@@ -479,12 +488,12 @@ fi
 ls -al /tmp/oslogs
 EOF
 # cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
-        printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
-        cat ${WORKSPACE}/collect_openstack_logs.sh
-        scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
-        ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
-        rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
-        scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
+        printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
+        cat "${WORKSPACE}"/collect_openstack_logs.sh
+        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
+        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
+        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
+        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
     fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
 }
 
@@ -492,30 +501,30 @@ function collect_netvirt_logs() {
     set +e # We do not want to create red dot just because something went wrong while fetching logs.
 
     cat > extra_debug.sh << EOF
-echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
+echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
 /usr/sbin/lsmod | /usr/bin/grep openvswitch
-echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
+echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
 sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
-echo -e "\nsudo netstat -punta\n"
+echo -e "\\nsudo netstat -punta\\n"
 sudo netstat -punta
-echo -e "\nsudo getenforce\n"
+echo -e "\\nsudo getenforce\\n"
 sudo getenforce
-echo -e "\nsudo systemctl status httpd\n"
+echo -e "\\nsudo systemctl status httpd\\n"
 sudo systemctl status httpd
-echo -e "\nenv\n"
+echo -e "\\nenv\\n"
 env
 source /opt/stack/devstack/openrc admin admin
-echo -e "\nenv after openrc\n"
+echo -e "\\nenv after openrc\\n"
 env
-echo -e "\nsudo du -hs /opt/stack"
+echo -e "\\nsudo du -hs /opt/stack"
 sudo du -hs /opt/stack
-echo -e "\nsudo mount"
+echo -e "\\nsudo mount"
 sudo mount
-echo -e "\ndmesg -T > /tmp/dmesg.log"
+echo -e "\\ndmesg -T > /tmp/dmesg.log"
 dmesg -T > /tmp/dmesg.log
-echo -e "\njournalctl > /tmp/journalctl.log\n"
+echo -e "\\njournalctl > /tmp/journalctl.log\\n"
 sudo journalctl > /tmp/journalctl.log
-echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
+echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
 ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
 EOF
 
@@ -560,8 +569,8 @@ EOF
         ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
         scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
         tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
-        grep "ROBOT MESSAGE\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
-        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
+        grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
+        grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
             "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
         # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
         sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
@@ -764,7 +773,7 @@ cat ${MAVENCONF}
 
 if [[ "$USEFEATURESBOOT" == "True" ]]; then
     echo "Configuring the startup features..."
-    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
+    sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
 fi
 
 FEATURE_TEST_STRING="features-integration-test"
@@ -773,7 +782,7 @@ if [[ "$KARAF_VERSION" == "karaf4" ]]; then
     FEATURE_TEST_STRING="features-test"
 fi
 
-sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
+sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
 if [[ ! -z "${REPO_URL}" ]]; then
     sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
 fi
@@ -922,7 +931,7 @@ function copy_and_run_post_startup_script() {
     for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
         CONTROLLERIP="ODL_SYSTEM_${i}_IP"
         echo "Execute the post startup script on controller ${!CONTROLLERIP}"
-        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/
+        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
         # $seed_index needs to be parsed client-side
        # shellcheck disable=SC2029
         ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"