3 echo "common-functions.sh is being sourced"
# Locations of the basic Karaf/controller configuration files inside the
# unpacked distribution under /tmp/${BUNDLEFOLDER}, plus the default
# maximum JVM heap size handed to the controller.
export MAVENCONF="/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg"
export FEATURESCONF="/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg"
export CUSTOMPROP="/tmp/${BUNDLEFOLDER}/etc/custom.properties"
export LOGCONF="/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg"
export MEMCONF="/tmp/${BUNDLEFOLDER}/bin/setenv"
export CONTROLLERMEM="2048m"
# Locations of the initial clustering configuration files (akka, modules,
# module-shards) inside the unpacked distribution.
export AKKACONF="/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf"
export MODULESCONF="/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf"
export MODULESHARDSCONF="/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf"
# Dump the common-functions environment variables for debugging.
# NOTE(review): the body below appears to be the content of a here-document
# (the `cat << EOF` and terminator are not visible here) — do not insert
# shell comments between these lines.
function print_common_env() {
common-functions environment:
MAVENCONF: ${MAVENCONF}
ACTUALFEATURES: ${ACTUALFEATURES}
FEATURESCONF: ${FEATURESCONF}
CUSTOMPROP: ${CUSTOMPROP}
CONTROLLERMEM: ${CONTROLLERMEM}
MODULESCONF: ${MODULESCONF}
MODULESHARDSCONF: ${MODULESHARDSCONF}
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
# NOTE(review): ${java_home} and ${memconf} are referenced below but their
# local declarations are not visible here (presumably $1/$3) — verify.
function set_java_vars() {
# $2: maximum JVM heap size (e.g. "2048m") written into the setenv file.
local -r controllermem=$2
echo " java home: ${java_home}"
echo " max memory: ${controllermem}"
echo " memconf: ${memconf}"
# Uncomment the JAVA_HOME export in setenv, keeping any pre-set JAVA_HOME
# as the default value.
# We do not want expressions to expand here.
# shellcheck disable=SC2016
sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
# Replace the stock 2048m max heap with the requested size.
sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
echo "Set Java version"
# Register and select the requested JDK system-wide via alternatives.
sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
echo "JDK default version ..."
export JAVA_HOME="${java_home}"
# Resolve symlinks so the log shows the actual java binary in use.
# shellcheck disable=SC2037
JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
# Tune Karaf logging: raise the rolling-log size limit, quiet a noisy
# yangtools parser logger, and apply per-module levels from a
# "module:level module2:level2" map (modules relative to org.opendaylight).
# shellcheck disable=SC2034
# foo appears unused. Verify it or export it.
function configure_karaf_log() {
local -r karaf_version=$1
local -r controllerdebugmap=$2
# Check what the logging.cfg file is using for the logging api: log4j or log4j2
if grep "log4j2" "${LOGCONF}"; then
# NOTE(review): ${logapi} is set in lines not visible here — presumably
# "log4j2"/"log4j" depending on the grep above; confirm.
echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
if [ "${logapi}" == "log4j2" ]; then
# FIXME: Make log size limit configurable from build parameter.
# From Neon the default karaf file size is 64 MB
sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
# Flourine still uses 16 MB
sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
orgmodule_="${orgmodule//./_}"
# NOTE(review): `.name = WARN` looks suspicious — elsewhere (see the
# debug-map loop below) `.name` is set to the module FQN and `.level`
# to the level; confirm this should not be `.name = ${orgmodule}`.
echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
# log4j (pre-log4j2) layout: cap backups and file size.
sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
# FIXME: Make log size limit configurable from build parameter.
sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
# Add custom logging levels
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
# values like "module:level module2:level2" where module is abbreviated and
# does not include "org.opendaylight."
echo "controllerdebugmap: ${controllerdebugmap}"
if [ -n "${controllerdebugmap}" ]; then
for kv in ${controllerdebugmap}; do
# NOTE(review): ${module}/${level} are split out of ${kv} in lines not
# visible here — presumably ${kv%%:*} / ${kv#*:}; confirm.
echo "module: $module, level: $level"
# shellcheck disable=SC2157
if [ -n "${module}" ] && [ -n "${level}" ]; then
orgmodule="org.opendaylight.${module}"
if [ "${logapi}" == "log4j2" ]; then
orgmodule_="${orgmodule//./_}"
echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
echo "cat ${LOGCONF}"
} # function configure_karaf_log()
# Inject CONTROLLERDEBUGMAP log levels into the puppet hiera data
# (service_configs.json) on an apex-deployed controller node.
function configure_karaf_log_for_apex() {
# TODO: add the extra steps to this function to do any extra work
# in this apex environment like we do in our standard environment.
# EX: log size, rollover, etc.
# Modify ODL Log Levels, if needed, for new distribution. This will modify
# the control nodes hiera data which will be used during the puppet deploy
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
# values like "module:level module2:level2" where module is abbreviated and
# does not include "org.opendaylight."
local -r controller_ip=$1
# shellcheck disable=SC2153
echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
if [ -n "${CONTROLLERDEBUGMAP}" ]; then
# Build an escaped JSON fragment: "opendaylight::log_levels": { ... }
logging_config='\"opendaylight::log_levels\": {'
for kv in ${CONTROLLERDEBUGMAP}; do
# NOTE(review): ${module}/${level} are split out of ${kv} in lines not
# visible here; confirm the split logic.
echo "module: $module, level: $level"
# shellcheck disable=SC2157
if [ -n "${module}" ] && [ -n "${level}" ]; then
orgmodule="org.opendaylight.${module}"
logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
# replace the trailing comma with a closing brace followed by trailing comma
logging_config=${logging_config%,}" },"
echo "$logging_config"
# fine a sane line number to inject the custom logging json
# NOTE(review): lineno is computed on OPENSTACK_CONTROL_NODE_1_IP but the
# sed below runs on ${controller_ip} — these may differ; verify this is
# intentional (i.e. all control nodes share the same file layout).
lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
# We purposely want these variables to expand client-side
# shellcheck disable=SC2029
ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
} # function configure_karaf_log_for_apex()
# Rewrite the puppet-opendaylight extra_features list on the given controller
# to match ${ACTUALFEATURES}; no-op when ACTUALFEATURES is empty.
function configure_odl_features_for_apex() {
# if the environment variable $ACTUALFEATURES is not null, then rewrite
# the puppet config file with the features given in that variable, otherwise
# this function is a noop
local -r controller_ip=$1
local -r config_file=/etc/puppet/hieradata/service_configs.json
# Generate a helper script to run remotely; backslash-escaped $ defers
# expansion to the remote shell, $config_file expands here.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in "\${ACTUALFEATURES//,/ }"; do
sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
echo "Modified puppet-opendaylight service_configs.json..."
echo "Feature configuration script..."
cat /tmp/set_odl_features.sh
if [ -n "${ACTUALFEATURES}" ]; then
scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
} # function configure_odl_features_for_apex()
# Map the number of OpenStack systems to a topology string and export it as
# OPENSTACK_TOPO (cmb=combo, ctl=control, cmp=compute node counts).
function get_os_deploy() {
# Default to $NUM_OPENSTACK_SYSTEM when no count is passed in.
local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
case ${num_systems} in
OPENSTACK_TOPO="1cmb-0ctl-0cmp"
OPENSTACK_TOPO="1cmb-0ctl-1cmp"
OPENSTACK_TOPO="0cmb-1ctl-2cmp"
export OPENSTACK_TOPO
# Build the space-separated list of robot suites to run and assign it to the
# caller-named variable (indirect assignment via eval at the end).
function get_test_suites() {
#let the caller pick the name of the variable we will assign the suites to
local __suite_list=$1
echo "Locating test plan to use..."
# Prefer the stream-specific test plan; fall back to the generic one.
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
# Append the JVM-plots suite when elasticsearch monitoring is enabled.
if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot"
echo "${add_test}" >> "$testplan_filepath"
echo "Changing the testplan path..."
# Rewrite the repo-relative "integration" prefix to the workspace path.
sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
# Strip comments/blank lines and join the remaining suites with spaces.
suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
workpath="${WORKSPACE}/test/csit/suites"
for suite in ${SUITES}; do
fullsuite="${workpath}/${suite}"
if [ -z "${suite_list}" ]; then
suite_list+=${fullsuite}
suite_list+=" "${fullsuite}
# Indirect assignment: store the result in the caller's variable.
eval "$__suite_list='$suite_list'"
# Locate and source each script listed in a script/config plan file.
# NOTE(review): ${type} and ${plan} are set in lines not visible here —
# presumably locals derived from the function arguments; confirm.
function run_plan() {
printf "Locating %s plan to use...\\n" "${type}"
# Resolution order: explicit plan, stream-specific plan, generic plan.
plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
if [ ! -f "${plan_filepath}" ]; then
plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
if [ ! -f "${plan_filepath}" ]; then
plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
if [ -f "${plan_filepath}" ]; then
printf "%s plan exists!!!\\n" "${type}"
printf "Changing the %s plan path...\\n" "${type}"
# Rewrite the repo-relative "integration" prefix to the workspace path.
sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
cat "${type}plan.txt"
# shellcheck disable=SC2013
for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
printf "Executing %s...\\n" "${line}"
# shellcheck source=${line} disable=SC1091
printf "Finished running %s plans\\n" "${type}"
} # function run_plan()
# Run scripts to support JVM monitoring.
# Configures decanter/elasticsearch collection unless monitoring is disabled;
# the commented-out lines are the legacy script-based invocations these
# helper calls replaced.
function add_jvm_support()
if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
set_elasticsearch_attribute "${ELASTICSEARCHATTRIBUTE}"
#run_script="${WORKSPACE}/test/csit/scripts/set_elasticsearch_attribute.sh ${ELASTICSEARCHATTRIBUTE}"
#printf "Executing %s...\\n" "${run_script}"
## shellcheck source=${line} disable=SC1091
#source "${run_script}"
set_jvm_common_attribute
#run_script="${WORKSPACE}/test/csit/scripts/set_jvm_common_attribute.sh"
#printf "Executing %s...\\n" "${run_script}"
## shellcheck source=${line} disable=SC1091
#source "${run_script}"
} # function add_jvm_support()
#Expected input parameter: long/short/a number
# Writes a decanter scheduler config with the chosen polling period and
# copies it to every ODL controller.
function set_elasticsearch_attribute()
# NOTE(review): $short and $long bounds are defined in lines not visible
# here; confirm their values before relying on this range check.
# shellcheck disable=SC2166
if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge $short -a "$1" -le $long ]; then
cat > "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg <<EOF
echo "Copying config files to ODL Controller folder"
# shellcheck disable=SC2086
for i in $(seq 1 ${NUM_ODL_SYSTEM})
# Indirect lookup: ODL_SYSTEM_<i>_IP holds the node address.
CONTROLLERIP=ODL_SYSTEM_${i}_IP
echo "Setup long duration config to ${!CONTROLLERIP}"
# We purposely want these variables to expand client-side
# shellcheck disable=SC2029
ssh "${!CONTROLLERIP}" "mkdir -p /tmp/${BUNDLEFOLDER}/etc/opendaylight/karaf/"
scp "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
} #function set_elasticsearch_attribute
# Generate the decanter JMX-collector and elasticsearch-appender configs plus
# a per-node elasticsearch.yml and startup script, then push them to every
# ODL controller and start elasticsearch there.
function set_jvm_common_attribute()
cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg <<EOF
object.name=java.lang:type=*,name=*
cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg <<EOF
object.name=java.lang:type=*
# shellcheck disable=SC2086
# One config set per controller; CLUSTERNAME is randomized per node.
for i in $(seq 1 ${NUM_ODL_SYSTEM})
CONTROLLERIP=ODL_SYSTEM_${i}_IP
CLUSTERNAME=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 12)
cat > "${WORKSPACE}"/elasticsearch.yml <<EOF
cluster.name: ${CLUSTERNAME}
network.host: ${!CONTROLLERIP}
discovery.zen.ping.multicast.enabled: false
cat > "${WORKSPACE}"/org.apache.karaf.decanter.appender.elasticsearch.cfg <<EOF
host=${!CONTROLLERIP}
clusterName=${CLUSTERNAME}
cat > "${WORKSPACE}"/elasticsearch_startup.sh <<EOF
cd /tmp/elasticsearch/elasticsearch-1.7.5
if [ -d "data" ]; then
echo "data directory exists, deleting...."
echo "data directory does not exist"
cd /tmp/elasticsearch
echo "Starting Elasticsearch node"
sudo /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch > /dev/null 2>&1 &
ls -al /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch
echo "Setup ODL_SYSTEM_IP specific config files for ${!CONTROLLERIP} "
# Echo the generated configs into the console log for debugging.
cat "${WORKSPACE}"/org.apache.karaf.decanter.appender.elasticsearch.cfg
cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg
cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg
cat "${WORKSPACE}"/elasticsearch.yml
echo "Copying config files to ${!CONTROLLERIP}"
scp "${WORKSPACE}"/org.apache.karaf.decanter.appender.elasticsearch.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
scp "${WORKSPACE}"/elasticsearch.yml "${!CONTROLLERIP}":/tmp/
ssh "${!CONTROLLERIP}" "sudo ls -al /tmp/elasticsearch/"
ssh "${!CONTROLLERIP}" "sudo mv /tmp/elasticsearch.yml /tmp/elasticsearch/elasticsearch-1.7.5/config/"
ssh "${!CONTROLLERIP}" "cat /tmp/elasticsearch/elasticsearch-1.7.5/config/elasticsearch.yml"
echo "Copying the elasticsearch_startup script to ${!CONTROLLERIP}"
cat "${WORKSPACE}"/elasticsearch_startup.sh
scp "${WORKSPACE}"/elasticsearch_startup.sh "${!CONTROLLERIP}":/tmp
ssh "${!CONTROLLERIP}" 'bash /tmp/elasticsearch_startup.sh'
ssh "${!CONTROLLERIP}" 'ps aux | grep elasticsearch'
} #function set_jvm_common_attribute
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
# NOTE(review): the `function timer()` line and the start_time=$1 assignment
# are not visible here; the body below presumably belongs to that function.
if [ $# -eq 0 ]; then
# return the current time
printf "%s" "$(date "+%s")"
end_time=$(date "+%s")
# Guard against an empty timer argument: treat it as "now" (zero elapsed).
if [ -z "$start_time" ]; then
start_time=$end_time;
delta_time=$((end_time - start_time))
ds=$((delta_time % 60))
dm=$(((delta_time / 60) % 60))
dh=$((delta_time / 3600))
# return the elapsed time
printf "%d:%02d:%02d" $dh $dm $ds
# convert commas in csv strings to spaces (ssv)
# NOTE(review): the function definition line and the csv=$1 assignment are
# not visible here; ${csv} is presumably the first argument.
if [ -n "${csv}" ]; then
# Replace commas with spaces, then collapse repeated spaces to one.
ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
# Test whether a devstack feature is in the ENABLE_OS_SERVICES list.
# NOTE(review): ${feature} is presumably local feature=$1 (line not visible
# here); callers compare the function's output against "1".
function is_openstack_feature_enabled() {
for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
if [ "${enabled_feature}" == "${feature}" ]; then
# Dump all job parameters for archiving (the body below appears to be
# here-document content — do not insert shell comments between these lines).
# shellcheck disable=SC2153
function print_job_parameters() {
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}
# Start a background tcpdump on the remote node; capture file name embeds
# the prefix, node ip and (underscored) filter expression.
# NOTE(review): ${prefix}/${ip}/${filter} locals are presumably $1..$3
# (declaration lines not visible here) — confirm.
function tcpdump_start() {
filter_=${filter// /_}
printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
# $filter needs to be parsed client-side
# shellcheck disable=SC2029
ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
${SSH} "${ip}" "ps -ef | grep tcpdump"
# Stop tcpdump on the remote node and xz-compress the captured .pcap files
# so the log-collection step picks them up.
function tcpdump_stop() {
printf "node %s: stopping tcpdump\\n" "$ip"
${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
${SSH} "${ip}" "sudo pkill -f tcpdump"
${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
# copy_logs will copy any *.xz files
# Collect the list of files on the hosts
# Captures remote file listings (find + rsync --list-only) into the given
# folder, both as compressed tarballs and plain text.
function collect_files() {
${SSH} "${ip}" "mkdir -p ${finddir}"
${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
# NOTE(review): this line and the next both list /var (find2.txt vs
# find.var.txt) — the find2.txt run looks redundant; confirm and drop one.
${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
scp "${ip}":/tmp/find.tar.xz "${folder}"
mkdir -p "${finddir}"
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
cp /tmp/rsync.tar.xz "${folder}"
572 # List of extra services to extract from journalctl
573 # Add new services on a separate line, in alpha order, add \ at the end
574 extra_services_cntl=" \
578 openvswitch.service \
579 ovs-vswitchd.service \
580 ovsdb-server.service \
581 rabbitmq-server.service \
584 extra_services_cmp=" \
586 openvswitch.service \
587 ovs-vswitchd.service \
588 ovsdb-server.service \
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
# NOTE(review): ${ip} and ${folder} are presumably locals $1/$2 (declaration
# lines not visible here) — confirm.
function collect_openstack_logs() {
local -r node_type=${3}
local oslogs="${folder}/oslogs"
printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
# There are always some logs in /opt/stack/logs and this also covers the
# pre-queens branches which always use /opt/stack/logs
rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links
# Starting with queens break out the logs from journalctl
if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Generate a remote helper script; \$-escapes defer expansion remotely.
cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"
function extract_from_journal() {
local -r services=\${1}
local -r folder=\${2}
local -r node_type=\${3}
printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
for service in \${services}; do
# strip anything before @ and anything after .
# devstack@g-api.service will end as g-api
service_="\${service#*@}"
service_="\${service_%.*}"
sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "\${node_type}" = "control" ]; then
extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
cat "${WORKSPACE}"/collect_openstack_logs.sh
scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Gather logs and configs from every ODL controller, OpenStack control and
# compute node into ${WORKSPACE}/archives, plus tempest results if present.
function collect_netvirt_logs() {
set +e # We do not want to create red dot just because something went wrong while fetching logs.
# Generate a debug script to be copied to and run on each OpenStack node.
cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
echo -e "\\nsudo netstat -punta\\n"
echo -e "\\nsudo getenforce\\n"
echo -e "\\nsudo systemctl status httpd\\n"
sudo systemctl status httpd
source /opt/stack/devstack/openrc admin admin
echo -e "\\nenv after openrc\\n"
echo -e "\\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
echo -e "\\nsudo mount"
echo -e "\\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
echo -e "\\njournalctl > /tmp/journalctl.log\\n"
sudo journalctl > /tmp/journalctl.log
echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
# Since this log collection work is happening before the archive build macro which also
# creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
# archives build step will essentially be a noop.
mkdir -p "${WORKSPACE}"/archives
mv /tmp/changes.txt "${WORKSPACE}"/archives
mv /tmp/validations.txt "${WORKSPACE}"/archives
mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/
# --- ODL controller nodes ---
# FIXME: Do not create .tar and gzip before copying.
for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
CONTROLLERIP=ODL_SYSTEM_${i}_IP
echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
NODE_FOLDER="odl_${i}"
mkdir -p "${NODE_FOLDER}"
echo "Lets's take the karaf thread dump again..."
ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
# $pid needs to be parsed client-side
# shellcheck disable=SC2029
ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
echo "killing karaf process..."
# shellcheck disable=SC2016
${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
"${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
# Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
mv -- *_threads* "${NODE_FOLDER}"
mv ps_* "${NODE_FOLDER}"
mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
print_job_parameters > "${WORKSPACE}"/archives/params.txt
# --- OpenStack control (or combo) nodes ---
for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
echo "collect_logs: for openstack combo node ip: ${!OSIP}"
NODE_FOLDER="combo_${i}"
echo "collect_logs: for openstack control node ip: ${!OSIP}"
NODE_FOLDER="control_${i}"
mkdir -p "${NODE_FOLDER}"
tcpdump_stop "${!OSIP}"
scp extra_debug.sh "${!OSIP}":/tmp
# Capture compute logs if this is a combo node
if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
collect_files "${!OSIP}" "${NODE_FOLDER}"
${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
# qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
# --- OpenStack compute nodes ---
for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
echo "collect_logs: for openstack compute node ip: ${!OSIP}"
NODE_FOLDER="compute_${i}"
mkdir -p "${NODE_FOLDER}"
tcpdump_stop "${!OSIP}"
scp extra_debug.sh "${!OSIP}":/tmp
${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
collect_files "${!OSIP}" "${NODE_FOLDER}"
${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
# --- tempest results (control node 1) ---
DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
# Look for tempest test results in the $TESTREPO dir and copy if found
if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
mkdir -p "${TEMPEST_LOGS_DIR}"
scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
} # collect_netvirt_logs()
859 # Utility function for joining strings.
865 final=${final}${delim}${str}
# Build the global nodes[] array of controller IPs — one entry per ODL
# system — and join them into the global nodes_list string consumed by
# the cluster configuration script (configure_cluster.sh).
871 function get_nodes_list() {
872 # Create the string for nodes
873 for i in $(seq 1 "${NUM_ODL_SYSTEM}") ; do
# Indirect expansion: ${!CONTROLLERIP} dereferences e.g. ${ODL_SYSTEM_1_IP}.
874 CONTROLLERIP=ODL_SYSTEM_${i}_IP
875 nodes[$i]=${!CONTROLLERIP}
# NOTE(review): 'join' is a helper defined earlier in this file; its
# delimiter is not visible in this view — presumably a comma. TODO confirm.
878 nodes_list=$(join "${nodes[@]}")
# Compute and export the Karaf feature lists to install:
#   ACTUALFEATURES          — comma-separated (for featuresBoot / karaf config)
#   SPACE_SEPARATED_FEATURES — same list, space-separated (for feature:install)
# Also bumps CONTROLLERMEM to 3G when the full compatible-with-all scope is used.
882 function get_features() {
883 if [ "${CONTROLLERSCOPE}" == 'all' ]; then
884 ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
# The 'all' scope loads far more bundles, so give the JVM more heap.
885 export CONTROLLERMEM="3072m"
887 ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
890 if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
891 # Add decanter features to allow JVM monitoring
892 ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch"
895 # Some versions of jenkins job builder result in feature list containing spaces
896 # and ending in newline. Remove all that.
897 ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
898 echo "ACTUALFEATURES: ${ACTUALFEATURES}"
900 # In the case that we want to install features via karaf shell, a space separated list of
901 # ACTUALFEATURES IS NEEDED
902 SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
903 echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"
# Exported so the generated configuration/post-startup scripts (heredocs
# below) can embed these values when they are written out.
905 export ACTUALFEATURES
906 export SPACE_SEPARATED_FEATURES
909 # Create the configuration script to be run on controllers.
910 function create_configuration_script() {
# Unquoted EOF delimiter: plain ${VAR} expressions are expanded NOW, on the
# Jenkins minion (BUNDLEFOLDER, ACTUALFEATURES, nodes_list, ...), while
# \$-escaped expressions (e.g. \$1, \${FEATURE_TEST_STRING}) are left
# literal for the controller to evaluate when the script runs there.
911 cat > "${WORKSPACE}"/configuration-script.sh <<EOF
913 source /tmp/common-functions.sh ${BUNDLEFOLDER}
915 echo "Changing to /tmp"
918 echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
919 wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'
921 echo "Extracting the new controller..."
924 echo "Adding external repositories..."
925 sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
928 if [[ "$USEFEATURESBOOT" == "True" ]]; then
929 echo "Configuring the startup features..."
930 sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
933 FEATURE_TEST_STRING="features-integration-test"
934 KARAF_VERSION=${KARAF_VERSION:-karaf4}
935 if [[ "$KARAF_VERSION" == "karaf4" ]]; then
936 FEATURE_TEST_STRING="features-test"
939 sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
940 if [[ ! -z "${REPO_URL}" ]]; then
941 sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
945 configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"
947 set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"
949 echo "Listing all open ports on controller system..."
952 # Copy shard file if exists
953 if [ -f /tmp/custom_shard_config.txt ]; then
954 echo "Custom shard config exists!!!"
955 echo "Copying the shard config..."
956 cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
959 echo "Configuring cluster"
960 /tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}
962 echo "Dump akka.conf"
965 echo "Dump modules.conf"
968 echo "Dump module-shards.conf"
969 cat ${MODULESHARDSCONF}
971 # cat > ${WORKSPACE}/configuration-script.sh <<EOF
974 # Create the startup script to be run on controllers.
975 function create_startup_script() {
# Unquoted EOF: ${BUNDLEFOLDER} is expanded while writing the script on the
# Jenkins minion; the resulting file contains the concrete paths.
976 cat > "${WORKSPACE}"/startup-script.sh <<EOF
977 echo "Redirecting karaf console output to karaf_console.log"
978 export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
979 mkdir -p /tmp/${BUNDLEFOLDER}/data/log
981 echo "Starting controller..."
982 /tmp/${BUNDLEFOLDER}/bin/start
984 # cat > ${WORKSPACE}/startup-script.sh <<EOF
# Generate ${WORKSPACE}/post-startup-script.sh: waits for the Karaf ssh
# console (port 8101), optionally installs features one-by-one when
# USEFEATURESBOOT != True, waits for the infrautils "System ready" log
# marker, and aborts on known fatal log messages. As with the other
# generators, unquoted EOF means ${...} expands now and \$... expands on
# the controller at run time.
987 function create_post_startup_script() {
988 cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
989 # wait up to 60s for karaf port 8101 to be opened, polling every 5s
991 until [[ \$loop_count -ge 12 ]]; do
992 netstat -na | grep ":::8101" && break;
993 loop_count=\$[\$loop_count+1];
997 # This workaround is required for Karaf decanter to work proper
998 # The bundle:refresh command does not fail if the decanter bundles are not present
999 echo "ssh to karaf console to do bundle refresh of decanter jmx collector"
1000 sshpass -p karaf ssh -o StrictHostKeyChecking=no \
1001 -o UserKnownHostsFile=/dev/null \
1003 -p 8101 karaf@localhost \
1004 "bundle:refresh org.apache.karaf.decanter.collector.jmx && bundle:refresh org.apache.karaf.decanter.appender.elasticsearch"
1006 if [[ "$USEFEATURESBOOT" != "True" ]]; then
1008 echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
1009 for feature in ${SPACE_SEPARATED_FEATURES}; do
1010 sshpass -p karaf ssh -o StrictHostKeyChecking=no \
1011 -o UserKnownHostsFile=/dev/null \
1013 -p 8101 karaf@localhost \
1014 feature:install --no-auto-refresh \$feature;
1017 echo "ssh to karaf console to list -i installed features"
1018 sshpass -p karaf ssh -o StrictHostKeyChecking=no \
1019 -o UserKnownHostsFile=/dev/null \
1021 -p 8101 karaf@localhost \
1025 echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
1026 for i in {1..36}; do
1028 grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
1029 if [ \$? -eq 0 ]; then
1030 echo "Controller is UP"
1035 # if we ended up not finding ready status in the above loop, we can output some debugs
1036 grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
1037 if [ $? -ne 0 ]; then
1038 echo "Timeout Controller DOWN"
1039 echo "Dumping first 500K bytes of karaf log..."
1040 head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
1041 echo "Dumping last 500K bytes of karaf log..."
1042 tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
1043 echo "Listing all open ports on controller system"
1048 echo "Listing all open ports on controller system..."
1051 function exit_on_log_file_message {
1052 echo "looking for \"\$1\" in log file"
1053 if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
1054 echo ABORTING: found "\$1"
1055 echo "Dumping first 500K bytes of karaf log..."
1056 head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
1057 echo "Dumping last 500K bytes of karaf log..."
1058 tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
1063 exit_on_log_file_message 'BindException: Address already in use'
1064 exit_on_log_file_message 'server is unhealthy'
1066 # cat > ${WORKSPACE}/post-startup-script.sh <<EOF
1069 # Copy over the configuration script and configuration files to each controller
1070 # Execute the configuration script on each controller.
1071 function copy_and_run_configuration_script() {
# Loops over every ODL system; ${!CONTROLLERIP} indirectly reads the
# per-node env var ODL_SYSTEM_<i>_IP. The member index ${i} is passed as
# the script's \$1 (used by configure_cluster.sh on the remote side).
1072 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
1073 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
1074 echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
1075 scp "${WORKSPACE}"/configuration-script.sh "${!CONTROLLERIP}":/tmp/
1076 # $i needs to be parsed client-side
1077 # shellcheck disable=SC2029
1078 ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
1082 # Copy over the startup script to each controller and execute it.
1083 function copy_and_run_startup_script() {
# Same fan-out pattern as copy_and_run_configuration_script: indirect
# ${!CONTROLLERIP} resolves ODL_SYSTEM_<i>_IP for each member.
1084 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
1085 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
1086 echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
1087 scp "${WORKSPACE}"/startup-script.sh "${!CONTROLLERIP}":/tmp/
1088 ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
# Copy the post-startup script to each controller and run it, passing a
# monotonically increasing seed_index as the remote script's \$1.
# NOTE(review): seed_index is initialized on a line not visible in this
# view — presumably just before this loop. TODO confirm.
1092 function copy_and_run_post_startup_script() {
1094 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
1095 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
1096 echo "Execute the post startup script on controller ${!CONTROLLERIP}"
1097 scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
1098 # $seed_index needs to be parsed client-side
1099 # shellcheck disable=SC2029
1100 ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
# True only on the last iteration (i == NUM_ODL_SYSTEM).
1101 if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
# Capture a pre-test jstack thread dump of the Karaf JVM on every
# controller into ${WORKSPACE}/karaf_<i>_<pid>_threads_before.log.
1107 function dump_controller_threads() {
1108 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
1109 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
1110 echo "Lets's take the karaf thread dump"
1111 ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
# Extract the karaf PID (field 2 of ps aux) from the saved listing;
# 'grep -v grep' drops the grep process itself.
1112 pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
1113 echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
1114 # $i needs to be parsed client-side
1115 # shellcheck disable=SC2029
# '|| true' keeps the collection best-effort: a failed jstack must not
# abort the job.
1116 ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
1120 # Function to build OVS from git repo
# Generates and runs /tmp/build_ovs.sh on the remote host: clones OVS,
# optionally applies NSH patches (for v2.6.1-nsh), builds RPMs with
# rpm-fedora/dkms, then copies the resulting RPMs back to ${rpm_path}.
# Args: $1 = remote IP, $2 = OVS git ref/version, $3 = local RPM dest dir
# (the $1/$2 'local' lines are not visible in this view — TODO confirm).
1121 function build_ovs() {
1124 local -r rpm_path="$3"
1126 echo "Building OVS ${version} on ${ip} ..."
# Unquoted EOF: ${version} expands now; \$-escaped vars (K_VERSION,
# YUM_OPTS, ...) are evaluated on the remote build host.
1127 cat > "${WORKSPACE}"/build_ovs.sh << EOF
1130 echo '---> Building openvswitch version ${version}'
1132 # Install running kernel devel packages
1133 K_VERSION=\$(uname -r)
1134 YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
1135 # Install centos-release to update vault repos from which to fetch
1136 # kernel devel packages
1137 sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
1138 sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}
1143 git clone https://github.com/openvswitch/ovs.git
1146 if [ "${version}" = "v2.6.1-nsh" ]; then
1148 echo "Will apply nsh patches for OVS version 2.6.1"
1149 git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
1150 git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
1152 git checkout ${version}
1155 # On early versions of OVS, flake warnings would fail the build.
1157 sudo pip uninstall -y flake8
1159 # Get rid of sphinx dep as it conflicts with the already
1160 # installed one (via pip). Docs wont be built.
1161 sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in
1163 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
1164 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
1165 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
1166 sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
1167 rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
1169 ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
1170 make rpm-fedora RPMBUILD_OPT="--without check"
1171 # Build dkms only for now
1172 # make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
1173 rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec
1175 mkdir -p /tmp/ovs_rpms
1176 cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/
1182 scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
1183 ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
1184 scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
1185 ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
1188 # Install OVS RPMs from yum repo
# Generates and runs /tmp/install_ovs.sh on the remote host: discovers the
# openvswitch packages offered by ${rpm_repo} (preferring dkms over kmod),
# removes any installed OVS, installs/versionlocks the new packages, starts
# the service, and fails if the kernel module was not actually replaced.
# Args: $1 = remote IP, $2 = RPM repo URL (the $1 'local' line is not
# visible in this view — TODO confirm).
1189 function install_ovs_from_repo() {
1191 local -r rpm_repo="$2"
1193 echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
# Unquoted EOF: ${rpm_repo} expands now; \$-escaped vars are evaluated on
# the remote host when the script runs.
1194 cat > "${WORKSPACE}"/install_ovs.sh << EOF
1197 echo '---> Installing openvswitch from ${rpm_repo}'
1199 # We need repoquery from yum-utils.
1200 sudo yum -y install yum-utils
1202 # Get openvswitch packages offered by custom repo.
1203 # dkms package will have priority over kmod.
1204 OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
1205 OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
1206 OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
1207 OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
1208 OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
1209 [ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
1210 [ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
1211 [ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"
1213 # Bail with error if custom repo was provided but we could not
1214 # find suitable packages there.
1215 [ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1
1217 # Install kernel & devel packages for the openvswitch dkms package.
1218 if [ -n "\${OVS_DKMS_PKG}" ]; then
1219 # install centos-release to update vault repos from which to fetch
1220 # kernel devel packages
1221 sudo yum -y install centos-release
1222 K_VERSION=\$(uname -r)
1223 YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
1224 sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
1227 PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')
1229 # Install OVS offered by custom repo.
1230 sudo yum-config-manager --add-repo "${rpm_repo}"
1231 sudo yum -y versionlock delete openvswitch-*
1232 sudo yum -y remove openvswitch-*
1233 sudo yum -y --nogpgcheck install \${OVS_PKGS}
1234 sudo yum -y versionlock add \${OVS_PKGS}
1236 # Most recent OVS versions have some incompatibility with certain versions of iptables
1237 # This below line will overcome that problem.
1238 sudo modprobe openvswitch
1240 # Start OVS and print details
1241 sudo systemctl start openvswitch
1242 sudo systemctl enable openvswitch
1243 sudo ovs-vsctl --retry -t 5 show
1244 sudo modinfo openvswitch
1246 # dkms rpm install can fail silently (probably because the OVS version is
1247 # incompatible with the running kernel), verify module was updated.
1248 NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
1249 [ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
1252 scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
1253 ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
1256 # Install OVS RPMS from path
1257 function install_ovs_from_path() {
1259 local -r rpm_path="$2"
1261 echo "Creating OVS RPM repo on ${ip} ..."
1262 ${SSH} "${ip}" "mkdir -p /tmp/ovs_rpms"
1263 scp -r "${rpm_path}"/* "${ip}":/tmp/ovs_rpms
1264 ${SSH} "${ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
1265 install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms