printf '%s\n' "common-functions.sh is being sourced"

# Basic controller configuration settings: locations of the Karaf
# configuration files inside the unpacked distribution under /tmp,
# plus the default controller heap size.
export MAVENCONF="/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg"
export FEATURESCONF="/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg"
export CUSTOMPROP="/tmp/${BUNDLEFOLDER}/etc/custom.properties"
export LOGCONF="/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg"
export MEMCONF="/tmp/${BUNDLEFOLDER}/bin/setenv"
export CONTROLLERMEM="2048m"
# Cluster specific configuration settings: initial clustering config files.
export AKKACONF="/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf"
export MODULESCONF="/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf"
export MODULESHARDSCONF="/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf"
# Dump the common-functions configuration environment for debugging.
# NOTE(review): the "cat << EOF" opener and the closing EOF/"}" lines
# are not visible in this chunk; the unindented lines below are
# here-doc content, so no comments are placed among them (they would
# be printed verbatim).
function print_common_env() {
common-functions environment:
MAVENCONF: ${MAVENCONF}
ACTUALFEATURES: ${ACTUALFEATURES}
FEATURESCONF: ${FEATURESCONF}
CUSTOMPROP: ${CUSTOMPROP}
CONTROLLERMEM: ${CONTROLLERMEM}
MODULESCONF: ${MODULESCONF}
MODULESHARDSCONF: ${MODULESHARDSCONF}
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
# Arguments: $1 - JDK home path (read below as ${java_home}; its "local"
#            declaration is not visible in this chunk), $2 - max heap
#            (e.g. "2048m"), $3 - presumably the setenv file path read as
#            ${memconf} — TODO confirm against the full file.
function set_java_vars() {
local -r controllermem=$2
# NOTE(review): plain echo does not interpret \n without -e, so the
# backslash sequences print literally under bash.
echo "Configure\n java home: ${java_home}\n max memory: ${controllermem}\n memconf: ${memconf}"
# Uncomment/override JAVA_HOME and raise JAVA_MAX_MEM in the Karaf setenv file.
sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
# Point the system-wide "java" alternative at the requested JDK.
echo "Set Java version"
sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
echo "JDK default version ..."
export JAVA_HOME="${java_home}"
# shellcheck disable=SC2037
# Resolve symlinks so the log shows the concrete java binary in use.
JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
# shellcheck disable=SC2034
# foo appears unused. Verify it or export it.
# Tune Karaf logging: enlarge the rollover size limits and apply the
# per-module debug levels from the caller-supplied map.
# Arguments: $1 - karaf version string, $2 - "module:level ..." map.
function configure_karaf_log() {
local -r karaf_version=$1
local -r controllerdebugmap=$2
# Check what the logging.cfg file is using for the logging api: log4j or log4j2
# NOTE(review): the branch that derives ${logapi} from this grep result is
# not visible in this chunk.
grep "log4j2" ${LOGCONF}
echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
if [ "${logapi}" == "log4j2" ]; then
# FIXME: Make log size limit configurable from build parameter.
# From Neon the default karaf file size is 64 MB
sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
# Fluorine still uses 16 MB
sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
# Quiet the noisy yangtools schema resolver; log4j2 logger keys use
# underscores in place of dots.
orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
orgmodule_="${orgmodule//./_}"
# NOTE(review): ".name = WARN" looks suspicious — elsewhere in this file
# .name is set to the module FQN and .level to the level; confirm upstream.
echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
# FIXME: Make log size limit configurable from build parameter.
sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
# Add custom logging levels
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
# values like "module:level module2:level2" where module is abbreviated and
# does not include "org.opendaylight."
echo "controllerdebugmap: ${controllerdebugmap}"
if [ -n "${controllerdebugmap}" ]; then
for kv in ${controllerdebugmap}; do
echo "module: $module, level: $level"
# shellcheck disable=SC2157
if [ -n "${module}" ] && [ -n "${level}" ]; then
orgmodule="org.opendaylight.${module}"
if [ "${logapi}" == "log4j2" ]; then
orgmodule_="${orgmodule//./_}"
echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
# log4j (v1) takes a single "logger = level" line instead.
echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
echo "cat ${LOGCONF}"
} # function configure_karaf_log()
# Inject CONTROLLERDEBUGMAP log levels into the apex (TripleO) hiera data
# on the control node so the puppet deploy applies them.
# Arguments: $1 - control node IP to modify.
function configure_karaf_log_for_apex() {
# TODO: add the extra steps to this function to do any extra work
# in this apex environment like we do in our standard environment.
# EX: log size, rollover, etc.
# Modify ODL Log Levels, if needed, for new distribution. This will modify
# the control nodes hiera data which will be used during the puppet deploy
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
# values like "module:level module2:level2" where module is abbreviated and
# does not include "org.opendaylight."
local -r controller_ip=$1
# shellcheck disable=SC2153
echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
if [ -n "${CONTROLLERDEBUGMAP}" ]; then
# Build an escaped JSON fragment for hiera: "opendaylight::log_levels": {...}
logging_config='\"opendaylight::log_levels\": {'
for kv in ${CONTROLLERDEBUGMAP}; do
echo "module: $module, level: $level"
# shellcheck disable=SC2157
if [ -n "${module}" ] && [ -n "${level}" ]; then
orgmodule="org.opendaylight.${module}"
logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
# replace the trailing comma with a closing brace followed by trailing comma
logging_config=${logging_config%,}" },"
# find a sane line number to inject the custom logging json
# NOTE(review): the grep targets OPENSTACK_CONTROL_NODE_1_IP while the sed
# below targets ${controller_ip} — verify these are meant to differ.
lineno=$(ssh $OPENSTACK_CONTROL_NODE_1_IP "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
ssh $controller_ip "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
ssh $controller_ip "sudo cat /etc/puppet/hieradata/service_configs.json"
} # function configure_karaf_log_for_apex()
# Rewrite the puppet-opendaylight extra_features list on an apex control
# node to match ${ACTUALFEATURES}; no-op when ACTUALFEATURES is empty.
# Arguments: $1 - control node IP.
function configure_odl_features_for_apex() {
# if the environment variable $ACTUALFEATURES is not null, then rewrite
# the puppet config file with the features given in that variable, otherwise
# this function is a noop
local -r controller_ip=$1
local -r config_file=/etc/puppet/hieradata/service_configs.json
# Generate a helper script (run remotely below) that empties the
# extra_features array and re-adds each feature via jq. Lines until the
# (not visible here) EOF are here-doc content — no comments among them.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in $(echo $ACTUALFEATURES | sed "s/,/ /g"); do
sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
echo "Modified puppet-opendaylight service_configs.json..."
echo "Feature configuration script..."
cat /tmp/set_odl_features.sh
# Only touch the remote node when there is actually something to set.
if [ -n "${ACTUALFEATURES}" ]; then
scp /tmp/set_odl_features.sh $controller_ip:/tmp/set_odl_features.sh
ssh $controller_ip "sudo bash /tmp/set_odl_features.sh"
} # function configure_odl_features_for_apex()
# Map the number of OpenStack systems to a topology string and export it
# as OPENSTACK_TOPO (cmb=combo, ctl=control, cmp=compute counts).
# Arguments: $1 - node count, defaults to $NUM_OPENSTACK_SYSTEM.
function get_os_deploy() {
local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
# NOTE(review): the case pattern labels (e.g. 1), 2), *)) are not visible
# in this chunk; assignments below are the per-count arms.
case ${num_systems} in
OPENSTACK_TOPO="1cmb-0ctl-0cmp"
OPENSTACK_TOPO="1cmb-0ctl-1cmp"
OPENSTACK_TOPO="0cmb-1ctl-2cmp"
export OPENSTACK_TOPO
# Build the space-separated list of robot suite paths to run and assign it
# into the caller-named variable (out-param via eval).
# Arguments: $1 - name of the variable to receive the suite list.
function get_test_suites() {
#let the caller pick the name of the variable we will assign the suites to
local __suite_list=$1
echo "Locating test plan to use..."
# Prefer the stream-specific testplan; fall back to the generic one.
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
echo "Changing the testplan path..."
# Rewrite "integration" prefixes into absolute workspace paths.
cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
suite_list=$(egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
# Otherwise build the list from the explicit SUITES names.
workpath="${WORKSPACE}/test/csit/suites"
for suite in ${SUITES}; do
fullsuite="${workpath}/${suite}"
if [ -z "${suite_list}" ]; then
suite_list+=${fullsuite}
suite_list+=" "${fullsuite}
# Assign into the caller's variable name.
eval $__suite_list="'$suite_list'"
# Locate a script/config plan file (by type) and source each non-comment
# line it lists. NOTE(review): the local declarations of ${type} and
# ${plan} are not visible in this chunk.
function run_plan() {
printf "Locating %s plan to use...\n" "${type}"
# Resolution order: explicit plan, stream testplan, generic testplan.
plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
if [ ! -f "${plan_filepath}" ]; then
plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
if [ ! -f "${plan_filepath}" ]; then
plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
if [ -f "${plan_filepath}" ]; then
printf "%s plan exists!!!\n" "${type}"
printf "Changing the %s plan path...\n" "${type}"
# Rewrite "integration" prefixes into workspace-absolute paths.
cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
# Execute every non-blank, non-comment entry of the plan.
for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
printf "Executing %s...\n" "${line}"
# shellcheck source=${line} disable=SC1091
printf "Finished running %s plans\n" "${type}"
} # function run_plan()
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
# NOTE(review): the function definition line and the start_time assignment
# are not visible in this chunk.
if [ $# -eq 0 ]; then
# return the current time
printf "%s" "$(date "+%s")"
end_time=$(date "+%s")
# Guard against an empty timer argument so the delta stays 0.
if [ -z "$start_time" ]; then
start_time=$end_time;
# Decompose the delta into h:mm:ss.
delta_time=$((end_time - start_time))
ds=$((delta_time % 60))
dm=$(((delta_time / 60) % 60))
dh=$((delta_time / 3600))
# return the elapsed time
printf "%d:%02d:%02d" $dh $dm $ds
# convert commas in csv strings to spaces (ssv)
# NOTE(review): the csv2ssv function definition line and the echo of the
# result are not visible in this chunk; the second sed collapses runs of
# spaces into one.
if [ -n "${csv}" ]; then
ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
# Test whether $1 appears in the comma-separated ENABLE_OS_SERVICES list.
# NOTE(review): the "local feature=$1" line and the echo 1/0 result lines
# are not visible in this chunk; callers compare the echoed value to "1".
function is_openstack_feature_enabled() {
for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
if [ "${enabled_feature}" == "${feature}" ]; then
# Dump every job parameter for archiving/debugging. NOTE(review): the
# "cat << EOF" opener is not visible in this chunk; the unindented lines
# below are here-doc content, so no comments are placed among them.
# shellcheck disable=SC2153
function print_job_parameters() {
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}
# Start a background tcpdump on a remote node, writing a pcap named after
# the prefix/ip/filter. NOTE(review): the local declarations of ${prefix},
# ${ip} and ${filter} are not visible in this chunk.
function tcpdump_start() {
# Sanitize the filter for use in the pcap file name.
filter_=${filter// /_}
printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
# NOTE(review): plain ssh here vs ${SSH} on the next line — confirm the
# inconsistency is intentional (nohup backgrounding may need plain ssh).
ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
${SSH} ${ip} "ps -ef | grep tcpdump"
# Stop tcpdump on a remote node and compress the captured pcaps so the
# log-collection step picks them up. NOTE(review): the local declaration
# of ${ip} is not visible in this chunk.
function tcpdump_stop() {
printf "node %s: stopping tcpdump\n" "$ip"
${SSH} ${ip} "ps -ef | grep tcpdump.sh"
${SSH} ${ip} "sudo pkill -f tcpdump"
# xz -9e: maximum compression; -k keeps the original pcap on the node.
${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
# copy_logs will copy any *.xz files
# Collect the list of files on the hosts
# Produces file listings (find + rsync --list-only) for /etc, /opt/stack
# and /var on the remote node and archives them locally.
# NOTE(review): the local declarations of ${ip}, ${folder} and ${finddir}
# are not visible in this chunk.
function collect_files() {
${SSH} ${ip} "mkdir -p ${finddir}"
${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
# NOTE(review): the next two lines both list /var (find2.txt and
# find.var.txt) — likely one is redundant; confirm before removing.
${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
scp ${ip}:/tmp/find.tar.xz ${folder}
# rsync --list-only gives listings with sizes/permissions, unlike find.
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
cp /tmp/rsync.tar.xz ${folder}
414 # List of extra services to extract from journalctl
415 # Add new services on a separate line, in alpha order, add \ at the end
416 extra_services_cntl=" \
420 openvswitch.service \
421 ovs-vswitchd.service \
422 ovsdb-server.service \
423 rabbitmq-server.service \
426 extra_services_cmp=" \
428 openvswitch.service \
429 ovs-vswitchd.service \
430 ovsdb-server.service \
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
# Arguments: $1 - node IP, $2 - destination folder, $3 - "control"/"compute".
# NOTE(review): the local declarations of ${ip} and ${folder} are not
# visible in this chunk.
function collect_openstack_logs() {
local -r node_type=${3}
local oslogs="${folder}/oslogs"
printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
# There are always some logs in /opt/stack/logs and this also covers the
# pre-queens branches which always use /opt/stack/logs
rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links
# Starting with queens break out the logs from journalctl
if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Generate a remote helper script; lines until the closing EOF (not
# visible here) are here-doc content — no comments among them.
cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"
function extract_from_journal() {
local -r services=\${1}
local -r folder=\${2}
local -r node_type=\${3}
printf "extract_from_journal folder: \${folder}, services: \${services}\n"
for service in \${services}; do
# strip anything before @ and anything after .
# devstack@g-api.service will end as g-api
service_="\${service#*@}"
service_="\${service_%.*}"
sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "\${node_type}" = "control" ]; then
extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
cat ${WORKSPACE}/collect_openstack_logs.sh
scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Gather diagnostics from every ODL controller, OpenStack control/combo
# node and compute node into ${WORKSPACE}/archives: thread dumps, karaf
# logs, service configs, journals, pcaps and tempest results.
function collect_netvirt_logs() {
set +e # We do not want to create red dot just because something went wrong while fetching logs.
# Generate extra_debug.sh for remote execution; lines until the closing
# EOF (not visible here) are here-doc content — no comments among them.
cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
echo -e "\nsudo netstat -punta\n"
echo -e "\nsudo getenforce\n"
echo -e "\nsudo systemctl status httpd\n"
sudo systemctl status httpd
source /opt/stack/devstack/openrc admin admin
echo -e "\nenv after openrc\n"
echo -e "\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
echo -e "\nsudo mount"
echo -e "\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
echo -e "\njournalctl > /tmp/journalctl.log\n"
sudo journalctl > /tmp/journalctl.log
echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
# Since this log collection work is happening before the archive build macro which also
# creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
# archives build step will essentially be a noop.
mkdir -p ${WORKSPACE}/archives
mv /tmp/changes.txt ${WORKSPACE}/archives
mv /tmp/validations.txt ${WORKSPACE}/archives
mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
mv ${WORKSPACE}/haproxy.cfg ${WORKSPACE}/archives
ssh ${OPENSTACK_HAPROXY_1_IP} "sudo journalctl -u haproxy > /tmp/haproxy.log"
scp ${OPENSTACK_HAPROXY_1_IP}:/tmp/haproxy.log ${WORKSPACE}/archives/
# FIXME: Do not create .tar and gzip before copying.
# --- Per-ODL-controller collection ---
for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
# Indirect expansion: ODL_SYSTEM_<i>_IP holds the node address.
CONTROLLERIP=ODL_SYSTEM_${i}_IP
echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
NODE_FOLDER="odl_${i}"
mkdir -p ${NODE_FOLDER}
echo "Lets's take the karaf thread dump again..."
ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
# jstack may fail if karaf already died; || true keeps collection going.
ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
echo "killing karaf process..."
${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
# Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
# NOTE(review): _exceptions.txt (plural) is produced elsewhere, not by the
# sed above (which writes _exception.log) — confirm the producer exists.
mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
mv *_threads* ${NODE_FOLDER}
mv ps_* ${NODE_FOLDER}
mv ${NODE_FOLDER} ${WORKSPACE}/archives/
print_job_parameters > ${WORKSPACE}/archives/params.txt
# --- Per-OpenStack-control/combo-node collection ---
for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
# A node running n-cpu alongside control services is a "combo" node.
if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
echo "collect_logs: for openstack combo node ip: ${!OSIP}"
NODE_FOLDER="combo_${i}"
echo "collect_logs: for openstack control node ip: ${!OSIP}"
NODE_FOLDER="control_${i}"
mkdir -p ${NODE_FOLDER}
tcpdump_stop "${!OSIP}"
scp extra_debug.sh ${!OSIP}:/tmp
# Capture compute logs if this is a combo node
if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
collect_files "${!OSIP}" "${NODE_FOLDER}"
${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
# qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
mv ${NODE_FOLDER} ${WORKSPACE}/archives/
# --- Per-OpenStack-compute-node collection ---
for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
echo "collect_logs: for openstack compute node ip: ${!OSIP}"
NODE_FOLDER="compute_${i}"
mkdir -p ${NODE_FOLDER}
tcpdump_stop "${!OSIP}"
scp extra_debug.sh ${!OSIP}:/tmp
${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
collect_files "${!OSIP}" "${NODE_FOLDER}"
${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
mv ${NODE_FOLDER} ${WORKSPACE}/archives/
# --- Tempest results (control node 1) ---
DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
# Look for tempest test results in the $TESTREPO dir and copy if found
if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
mkdir -p ${TEMPEST_LOGS_DIR}
scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
} # collect_netvirt_logs()
698 # Utility function for joining strings.
704 final=${final}${delim}${str}
# Build the space-joined list of ODL controller IPs (via the join helper)
# into ${nodes_list} for cluster configuration.
function get_nodes_list() {
# Create the string for nodes
for i in $(seq 1 "${NUM_ODL_SYSTEM}") ; do
# Indirect expansion: ODL_SYSTEM_<i>_IP holds the node address.
CONTROLLERIP=ODL_SYSTEM_${i}_IP
nodes[$i]=${!CONTROLLERIP}
nodes_list=$(join "${nodes[@]}")
# Compute ACTUALFEATURES (comma separated) and SPACE_SEPARATED_FEATURES
# from CONTROLLERFEATURES/CONTROLLERSCOPE and export both; also bumps
# CONTROLLERMEM for the "all" scope.
function get_features() {
if [ "${CONTROLLERSCOPE}" == 'all' ]; then
ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
# The full feature set needs a larger heap than the 2048m default.
export CONTROLLERMEM="3072m"
ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
# Some versions of jenkins job builder result in feature list containing spaces
# and ending in newline. Remove all that.
ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
echo "ACTUALFEATURES: ${ACTUALFEATURES}"
# In the case that we want to install features via karaf shell, a space separated list of
# ACTUALFEATURES IS NEEDED
SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"
export ACTUALFEATURES
export SPACE_SEPARATED_FEATURES
# Create the configuration script to be run on controllers.
# Writes ${WORKSPACE}/configuration-script.sh; everything from the "cat"
# line down to the closing EOF (not visible in this chunk) is here-doc
# content executed later on each controller, so no comments are placed
# among those lines. Unescaped ${...} expand now; \${...} expand remotely.
function create_configuration_script() {
    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
source /tmp/common-functions.sh ${BUNDLEFOLDER}
echo "Changing to /tmp"
echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'
echo "Extracting the new controller..."
echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
if [[ "$USEFEATURESBOOT" == "True" ]]; then
echo "Configuring the startup features..."
sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
FEATURE_TEST_STRING="features-test"
sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"
set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"
echo "Listing all open ports on controller system..."
# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
echo "Custom shard config exists!!!"
echo "Copying the shard config..."
cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}
echo "Dump akka.conf"
echo "Dump modules.conf"
echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
# Create the startup script to be run on controllers.
# Writes ${WORKSPACE}/startup-script.sh; the lines between the "cat" line
# and the closing EOF (not visible in this chunk) are here-doc content
# executed remotely, so no comments are placed among them.
function create_startup_script() {
    cat > "${WORKSPACE}"/startup-script.sh <<EOF
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log
echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
# cat > ${WORKSPACE}/startup-script.sh <<EOF
# Create the post-startup script to be run on controllers: waits for the
# karaf ssh port, installs features one-by-one when USEFEATURESBOOT is
# not "True", then polls karaf.log for the infrautils "System ready"
# marker and dumps diagnostics on timeout. Everything from the "cat" line
# to the closing EOF (not visible in this chunk) is here-doc content
# executed remotely, so no comments are placed among those lines.
function create_post_startup_script() {
    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
if [[ "$USEFEATURESBOOT" != "True" ]]; then
# wait up to 60s for karaf port 8101 to be opened, polling every 5s
until [[ \$loop_count -ge 12 ]]; do
netstat -na | grep 8101 && break;
loop_count=\$[\$loop_count+1];
echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
for feature in ${SPACE_SEPARATED_FEATURES}; do
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-p 8101 karaf@localhost \
feature:install --no-auto-refresh \$feature;
echo "ssh to karaf console to list -i installed features"
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-p 8101 karaf@localhost \
echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -eq 0 ]; then
echo "Controller is UP"
# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ $? -ne 0 ]; then
echo "Timeout Controller DOWN"
echo "Dumping first 500K bytes of karaf log..."
head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
echo "Dumping last 500K bytes of karaf log..."
tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
echo "Listing all open ports on controller system"
echo "Listing all open ports on controller system..."
function exit_on_log_file_message {
echo "looking for \"\$1\" in log file"
if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
echo ABORTING: found "\$1"
echo "Dumping first 500K bytes of karaf log..."
head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
echo "Dumping last 500K bytes of karaf log..."
tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF
894 # Copy over the configuration script and configuration files to each controller
895 # Execute the configuration script on each controller.
#######################################
# Globals:   NUM_ODL_SYSTEM (read), ODL_SYSTEM_<i>_IP (read via ${!var}
#            indirect expansion), WORKSPACE (read)
# Arguments: none
# Outputs:   progress messages; runs configuration-script.sh on each
#            controller with the member index as its $1
# NOTE(review): the loop's "done" and the closing "}" are not visible in
# this extract; comments only, code untouched.
#######################################
896 function copy_and_run_configuration_script() {
897 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
# Indirect expansion: ${!CONTROLLERIP} yields the value of the variable
# named e.g. ODL_SYSTEM_1_IP.
898 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
899 echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
900 scp "${WORKSPACE}"/configuration-script.sh "${!CONTROLLERIP}":/tmp/
901 # $i needs to be parsed client-side
902 # shellcheck disable=SC2029
903 ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
907 # Copy over the startup script to each controller and execute it.
#######################################
# Globals:   NUM_ODL_SYSTEM (read), ODL_SYSTEM_<i>_IP (read via ${!var}
#            indirect expansion), WORKSPACE (read)
# Arguments: none
# Outputs:   progress messages; runs startup-script.sh on each controller
# NOTE(review): the loop's "done" and the closing "}" are not visible in
# this extract; comments only, code untouched.
#######################################
908 function copy_and_run_startup_script() {
909 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
910 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
911 echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
912 scp "${WORKSPACE}"/startup-script.sh "${!CONTROLLERIP}":/tmp/
913 ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
#######################################
# Copy post-startup-script.sh to each controller and execute it, passing
# a per-member seed index.
# Globals:   NUM_ODL_SYSTEM, ODL_SYSTEM_<i>_IP (read via ${!var} indirect
#            expansion), WORKSPACE; seed_index (read and post-incremented
#            — initialized outside this extract, presumably to 1 — TODO
#            confirm)
# Arguments: none
# NOTE(review): the file is scp'd to ":/" (filesystem root) but executed
# from /tmp/ below — the destination looks like it should be ":/tmp/";
# confirm against the full file before relying on this path.
# NOTE(review): the body of the trailing "if", the loop's "done" and the
# closing "}" are not visible in this extract; comments only.
#######################################
917 function copy_and_run_post_startup_script() {
919 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
920 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
921 echo "Execute the post startup script on controller ${!CONTROLLERIP}"
922 scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/
923 # $seed_index needs to be parsed client-side
924 # shellcheck disable=SC2029
925 ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
# Fires on every NUM_ODL_SYSTEM-th iteration; the branch body is not
# visible in this extract.
926 if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
#######################################
# Build the "-v ODL_SYSTEM_<i>_IP:<addr>" variable string (appended to
# odl_variables) for each controller, and capture a pre-test karaf
# thread dump from each: remote ps snapshot, pid extraction, jstack.
# Globals:   NUM_ODL_SYSTEM, ODL_SYSTEM_<i>_IP (read via ${!var} indirect
#            expansion), WORKSPACE, JAVA_HOME; odl_variables (appended to
#            — declared outside this extract, TODO confirm)
# Arguments: none
# Outputs:   ${WORKSPACE}/ps_before.log and per-member
#            karaf_<i>_<pid>_threads_before.log files
# NOTE(review): "Lets's" typo in the echo below — it is a runtime string,
# so it is left untouched in this comments-only pass.
# NOTE(review): the loop's "done" and the closing "}" are not visible in
# this extract; comments only, code untouched.
#######################################
932 function create_controller_variables() {
933 echo "Generating controller variables..."
934 for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
935 CONTROLLERIP="ODL_SYSTEM_${i}_IP"
936 odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
937 echo "Lets's take the karaf thread dump"
938 ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
# Extract the karaf JVM pid: second whitespace-separated field of the ps
# line matching org.apache.karaf.main.Main (grep -v grep guards against
# matching a stray grep process line).
939 pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
940 echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
941 # $i needs to be parsed client-side
942 # shellcheck disable=SC2029
# "|| true" keeps a failed jstack (e.g. pid not found) from aborting the
# whole run — the thread dump is best-effort diagnostics.
943 ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
947 # Function to build OVS from git repo
#######################################
# Build Open vSwitch RPMs on a remote host and fetch them locally.
# Generates ${WORKSPACE}/build_ovs.sh (heredoc below: \$-escaped parts
# run remotely; unescaped ${version} etc. expand at generation time),
# copies it to the host, runs it with output to /tmp/install_ovs.txt,
# copies the produced RPMs from remote /tmp/ovs_rpms into rpm_path, and
# removes the remote staging directory.
# The remote script (as far as visible in this extract): installs kernel
# headers/devel and build tooling via yum, clones openvswitch from
# GitHub, applies NSH patches when version is v2.6.1-nsh, checks out
# ${version}, uninstalls flake8 (old OVS builds fail on flake warnings),
# strips the sphinx BuildRequires, resolves build deps from generated
# spec files via yum-builddep, runs configure + "make rpm-fedora" and an
# rpmbuild of the dkms spec, and stages RPMs into /tmp/ovs_rpms.
# Arguments: $1/$2 presumed ip/version (their "local -r" lines are not
#            visible in this extract — TODO confirm); $3 rpm_path,
#            local directory receiving the built RPMs.
# Globals:   WORKSPACE (read), SSH (read; ssh command wrapper).
# NOTE(review): several original lines (locals, cd/pushd steps, the
# heredoc EOF, the closing "}") are missing from this extract; comments
# only, code untouched.
#######################################
948 function build_ovs() {
951 local -r rpm_path="$3"
953 echo "Building OVS ${version} on ${ip} ..."
954 cat > "${WORKSPACE}"/build_ovs.sh << EOF
957 echo '---> Building openvswitch version ${version}'
959 # Install running kernel devel packages
960 K_VERSION=\$(uname -r)
961 YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
962 # Install centos-release to update vault repos from which to fetch
963 # kernel devel packages
964 sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
965 sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}
970 git clone https://github.com/openvswitch/ovs.git
973 if [ "${version}" = "v2.6.1-nsh" ]; then
975 echo "Will apply nsh patches for OVS version 2.6.1"
976 git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
977 git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
979 git checkout ${version}
982 # On early versions of OVS, flake warnings would fail the build.
984 sudo pip uninstall -y flake8
986 # Get rid of sphinx dep as it conflicts with the already
987 # installed one (via pip). Docs wont be built.
988 sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in
990 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
991 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
992 sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
993 sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
994 rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
996 ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
997 make rpm-fedora RPMBUILD_OPT="--without check"
998 # Build dkms only for now
999 # make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
1000 rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec
1002 mkdir -p /tmp/ovs_rpms
1003 cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/
1009 scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
1010 ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
1011 scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
1012 ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
1015 # Install OVS RPMs from yum repo
#######################################
# Install Open vSwitch on a remote host from a custom RPM repo.
# Generates ${WORKSPACE}/install_ovs.sh (heredoc below: \$-escaped parts
# run remotely; unescaped ${rpm_repo} expands at generation time),
# copies it to the host and runs it, logging to /tmp/install_ovs.txt.
# The remote script (as far as visible in this extract):
#   - uses repoquery (yum-utils) to discover openvswitch,
#     openvswitch-selinux-policy, -dkms and -kmod packages in the repo,
#     preferring dkms over kmod, and exits 1 if none are found
#   - when a dkms package is present, first installs the matching
#     kernel headers/devel plus Development Tools
#   - records the current openvswitch kernel-module path, removes any
#     installed openvswitch-* (versionlock delete + yum remove), then
#     installs and versionlocks the selected packages
#   - modprobes openvswitch, starts/enables the systemd service, and
#     prints ovs-vsctl show and modinfo for diagnostics
#   - flags the case where the kernel-module path did not change (a
#     silently failed dkms install)
# Arguments: $1 presumed ip (its "local -r" line is not visible in this
#            extract — TODO confirm); $2 rpm_repo, repo URL/path.
# Globals:   WORKSPACE (read), SSH (read; ssh command wrapper).
# NOTE(review): in the last visible line of the heredoc, "exit 1" runs
# inside a ( ... ) subshell, so it exits only the subshell; the overall
# command still returns non-zero, but whether that aborts the remote
# script depends on lines not visible here — confirm.
# NOTE(review): some original lines (locals, heredoc EOF, closing "}")
# are missing from this extract; comments only, code untouched.
#######################################
1016 function install_ovs_from_repo() {
1018 local -r rpm_repo="$2"
1020 echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
1021 cat > "${WORKSPACE}"/install_ovs.sh << EOF
1024 echo '---> Installing openvswitch from ${rpm_repo}'
1026 # We need repoquery from yum-utils.
1027 sudo yum -y install yum-utils
1029 # Get openvswitch packages offered by custom repo.
1030 # dkms package will have priority over kmod.
1031 OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
1032 OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
1033 OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
1034 OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
1035 OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
1036 [ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
1037 [ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
1038 [ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"
1040 # Bail with error if custom repo was provided but we could not
1041 # find suitable packages there.
1042 [ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1
1044 # Install kernel & devel packages for the openvswitch dkms package.
1045 if [ -n "\${OVS_DKMS_PKG}" ]; then
1046 # install centos-release to update vault repos from which to fetch
1047 # kernel devel packages
1048 sudo yum -y install centos-release
1049 K_VERSION=\$(uname -r)
1050 YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
1051 sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
1054 PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')
1056 # Install OVS offered by custom repo.
1057 sudo yum-config-manager --add-repo "${rpm_repo}"
1058 sudo yum -y versionlock delete openvswitch-*
1059 sudo yum -y remove openvswitch-*
1060 sudo yum -y --nogpgcheck install \${OVS_PKGS}
1061 sudo yum -y versionlock add \${OVS_PKGS}
1063 # Most recent OVS versions have some incompatibility with certain versions of iptables
1064 # This below line will overcome that problem.
1065 sudo modprobe openvswitch
1067 # Start OVS and print details
1068 sudo systemctl start openvswitch
1069 sudo systemctl enable openvswitch
1070 sudo ovs-vsctl --retry -t 5 show
1071 sudo modinfo openvswitch
1073 # dkms rpm install can fail silently (probably because the OVS version is
1074 # incompatible with the running kernel), verify module was updated.
1075 NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
1076 [ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
1079 scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
1080 ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
1083 # Install OVS RPMS from path
1084 function install_ovs_from_path() {
1086 local -r rpm_path="$2"
1088 echo "Creating OVS RPM repo on ${ip} ..."
1089 ${SSH} "${ip}" "mkdir -p /tmp/ovs_rpms"
1090 scp -r "${rpm_path}"/* "${ip}":/tmp/ovs_rpms
1091 ${SSH} "${ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
1092 install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms