Fix SC2086 for lines 300 until EOF
[releng/builder.git] / jjb / integration / common-functions.sh
1 #!/bin/bash
2
3 echo "common-functions.sh is being sourced"
4
5 BUNDLEFOLDER=$1
6
7 # Basic controller configuration settings
8 export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
9 export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
10 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
11 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
12 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
13 export CONTROLLERMEM="2048m"
14
15 # Cluster specific configuration settings
16 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
17 export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
18 export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
19
# Dump the common-functions configuration variables to stdout so they show
# up in the console log of every job that sources this file.
function print_common_env() {
    cat << EOF
common-functions environment:
MAVENCONF: ${MAVENCONF}
ACTUALFEATURES: ${ACTUALFEATURES}
FEATURESCONF: ${FEATURESCONF}
CUSTOMPROP: ${CUSTOMPROP}
LOGCONF: ${LOGCONF}
MEMCONF: ${MEMCONF}
CONTROLLERMEM: ${CONTROLLERMEM}
AKKACONF: ${AKKACONF}
MODULESCONF: ${MODULESCONF}
MODULESHARDSCONF: ${MODULESHARDSCONF}
SUITES: ${SUITES}

EOF
}
# Print once at source time so every sourcing job logs its environment.
print_common_env
38
39 # Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file.
# Arguments: $1 - JDK install path, $2 - karaf max heap (e.g. 2048m),
#            $3 - path to the karaf bin/setenv file to edit in place.
# Side effects: edits ${memconf}, switches the system java alternative,
#               exports JAVA_HOME and sets the global JAVA_RESOLVED.
function set_java_vars() {
    local -r java_home=$1
    local -r controllermem=$2
    local -r memconf=$3

    # printf (not echo): plain bash echo would print the \n escapes literally
    printf "Configure\n    java home: %s\n    max memory: %s\n    memconf: %s\n" \
        "${java_home}" "${controllermem}" "${memconf}"

    # Uncomment JAVA_HOME in setenv (defaulting to the requested JDK) and
    # raise the karaf max heap. Quotes added per SC2086.
    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
    echo "cat ${memconf}"
    cat "${memconf}"

    echo "Set Java version"
    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
    echo "JDK default version ..."
    java -version

    echo "Set JAVA_HOME"
    export JAVA_HOME="${java_home}"

    # shellcheck disable=SC2037
    JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
    echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
} # set_java_vars()
65
# shellcheck disable=SC2034
# foo appears unused. Verify it or export it.
# Configure karaf log size/rotation and per-module log levels in ${LOGCONF}.
# Arguments: $1 - karaf version (logged only), $2 - "module:level ..." map
# where module is abbreviated (no "org.opendaylight." prefix).
function configure_karaf_log() {
    local -r karaf_version=$1
    local -r controllerdebugmap=$2
    local logapi=log4j

    # Check what the logging.cfg file is using for the logging api: log4j or log4j2.
    # Test the grep directly instead of inspecting $? afterwards (SC2181);
    # matches are still echoed to the console like before.
    if grep "log4j2" "${LOGCONF}"; then
        logapi=log4j2
    fi

    echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
    if [ "${logapi}" == "log4j2" ]; then
        # FIXME: Make log size limit configurable from build parameter.
        # From Neon the default karaf file size is 64 MB
        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        # Flourine still uses 16 MB
        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        # Quieten a known-noisy yangtools logger.
        orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
        orgmodule_="${orgmodule//./_}"
        echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
    else
        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
        # FIXME: Make log size limit configurable from build parameter.
        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
    fi

    # Add custom logging levels
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."
    unset IFS
    echo "controllerdebugmap: ${controllerdebugmap}"
    if [ -n "${controllerdebugmap}" ]; then
        for kv in ${controllerdebugmap}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                if [ "${logapi}" == "log4j2" ]; then
                    # log4j2 logger names may not contain dots; map to underscores.
                    orgmodule_="${orgmodule//./_}"
                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                else
                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                fi
            fi
        done
    fi

    echo "cat ${LOGCONF}"
    cat "${LOGCONF}"
} # function configure_karaf_log()
125
function configure_karaf_log_for_apex() {
    # TODO: add the extra steps to this function to do any extra work
    # in this apex environment like we do in our standard environment.
    # EX: log size, rollover, etc.

    # Modify ODL Log Levels, if needed, for new distribution. This will modify
    # the control nodes hiera data which will be used during the puppet deploy
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."

    local -r controller_ip=$1

    unset IFS
    # shellcheck disable=SC2153
    echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
    if [ -n "${CONTROLLERDEBUGMAP}" ]; then
        # Build a JSON fragment; the escaped quotes must survive the remote sed.
        logging_config='\"opendaylight::log_levels\": {'
        for kv in ${CONTROLLERDEBUGMAP}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
            fi
        done
        # replace the trailing comma with a closing brace followed by trailing comma
        logging_config=${logging_config%,}" },"
        # Quoted so the embedded escapes/spaces print intact (SC2086).
        echo "$logging_config"

        # find a sane line number to inject the custom logging json
        # NOTE(review): the grep runs on OPENSTACK_CONTROL_NODE_1_IP while the
        # sed runs on ${controller_ip}; confirm that mismatch is intentional.
        # ${lineno}/${logging_config} must expand client-side
        # shellcheck disable=SC2029
        lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
        # shellcheck disable=SC2029
        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
    fi
} # function configure_karaf_log_for_apex()
164
function configure_odl_features_for_apex() {

    # if the environment variable $ACTUALFEATURES is not null, then rewrite
    # the puppet config file with the features given in that variable, otherwise
    # this function is a noop

    local -r controller_ip=$1
    local -r config_file=/etc/puppet/hieradata/service_configs.json

# Heredoc is deliberately unquoted: ${config_file} and the expanded feature
# list are baked in now; \$feature and \$jq_arg are left for the remote shell.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in $(echo "$ACTUALFEATURES" | sed "s/,/ /g"); do
    sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
done
echo "Modified puppet-opendaylight service_configs.json..."
cat $config_file
EOF

    echo "Feature configuration script..."
    cat /tmp/set_odl_features.sh

    if [ -n "${ACTUALFEATURES}" ]; then
        # Quoted remote host per SC2086.
        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
    fi

} # function configure_odl_features_for_apex()
192
# Map the number of openstack systems to a devstack topology string and
# export it as OPENSTACK_TOPO. Defaults to $NUM_OPENSTACK_SYSTEM when no
# argument is given; anything other than 1 or 2 yields the 3-node layout.
function get_os_deploy() {
    local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
    if [ "${num_systems}" = "1" ]; then
        OPENSTACK_TOPO="1cmb-0ctl-0cmp"
    elif [ "${num_systems}" = "2" ]; then
        OPENSTACK_TOPO="1cmb-0ctl-1cmp"
    else
        OPENSTACK_TOPO="0cmb-1ctl-2cmp"
    fi
    export OPENSTACK_TOPO
}
208
# Resolve the space-separated list of robot suites to run and assign it to
# the variable whose NAME is passed as $1. Uses $SUITES when set, otherwise
# the stream/test plan files under ${WORKSPACE}/test/csit/testplans.
function get_test_suites() {

    # let the caller pick the name of the variable we will assign the suites to
    local __suite_list=$1

    echo "Locating test plan to use..."
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
    if [ ! -f "${testplan_filepath}" ]; then
        testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
    fi

    echo "Changing the testplan path..."
    # sed reads the file directly instead of a useless cat pipeline (SC2002)
    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
    cat testplan.txt

    # Use the testplan if specific SUITES are not defined.
    if [ -z "${SUITES}" ]; then
        # Drop comment/blank lines; newline->space leaves one trailing space.
        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
    else
        suite_list=""
        workpath="${WORKSPACE}/test/csit/suites"
        for suite in ${SUITES}; do
            fullsuite="${workpath}/${suite}"
            if [ -z "${suite_list}" ]; then
                suite_list+=${fullsuite}
            else
                suite_list+=" ${fullsuite}"
            fi
        done
    fi

    # Assign to the caller-named variable without eval (injection-safe).
    printf -v "${__suite_list}" '%s' "${suite_list}"
}
242
# Locate and execute a script/config plan: each non-comment line of the plan
# file (with "integration" rewritten to ${WORKSPACE}) is sourced in order.
# Argument: $1 - plan type, "script" or "config" (anything else => config).
function run_plan() {
    local -r type=$1

    case ${type} in
    script)
        plan=$SCRIPTPLAN
        ;;
    config|*)
        plan=$CONFIGPLAN
        ;;
    esac

    printf "Locating %s plan to use...\n" "${type}"
    # Fall back: explicit plan -> stream test plan -> generic test plan.
    plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
    if [ ! -f "${plan_filepath}" ]; then
        plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
        if [ ! -f "${plan_filepath}" ]; then
            plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
        fi
    fi

    if [ -f "${plan_filepath}" ]; then
        printf "%s plan exists!!!\n" "${type}"
        printf "Changing the %s plan path...\n" "${type}"
        # sed reads the file directly instead of a useless cat pipeline (SC2002)
        sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
        cat "${type}plan.txt"
        for line in $(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt"); do
            printf "Executing %s...\n" "${line}"
            # shellcheck disable=SC1090
            source "${line}"
        done
    fi
    printf "Finished running %s plans\n" "${type}"
} # function run_plan()
277
278 # Return elapsed time. Usage:
279 # - Call first time with no arguments and a new timer is returned.
280 # - Next call with the first argument as the timer and the elapsed time is returned.
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
function timer()
{
    if [ $# -eq 0 ]; then
        # Fresh timer: emit the current epoch second, no trailing newline.
        printf "%s" "$(date "+%s")"
        return
    fi

    local start_time=$1
    end_time=$(date "+%s")

    # An empty/unset timer is treated as "started just now" -> 0:00:00.
    if [ -z "$start_time" ]; then
        start_time=$end_time;
    fi

    delta_time=$((end_time - start_time))
    dh=$((delta_time / 3600))
    dm=$(((delta_time / 60) % 60))
    ds=$((delta_time % 60))
    # Emit the elapsed time as h:mm:ss.
    printf "%d:%02d:%02d" $dh $dm $ds
}
302
303 # convert commas in csv strings to spaces (ssv)
# convert commas in csv strings to spaces (ssv), squeezing repeated spaces.
# Fix: ssv is now local and initialized — previously it was an unscoped
# variable, so calling with an empty string echoed the previous call's value.
function csv2ssv() {
    local csv=$1
    local ssv=""
    if [ -n "${csv}" ]; then
        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
312
# Print 1 to stdout when $1 appears in the comma-separated ENABLE_OS_SERVICES
# list, 0 otherwise (stdout is the return channel, not the exit status).
function is_openstack_feature_enabled() {
    local wanted=$1
    for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
        [ "${enabled_feature}" != "${wanted}" ] && continue
        echo 1
        return
    done
    echo 0
}
323
324 SSH="ssh -t -t"
325
# shellcheck disable=SC2153
# Dump every job-level parameter to stdout for the console log; also
# redirected into archives/params.txt by the log-collection functions.
function print_job_parameters() {
    cat << EOF

Job parameters:
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
SUITES: ${SUITES}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}

EOF
}
371
# Start a background tcpdump on the remote node.
# Arguments: $1 - file-name prefix, $2 - node ip, $3 - tcpdump filter expr.
# The pcap is written to /tmp on the remote node; tcpdump_stop compresses it.
function tcpdump_start() {
    local -r prefix=$1
    local -r ip=$2
    local -r filter=$3
    # spaces are not safe in the pcap file name; replace with underscores
    filter_=${filter// /_}

    printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
    # $filter needs to be parsed client-side
    # shellcheck disable=SC2029
    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
    ${SSH} "${ip}" "ps -ef | grep tcpdump"
}
384
# Stop any tcpdump running on the remote node and xz-compress its pcaps.
# Argument: $1 - node ip. The resulting *.xz files are picked up later.
function tcpdump_stop() {
    local -r ip=$1

    printf "node %s: stopping tcpdump\n" "$ip"
    # Run the teardown steps in order on the remote host.
    for remote_cmd in \
        "ps -ef | grep tcpdump.sh" \
        "sudo pkill -f tcpdump" \
        "sudo xz -9ekvvf /tmp/*.pcap" \
        "sudo ls -al /tmp/*.pcap"
    do
        ${SSH} "${ip}" "${remote_cmd}"
    done
    # copy_logs will copy any *.xz files
}
395
# Collect the list of files on the hosts
# Builds a remote find(1) inventory (copied back as find.tar.xz) and a local
# rsync --list-only inventory (rsync.tar.xz) of /etc, /opt/stack and /var.
function collect_files() {
    local -r ip=$1
    local -r folder=$2
    finddir=/tmp/finder
    ${SSH} "${ip}" "mkdir -p ${finddir}"
    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
    # NOTE(review): /var is inventoried twice (find2.txt and find.var.txt);
    # find2.txt looks like a leftover — confirm before removing either.
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
    scp "${ip}":/tmp/find.tar.xz "${folder}"
    mkdir -p "${finddir}"
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
    tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
    cp /tmp/rsync.tar.xz "${folder}"
}
415
416 # List of extra services to extract from journalctl
417 # Add new services on a separate line, in alpha order, add \ at the end
418 extra_services_cntl=" \
419     dnsmasq.service \
420     httpd.service \
421     libvirtd.service \
422     openvswitch.service \
423     ovs-vswitchd.service \
424     ovsdb-server.service \
425     rabbitmq-server.service \
426 "
427
428 extra_services_cmp=" \
429     libvirtd.service \
430     openvswitch.service \
431     ovs-vswitchd.service \
432     ovsdb-server.service \
433 "
434
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
# Arguments: $1 - node ip, $2 - local destination folder, $3 - "control"/"compute".
function collect_openstack_logs() {
    local -r ip=${1}
    local -r folder=${2}
    local -r node_type=${3}
    local oslogs="${folder}/oslogs"

    printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
    rm -rf "${oslogs}"
    mkdir -p "${oslogs}"
    # There are always some logs in /opt/stack/logs and this also covers the
    # pre-queens branches which always use /opt/stack/logs
    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links

    # Starting with queens break out the logs from journalctl
    if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
        # Unquoted heredoc: the extra_services_* lists and ${node_type} are
        # baked in now; \$-escaped expansions run on the remote host.
        # Fix: node_type was previously written escaped (\${node_type}), but the
        # script runs remotely with no arguments, so the control branch never
        # fired; interpolate it at generation time instead.
        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"

function extract_from_journal() {
    local -r services=\${1}
    local -r folder=\${2}
    local -r node_type=\${3}
    printf "extract_from_journal folder: \${folder}, services: \${services}\n"
    for service in \${services}; do
        # strip anything before @ and anything after .
        # devstack@g-api.service will end as g-api
        service_="\${service#*@}"
        service_="\${service_%.*}"
        sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
    done
}

rm -rf /tmp/oslogs
mkdir -p /tmp/oslogs
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "${node_type}" = "control" ]; then
    extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
else
    extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
fi
ls -al /tmp/oslogs
EOF
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
        printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
        cat "${WORKSPACE}"/collect_openstack_logs.sh
        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
    fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
} # function collect_openstack_logs()
492
# Collect logs from every ODL controller, openstack control/combo node and
# compute node into ${WORKSPACE}/archives, plus tempest results if present.
# Relies on globals: WORKSPACE, NUM_ODL_SYSTEM, NUM_OPENSTACK_CONTROL_NODES,
# NUM_OPENSTACK_COMPUTE_NODES, ODL_SYSTEM_*_IP, OPENSTACK_*_IP, BUNDLEFOLDER.
function collect_netvirt_logs() {
    set +e  # We do not want to create red dot just because something went wrong while fetching logs.

    # Script shipped to each openstack node to capture extra diagnostics.
    cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
echo -e "\nsudo netstat -punta\n"
sudo netstat -punta
echo -e "\nsudo getenforce\n"
sudo getenforce
echo -e "\nsudo systemctl status httpd\n"
sudo systemctl status httpd
echo -e "\nenv\n"
env
source /opt/stack/devstack/openrc admin admin
echo -e "\nenv after openrc\n"
env
echo -e "\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
echo -e "\nsudo mount"
sudo mount
echo -e "\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
echo -e "\njournalctl > /tmp/journalctl.log\n"
sudo journalctl > /tmp/journalctl.log
echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
EOF

    # Since this log collection work is happening before the archive build macro which also
    # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
    # archives build step will essentially be a noop.
    mkdir -p "${WORKSPACE}"/archives

    mv /tmp/changes.txt "${WORKSPACE}"/archives
    mv /tmp/validations.txt "${WORKSPACE}"/archives
    mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
    mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
    ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
    scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/

    sleep 5
    # FIXME: Do not create .tar and gzip before copying.
    # ODL controllers: thread dumps, journals, karaf logs and config trees.
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
        NODE_FOLDER="odl_${i}"
        mkdir -p "${NODE_FOLDER}"
        echo "Lets's take the karaf thread dump again..."
        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        # $pid needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
        echo "killing karaf process..."
        # shellcheck disable=SC2016
        ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
        ${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
        scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
        ${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
        scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
        ${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
        scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
        ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
        scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
        scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
        tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
        grep "ROBOT MESSAGE\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
            "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
        # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
        mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
        rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
        mv -- *_threads* "${NODE_FOLDER}"
        mv ps_* "${NODE_FOLDER}"
        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
    done

    print_job_parameters > "${WORKSPACE}"/archives/params.txt

    # Control Node
    for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
        OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            echo "collect_logs: for openstack combo node ip: ${!OSIP}"
            NODE_FOLDER="combo_${i}"
        else
            echo "collect_logs: for openstack control node ip: ${!OSIP}"
            NODE_FOLDER="control_${i}"
        fi
        mkdir -p "${NODE_FOLDER}"
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh "${!OSIP}":/tmp
        # Capture compute logs if this is a combo node
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
            scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
            scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
            rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
        fi
        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
        scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
        mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
        # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
        tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
        mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
    done

    # Compute Nodes
    for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
        OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
        echo "collect_logs: for openstack compute node ip: ${!OSIP}"
        NODE_FOLDER="compute_${i}"
        mkdir -p "${NODE_FOLDER}"
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh "${!OSIP}":/tmp
        ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
        scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
        scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
        scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
        scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
        scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
        scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
        rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
        mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
        mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
    done

    # Tempest
    DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
    TESTREPO=".stestr"
    TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
    # Look for tempest test results in the $TESTREPO dir and copy if found
    if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
        ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
        mkdir -p "${TEMPEST_LOGS_DIR}"
        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
        scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
    else
        echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
    fi
} # collect_netvirt_logs()
702
# Utility function for joining strings.
#
# Arguments: $1 - first string; any further arguments are appended.
# Outputs:   the joined, space-delimited string on stdout.
function join() {
    # Use locals so the scratch variables do not leak into the caller's scope.
    local delim=' '
    local final
    local str
    final=$1; shift

    for str in "$@" ; do
        final=${final}${delim}${str}
    done

    echo "${final}"
}
714
# Build and print the space-separated list of all ODL controller IPs.
#
# Reads the ODL_SYSTEM_<n>_IP variables for n in 1..NUM_ODL_SYSTEM.
# NOTE: nodes / nodes_list are deliberately NOT local — nodes_list is expanded
# later by the configuration-script heredoc.
function get_nodes_list() {
    # Resolve each controller IP via indirect expansion into the nodes array.
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        nodes[$i]=${!CONTROLLERIP}
    done

    nodes_list=$(join "${nodes[@]}")
    echo "${nodes_list}"
}
725
# Compose the karaf feature list to install, based on the controller scope.
#
# Globals read:    CONTROLLERSCOPE, CONTROLLERFEATURES
# Globals written: ACTUALFEATURES, SPACE_SEPARATED_FEATURES (both exported),
#                  CONTROLLERMEM (only for the 'all' scope)
function get_features() {
    if [[ "${CONTROLLERSCOPE}" == 'all' ]]; then
        ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
        # Installing every feature needs a larger heap.
        export CONTROLLERMEM="3072m"
    else
        ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    fi

    # Some versions of jenkins job builder leave spaces and trailing newlines
    # in the feature list; strip all of them.
    ACTUALFEATURES=$(tr -d '\n \r' <<< "${ACTUALFEATURES}")
    echo "ACTUALFEATURES: ${ACTUALFEATURES}"

    # Installing features via the karaf shell needs the same list separated by
    # spaces instead of commas.
    SPACE_SEPARATED_FEATURES=${ACTUALFEATURES//,/ }
    echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

    export ACTUALFEATURES SPACE_SEPARATED_FEATURES
}
747
# Create the configuration script to be run on controllers.
#
# Writes ${WORKSPACE}/configuration-script.sh, later executed on each
# controller with the node index as $1 (see copy_and_run_configuration_script).
# In the unquoted heredoc below, unescaped expansions (${BUNDLEFOLDER},
# ${ACTUAL_BUNDLE_URL}, ${ACTUALFEATURES}, ${nodes_list}, $USEFEATURESBOOT, ...)
# are resolved NOW, on the Jenkins side; backslash-escaped ones
# (\${FEATURE_TEST_STRING}, \$1) are left for the controller to resolve at
# run time. The generated script sources common-functions.sh remotely to get
# configure_karaf_log / set_java_vars and the *CONF path variables.
function create_configuration_script() {
    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
   sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
cat ${FEATURESCONF}

configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"

set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"

echo "Listing all open ports on controller system..."
netstat -pnatu

# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
    echo "Custom shard config exists!!!"
    echo "Copying the shard config..."
    cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
fi

echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}

echo "Dump akka.conf"
cat ${AKKACONF}

echo "Dump modules.conf"
cat ${MODULESCONF}

echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
}
812
# Create the startup script to be run on controllers.
#
# Writes ${WORKSPACE}/startup-script.sh; ${BUNDLEFOLDER} in the heredoc is
# expanded NOW, on the Jenkins side, so the generated script carries the
# concrete distribution path. The script redirects the karaf console to
# karaf_console.log and starts the controller.
function create_startup_script() {
    cat > "${WORKSPACE}"/startup-script.sh <<EOF
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
EOF
# cat > ${WORKSPACE}/startup-script.sh <<EOF
}
825
# Create the post-startup script to be run on controllers.
#
# Writes ${WORKSPACE}/post-startup-script.sh. In the unquoted heredoc,
# unescaped expansions ($USEFEATURESBOOT, ${SPACE_SEPARATED_FEATURES},
# ${BUNDLEFOLDER}) are resolved NOW on the Jenkins side; escaped ones
# (\$loop_count, \$feature, \$?, \$1) are resolved on the controller at
# run time. The generated script optionally installs features one by one,
# waits up to 3 minutes for the "System ready" log message, and aborts on
# known fatal log messages.
function create_post_startup_script() {
    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$[\$loop_count+1];
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
for i in {1..36}; do
    sleep 5;
    grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    if [ \$? -eq 0 ]; then
        echo "Controller is UP"
        break
    fi
done;

# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -ne 0 ]; then
    echo "Timeout Controller DOWN"
    echo "Dumping first 500K bytes of karaf log..."
    head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Dumping last 500K bytes of karaf log..."
    tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Listing all open ports on controller system"
    netstat -pnatu
    exit 1
fi

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
EOF
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF
}
898
# Distribute configuration-script.sh to every controller and execute it there,
# passing each node's index as $1 so the remote script can set up clustering.
function copy_and_run_configuration_script() {
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
        scp "${WORKSPACE}"/configuration-script.sh "${!CONTROLLERIP}":/tmp/
        # ${i} is deliberately expanded locally so the remote script gets it
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
    done
}
911
# Distribute startup-script.sh to every controller and run it there to start
# the karaf process.
function copy_and_run_startup_script() {
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
        scp "${WORKSPACE}"/startup-script.sh "${!CONTROLLERIP}":/tmp/
        ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
    done
}
921
# Copy the post-startup script to each controller and execute it there.
#
# Each node receives a seed index (1..NUM_ODL_SYSTEM) as $1; the counter wraps
# back to 1 after the last node.
function copy_and_run_post_startup_script() {
    seed_index=1
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Execute the post startup script on controller ${!CONTROLLERIP}"
        # Copy to /tmp/ so the ssh step below finds /tmp/post-startup-script.sh;
        # the previous ":/" destination placed the file in the remote root
        # directory (where non-root scp fails and the script is never found).
        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
        # $seed_index needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
        if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
            seed_index=1
        fi
    done
}
936
# Build the robot "-v VAR:value" argument string for every controller (appended
# to the odl_variables global) and capture a pre-test karaf thread dump from
# each node into ${WORKSPACE}/karaf_<i>_<pid>_threads_before.log.
function create_controller_variables() {
    echo "Generating controller variables..."
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        odl_variables="${odl_variables} -v ${CONTROLLERIP}:${!CONTROLLERIP}"
        echo "Lets's take the karaf thread dump"
        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
        # Extract the karaf main PID from the remote ps listing.
        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        # ${pid} must be resolved locally before jstack runs on the remote side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
    done
}
951
# Function to build OVS from git repo
#
# Arguments:
#   $1 - IP of the node to build on
#   $2 - OVS version / git ref to build ("v2.6.1-nsh" additionally applies the
#        yyang13 NSH patch set on top of v2.6.1)
#   $3 - local directory into which the produced RPMs are copied back
#
# Generates ${WORKSPACE}/build_ovs.sh, runs it on the remote node (output
# appended to /tmp/install_ovs.txt there), then fetches /tmp/ovs_rpms/* back
# to ${rpm_path} and cleans up the remote staging directory. In the heredoc,
# ${version} is expanded NOW; \$-escaped variables are remote run-time values.
function build_ovs() {
    local -r ip=$1
    local -r version=$2
    local -r rpm_path="$3"

    echo "Building OVS ${version} on ${ip} ..."
    cat > "${WORKSPACE}"/build_ovs.sh << EOF
set -ex -o pipefail

echo '---> Building openvswitch version ${version}'

# Install running kernel devel packages
K_VERSION=\$(uname -r)
YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
# Install centos-release to update vault repos from which to fetch
# kernel devel packages
sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}

TMP=\$(mktemp -d)
pushd \${TMP}

git clone https://github.com/openvswitch/ovs.git
cd ovs

if [ "${version}" = "v2.6.1-nsh" ]; then
    git checkout v2.6.1
    echo "Will apply nsh patches for OVS version 2.6.1"
    git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
    git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
else
    git checkout ${version}
fi

# On early versions of OVS, flake warnings would fail the build.
# Remove it.
sudo pip uninstall -y flake8

# Get rid of sphinx dep as it conflicts with the already
# installed one (via pip). Docs wont be built.
sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in

sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
./boot.sh
./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
make rpm-fedora RPMBUILD_OPT="--without check"
# Build dkms only for now
# make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec

mkdir -p /tmp/ovs_rpms
cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/

popd
rm -rf \${TMP}
EOF

    scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
    scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
    ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
}
1019
# Install OVS RPMs from yum repo
#
# Arguments:
#   $1 - IP of the node to install on
#   $2 - URL (or file: path) of the yum repo providing the openvswitch RPMs
#
# Generates ${WORKSPACE}/install_ovs.sh and runs it on the remote node
# (output appended to /tmp/install_ovs.txt there). The generated script
# queries the repo for openvswitch packages (preferring dkms over kmod),
# installs kernel devel packages when dkms is used, replaces any installed
# openvswitch, starts the service, and fails if the kernel module was not
# actually updated. In the heredoc, ${rpm_repo} is expanded NOW; \$-escaped
# variables are remote run-time values.
function install_ovs_from_repo() {
    local -r ip=$1
    local -r rpm_repo="$2"

    echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
    cat > "${WORKSPACE}"/install_ovs.sh << EOF
set -ex -o pipefail

echo '---> Installing openvswitch from ${rpm_repo}'

# We need repoquery from yum-utils.
sudo yum -y install yum-utils

# Get openvswitch packages offered by custom repo.
# dkms package will have priority over kmod.
OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
[ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
[ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
[ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"

# Bail with error if custom repo was provided but we could not
# find suitable packages there.
[ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1

# Install kernel & devel packages for the openvswitch dkms package.
if [ -n "\${OVS_DKMS_PKG}" ]; then
    # install centos-release to update vault repos from which to fetch
    # kernel devel packages
    sudo yum -y install centos-release
    K_VERSION=\$(uname -r)
    YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
    sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
fi

PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')

# Install OVS offered by custom repo.
sudo yum-config-manager --add-repo "${rpm_repo}"
sudo yum -y versionlock delete openvswitch-*
sudo yum -y remove openvswitch-*
sudo yum -y --nogpgcheck install \${OVS_PKGS}
sudo yum -y versionlock add \${OVS_PKGS}

# Most recent OVS versions have some incompatibility with certain versions of iptables
# This below line will overcome that problem.
sudo modprobe openvswitch

# Start OVS and print details
sudo systemctl start openvswitch
sudo systemctl enable openvswitch
sudo ovs-vsctl --retry -t 5 show
sudo modinfo openvswitch

# dkms rpm install can fail silently (probably because the OVS version is
# incompatible with the running kernel), verify module was updated.
NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
[ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
EOF

    scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
}
1087
# Install OVS RPMS from path
#
# Copies the RPMs found under a local directory to the remote host, turns
# /tmp/ovs_rpms into a yum repo there, then delegates the actual install to
# install_ovs_from_repo.
function install_ovs_from_path() {
    local -r target_ip=$1
    local -r source_rpm_dir="$2"

    echo "Creating OVS RPM repo on ${target_ip} ..."
    ${SSH} "${target_ip}" "mkdir -p /tmp/ovs_rpms"
    scp -r "${source_rpm_dir}"/* "${target_ip}":/tmp/ovs_rpms
    ${SSH} "${target_ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
    install_ovs_from_repo "${target_ip}" file:/tmp/ovs_rpms
}
1099
1100