Merge "add support for JVM monitoring"
[releng/builder.git] / jjb / integration / common-functions.sh
1 #!/bin/bash
2
3 echo "common-functions.sh is being sourced"
4
5 BUNDLEFOLDER=$1
6
7 # Basic controller configuration settings
8 export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
9 export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
10 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
11 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
12 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
13 export CONTROLLERMEM="2048m"
14
15 # Cluster specific configuration settings
16 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
17 export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
18 export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
19
# Print the common-functions environment (the paths/settings exported above)
# to the console log for debugging. ACTUALFEATURES and SUITES may legitimately
# be empty at the time this file is sourced.
function print_common_env() {
    cat << EOF
common-functions environment:
MAVENCONF: ${MAVENCONF}
ACTUALFEATURES: ${ACTUALFEATURES}
FEATURESCONF: ${FEATURESCONF}
CUSTOMPROP: ${CUSTOMPROP}
LOGCONF: ${LOGCONF}
MEMCONF: ${MEMCONF}
CONTROLLERMEM: ${CONTROLLERMEM}
AKKACONF: ${AKKACONF}
MODULESCONF: ${MODULESCONF}
MODULESHARDSCONF: ${MODULESHARDSCONF}
SUITES: ${SUITES}

EOF
}
print_common_env
38
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file, then register
# the requested JDK as the system default via alternatives.
# Arguments:
#   $1 - java_home: path to the JDK to use
#   $2 - controllermem: max heap value to write (e.g. 2048m)
#   $3 - memconf: path to the karaf bin/setenv file to edit
function set_java_vars() {
    local -r java_home=$1
    local -r controllermem=$2
    local -r memconf=$3

    echo "Configure"
    echo "    java home: ${java_home}"
    echo "    max memory: ${controllermem}"
    echo "    memconf: ${memconf}"

    # We do not want expressions to expand here.
    # shellcheck disable=SC2016
    # BUGFIX: use separate "-i -e" options; the combined "-ie" form is parsed
    # by GNU sed as -i with backup suffix "e", leaving stray "${memconf}e"
    # backup files behind.
    sed -i -e 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
    sed -i -e 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
    echo "cat ${memconf}"
    cat "${memconf}"

    echo "Set Java version"
    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
    echo "JDK default version ..."
    java -version

    echo "Set JAVA_HOME"
    export JAVA_HOME="${java_home}"

    # shellcheck disable=SC2037
    JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
    echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
} # set_java_vars()
70
# Configure karaf logging: raise the rolling log file size limit and append
# custom per-module log levels to the pax-logging config.
# Globals:
#   LOGCONF (read/append) - path to org.ops4j.pax.logging.cfg
# Arguments:
#   $1 - karaf_version: informational only (printed)
#   $2 - controllerdebugmap: space-separated "module:level" pairs where module
#        is abbreviated and does not include "org.opendaylight."
function configure_karaf_log() {
    local -r karaf_version=$1
    local -r controllerdebugmap=$2
    local logapi=log4j

    # Check what the logging.cfg file is using for the logging api: log4j or log4j2
    if grep "log4j2" "${LOGCONF}"; then
        logapi=log4j2
    fi

    echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
    if [ "${logapi}" == "log4j2" ]; then
        # FIXME: Make log size limit configurable from build parameter.
        # From Neon the default karaf file size is 64 MB
        sed -i -e 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        # Flourine still uses 16 MB
        sed -i -e 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
        orgmodule_="${orgmodule//./_}"
        # BUGFIX: the ".name" property must carry the logger name (the module
        # FQCN), matching the pattern used for CONTROLLERDEBUGMAP entries
        # below; it was previously written as the literal "WARN".
        echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
    else
        sed -i -e 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
        # FIXME: Make log size limit configurable from build parameter.
        sed -i -e 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
    fi

    # Add custom logging levels
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."
    unset IFS
    echo "controllerdebugmap: ${controllerdebugmap}"
    if [ -n "${controllerdebugmap}" ]; then
        for kv in ${controllerdebugmap}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                if [ "${logapi}" == "log4j2" ]; then
                    # log4j2 needs two properties per logger: name and level
                    orgmodule_="${orgmodule//./_}"
                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                else
                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                fi
            fi
        done
    fi

    echo "cat ${LOGCONF}"
    cat "${LOGCONF}"
} # function configure_karaf_log()
129
# Inject custom ODL log levels into an apex controller's hiera data, which
# will be consumed during the puppet deploy.
# Globals:
#   CONTROLLERDEBUGMAP (read) - space separated "module:level" pairs where
#   module is abbreviated and does not include "org.opendaylight."
# Arguments:
#   $1 - controller_ip: the node whose service_configs.json is patched
function configure_karaf_log_for_apex() {
    # TODO: add the extra steps to this function to do any extra work
    # in this apex environment like we do in our standard environment.
    # EX: log size, rollover, etc.

    local -r controller_ip=$1

    unset IFS
    # shellcheck disable=SC2153
    echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
    if [ -n "${CONTROLLERDEBUGMAP}" ]; then
        logging_config='\"opendaylight::log_levels\": {'
        for kv in ${CONTROLLERDEBUGMAP}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
            fi
        done
        # replace the trailing comma with a closing brace followed by trailing comma
        logging_config=${logging_config%,}" },"
        echo "$logging_config"

        # find a sane line number to inject the custom logging json
        # BUGFIX: query the same node we patch below; this previously always
        # asked OPENSTACK_CONTROL_NODE_1_IP regardless of which controller
        # was passed in, yielding a wrong line number for other controllers.
        lineno=$(ssh "$controller_ip" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
        # We purposely want these variables to expand client-side
        # shellcheck disable=SC2029
        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
    fi
} # function configure_karaf_log_for_apex()
170
# Rewrite the puppet hiera config on an apex controller so that
# "opendaylight::extra_features" contains exactly the features listed in
# ${ACTUALFEATURES} (comma separated). No-op when ACTUALFEATURES is empty.
# Arguments:
#   $1 - controller_ip: node whose service_configs.json is rewritten
function configure_odl_features_for_apex() {

    local -r controller_ip=$1
    local -r config_file=/etc/puppet/hieradata/service_configs.json

# In the heredoc, \$ defers expansion to the remote shell while $config_file
# is expanded locally when the script is generated.
# BUGFIX: the for-loop expansion must be UNQUOTED so it word-splits into one
# feature per iteration; quoted, the loop ran once and added all features as
# a single jq array element.
# NOTE(review): this still relies on ACTUALFEATURES being visible to the
# remote shell — confirm, or expand it locally instead.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in \${ACTUALFEATURES//,/ }; do
    sudo jq --arg jq_arg "\$feature" '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
done
echo "Modified puppet-opendaylight service_configs.json..."
cat $config_file
EOF

    echo "Feature configuration script..."
    cat /tmp/set_odl_features.sh

    if [ -n "${ACTUALFEATURES}" ]; then
        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
    fi

} # function configure_odl_features_for_apex()
198
# Map the number of OpenStack systems to a deploy topology string and export
# it as OPENSTACK_TOPO (cmb=combo, ctl=control, cmp=compute nodes).
# Arguments:
#   $1 - number of systems; defaults to $NUM_OPENSTACK_SYSTEM
function get_os_deploy() {
    local -r count=${1:-$NUM_OPENSTACK_SYSTEM}
    case ${count} in
    1)
        export OPENSTACK_TOPO="1cmb-0ctl-0cmp"
        ;;
    2)
        export OPENSTACK_TOPO="1cmb-0ctl-1cmp"
        ;;
    *)
        # three or more systems: dedicated control plus two computes
        export OPENSTACK_TOPO="0cmb-1ctl-2cmp"
        ;;
    esac
}
214
# Build the space-separated list of robot suites to run and assign it to the
# variable NAMED by the caller.
# Globals:
#   WORKSPACE, STREAMTESTPLAN, TESTPLAN, SUITES (read)
# Arguments:
#   $1 - name of the variable to receive the suite list
# Side effects:
#   appends the JVM-plot suite to the chosen testplan file and writes
#   ./testplan.txt in the current directory.
function get_test_suites() {

    # let the caller pick the name of the variable we will assign the suites to
    local __suite_list=$1

    echo "Locating test plan to use..."
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
    if [ ! -f "${testplan_filepath}" ]; then
        testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
    fi

    add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot" # we should always add for preparing JVM monitoring
    echo >> "$testplan_filepath"
    echo "${add_test}" >> "$testplan_filepath"

    echo "Changing the testplan path..."
    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
    cat testplan.txt

    # Use the testplan if specific SUITES are not defined.
    if [ -z "${SUITES}" ]; then
        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
    else
        suite_list=""
        workpath="${WORKSPACE}/test/csit/suites"
        for suite in ${SUITES}; do
            fullsuite="${workpath}/${suite}"
            if [ -z "${suite_list}" ]; then
                suite_list+=${fullsuite}
            else
                suite_list+=" "${fullsuite}
            fi
        done
    fi

    # BUGFIX: assign via printf -v instead of eval; the eval form broke (and
    # could execute arbitrary text) if the suite list contained quotes.
    printf -v "${__suite_list}" '%s' "${suite_list}"
}
252
# Locate the script/config plan file for this job and source every shell
# snippet it lists, in order, into the current shell.
# Globals:
#   WORKSPACE, SCRIPTPLAN, CONFIGPLAN, STREAMTESTPLAN, TESTPLAN (read)
# Arguments:
#   $1 - type: "script" selects $SCRIPTPLAN; anything else selects $CONFIGPLAN
function run_plan() {
    local -r type=$1

    case ${type} in
    script)
        plan=$SCRIPTPLAN
        ;;
    config|*)
        plan=$CONFIGPLAN
        ;;
    esac

    printf "Locating %s plan to use...\\n" "${type}"
    plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
    # Fall back to the stream testplan, then the generic testplan.
    if [ ! -f "${plan_filepath}" ]; then
        plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
        if [ ! -f "${plan_filepath}" ]; then
            plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
        fi
    fi

    # Missing plan file is not an error: the job simply has no plan to run.
    if [ -f "${plan_filepath}" ]; then
        printf "%s plan exists!!!\\n" "${type}"
        printf "Changing the %s plan path...\\n" "${type}"
        # Rewrite the leading "integration" path component to the workspace.
        sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
        cat "${type}plan.txt"
        # shellcheck disable=SC2013
        for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
            printf "Executing %s...\\n" "${line}"
            # shellcheck source=${line} disable=SC1091
            source "${line}"
        done
    fi
    printf "Finished running %s plans\\n" "${type}"
} # function run_plan()
288
# Run scripts to support JVM monitoring.
# Sources the elasticsearch attribute script (short or long variant chosen
# by ${ELASTICSEARCHATTRIBUTE}) followed by the common JVM attribute script.
# Globals:
#   ELASTICSEARCHATTRIBUTE, WORKSPACE (read)
function add_jvm_support()
{
    # TODO unite short and long version to one script and parametrize the input: short/long/any number
    if [ "${ELASTICSEARCHATTRIBUTE}" == "short" ]; then
        run_script="${WORKSPACE}/test/csit/scripts/set_elasticsearch_attribute_short.sh"
    else
        run_script="${WORKSPACE}/test/csit/scripts/set_elasticsearch_attribute_long.sh"
    fi
    printf "Executing %s...\\n" "${run_script}"
    # shellcheck source=${line} disable=SC1091
    source "${run_script}"

    run_script="${WORKSPACE}/test/csit/scripts/set_jvm_common_attribute.sh"
    printf "Executing %s...\\n" "${run_script}"
    # shellcheck source=${line} disable=SC1091
    source "${run_script}"
} # function add_jvm_support()
307
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
function timer()
{
    local now
    now=$(date "+%s")

    if [ $# -eq 0 ]; then
        # No argument: hand back the current epoch time as a timer token.
        printf "%s" "${now}"
    else
        # An empty token behaves like "started just now" (elapsed 0:00:00).
        local -r begin=${1:-${now}}
        local -r elapsed=$((now - begin))
        # Emit H:MM:SS (hours are not zero-padded, matching prior output).
        printf "%d:%02d:%02d" \
            $((elapsed / 3600)) \
            $(((elapsed / 60) % 60)) \
            $((elapsed % 60))
    fi
}
332
# convert commas in csv strings to spaces (ssv); repeated separators collapse
# to a single space. Prints the converted string on stdout.
# Arguments:
#   $1 - csv: comma-separated input (may be empty)
function csv2ssv() {
    local csv=$1
    # BUGFIX: declare ssv local and initialize it; previously it was a global,
    # so a call with empty input echoed the stale result of the previous call.
    local ssv=""
    if [ -n "${csv}" ]; then
        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
342
# Echo 1 if the given service name appears in the comma-separated
# ENABLE_OS_SERVICES list, otherwise echo 0.
# Arguments:
#   $1 - feature/service name to look for (exact match)
function is_openstack_feature_enabled() {
    local -r wanted=$1
    local candidate
    for candidate in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
        [ "${candidate}" == "${wanted}" ] && { echo 1; return; }
    done
    echo 0
}
353
354 SSH="ssh -t -t"
355
# Dump every job parameter to stdout so a run's inputs can be reconstructed
# from the console log (callers also redirect this into archives/params.txt).
# Variables may legitimately be empty for jobs that do not use them.
# shellcheck disable=SC2153
function print_job_parameters() {
    cat << EOF

Job parameters:
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
SUITES: ${SUITES}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}

EOF
}
401
# Start a background tcpdump capture on a remote node; the pcap lands in
# /tmp on that node and is collected later by tcpdump_stop/copy_logs.
# Arguments:
#   $1 - prefix: tag used in the capture filename
#   $2 - ip: node to capture on
#   $3 - filter: tcpdump filter expression (e.g. "port 6653")
function tcpdump_start() {
    local -r prefix=$1
    local -r ip=$2
    local -r filter=$3
    # spaces in the filter would break the capture filename
    filter_=${filter// /_}

    printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
    # $filter needs to be parsed client-side
    # shellcheck disable=SC2029
    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
    ${SSH} "${ip}" "ps -ef | grep tcpdump"
}
414
# Stop the tcpdump capture started by tcpdump_start on a remote node and
# xz-compress the pcap files in its /tmp for later collection.
# Arguments:
#   $1 - ip: node to stop the capture on
function tcpdump_stop() {
    local -r ip=$1

    printf "node %s: stopping tcpdump\\n" "$ip"
    ${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
    ${SSH} "${ip}" "sudo pkill -f tcpdump"
    ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
    ${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
    # copy_logs will copy any *.xz files
}
425
# Collect the list of files on the hosts
# Dumps directory listings (find on the remote node, rsync --list-only from
# here) into a /tmp/finder scratch dir, then copies xz archives of both
# listings into ${folder}.
# Arguments:
#   $1 - ip: remote node
#   $2 - folder: local destination folder
function collect_files() {
    local -r ip=$1
    local -r folder=$2
    finddir=/tmp/finder
    ${SSH} "${ip}" "mkdir -p ${finddir}"
    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
    # NOTE(review): /var is listed twice below (find2.txt and find.var.txt);
    # find2.txt looks like a legacy duplicate — confirm no consumer relies on
    # that name before removing it.
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
    scp "${ip}":/tmp/find.tar.xz "${folder}"
    mkdir -p "${finddir}"
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
    tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
    cp /tmp/rsync.tar.xz "${folder}"
}
445
446 # List of extra services to extract from journalctl
447 # Add new services on a separate line, in alpha order, add \ at the end
448 extra_services_cntl=" \
449     dnsmasq.service \
450     httpd.service \
451     libvirtd.service \
452     openvswitch.service \
453     ovs-vswitchd.service \
454     ovsdb-server.service \
455     rabbitmq-server.service \
456 "
457
458 extra_services_cmp=" \
459     libvirtd.service \
460     openvswitch.service \
461     ovs-vswitchd.service \
462     ovsdb-server.service \
463 "
464
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
# Arguments:
#   $1 - ip: node to collect from
#   $2 - folder: local destination folder
#   $3 - node_type: "control" or "compute"; selects the extra service list
function collect_openstack_logs() {
    local -r ip=${1}
    local -r folder=${2}
    local -r node_type=${3}
    local oslogs="${folder}/oslogs"

    printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
    rm -rf "${oslogs}"
    mkdir -p "${oslogs}"
    # There are always some logs in /opt/stack/logs and this also covers the
    # pre-queens branches which always use /opt/stack/logs
    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links

    # Starting with queens break out the logs from journalctl
    if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
        # BUGFIX: the node_type test in the generated script is now expanded
        # LOCALLY (no backslash) so the correct branch is baked in; the old
        # "\${node_type}" form deferred to the remote shell, where node_type
        # is never set, so control nodes always took the compute branch.
        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"

function extract_from_journal() {
    local -r services=\${1}
    local -r folder=\${2}
    local -r node_type=\${3}
    printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
    for service in \${services}; do
        # strip anything before @ and anything after .
        # devstack@g-api.service will end as g-api
        service_="\${service#*@}"
        service_="\${service_%.*}"
        sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
    done
}

rm -rf /tmp/oslogs
mkdir -p /tmp/oslogs
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "${node_type}" = "control" ]; then
    extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
else
    extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
fi
ls -al /tmp/oslogs
EOF
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
        printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
        cat "${WORKSPACE}"/collect_openstack_logs.sh
        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
    fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
}
522
523 function collect_netvirt_logs() {
524     set +e  # We do not want to create red dot just because something went wrong while fetching logs.
525
526     cat > extra_debug.sh << EOF
527 echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
528 /usr/sbin/lsmod | /usr/bin/grep openvswitch
529 echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
530 sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
531 echo -e "\\nsudo netstat -punta\\n"
532 sudo netstat -punta
533 echo -e "\\nsudo getenforce\\n"
534 sudo getenforce
535 echo -e "\\nsudo systemctl status httpd\\n"
536 sudo systemctl status httpd
537 echo -e "\\nenv\\n"
538 env
539 source /opt/stack/devstack/openrc admin admin
540 echo -e "\\nenv after openrc\\n"
541 env
542 echo -e "\\nsudo du -hs /opt/stack"
543 sudo du -hs /opt/stack
544 echo -e "\\nsudo mount"
545 sudo mount
546 echo -e "\\ndmesg -T > /tmp/dmesg.log"
547 dmesg -T > /tmp/dmesg.log
548 echo -e "\\njournalctl > /tmp/journalctl.log\\n"
549 sudo journalctl > /tmp/journalctl.log
550 echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
551 ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
552 EOF
553
554     # Since this log collection work is happening before the archive build macro which also
555     # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
556     # archives build step will essentially be a noop.
557     mkdir -p "${WORKSPACE}"/archives
558
559     mv /tmp/changes.txt "${WORKSPACE}"/archives
560     mv /tmp/validations.txt "${WORKSPACE}"/archives
561     mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
562     mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
563     ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
564     scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/
565
566     sleep 5
567     # FIXME: Do not create .tar and gzip before copying.
568     for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
569         CONTROLLERIP=ODL_SYSTEM_${i}_IP
570         echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
571         NODE_FOLDER="odl_${i}"
572         mkdir -p "${NODE_FOLDER}"
573         echo "Lets's take the karaf thread dump again..."
574         ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
575         pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
576         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
577         # $pid needs to be parsed client-side
578         # shellcheck disable=SC2029
579         ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
580         echo "killing karaf process..."
581         # shellcheck disable=SC2016
582         ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
583         ${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
584         scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
585         ${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
586         scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
587         ${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
588         scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
589         ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
590         ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
591         scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
592         ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
593         scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
594         tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
595         grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
596         grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
597             "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
598         # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
599         sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
600         mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
601         rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
602         mv -- *_threads* "${NODE_FOLDER}"
603         mv ps_* "${NODE_FOLDER}"
604         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
605     done
606
607     print_job_parameters > "${WORKSPACE}"/archives/params.txt
608
609     # Control Node
610     for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
611         OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
612         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
613             echo "collect_logs: for openstack combo node ip: ${!OSIP}"
614             NODE_FOLDER="combo_${i}"
615         else
616             echo "collect_logs: for openstack control node ip: ${!OSIP}"
617             NODE_FOLDER="control_${i}"
618         fi
619         mkdir -p "${NODE_FOLDER}"
620         tcpdump_stop "${!OSIP}"
621         scp extra_debug.sh "${!OSIP}":/tmp
622         # Capture compute logs if this is a combo node
623         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
624             scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
625             scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
626             scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
627             rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
628         fi
629         ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
630         scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
631         scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
632         scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
633         scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
634         scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
635         scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
636         scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
637         scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
638         scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
639         scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
640         scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
641         scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
642         scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
643         scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
644         scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
645         scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
646         scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
647         scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
648         scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
649         scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
650         scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
651         scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
652         scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
653         scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
654         scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
655         scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
656         scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
657         scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
658         scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
659         scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
660         collect_files "${!OSIP}" "${NODE_FOLDER}"
661         ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
662         scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
663         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
664         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
665         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
666         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
667         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
668         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
669         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
670         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
671         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
672         mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
673         # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
674         tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
675         mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
676         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
677     done
678
679     # Compute Nodes
680     for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
681         OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
682         echo "collect_logs: for openstack compute node ip: ${!OSIP}"
683         NODE_FOLDER="compute_${i}"
684         mkdir -p "${NODE_FOLDER}"
685         tcpdump_stop "${!OSIP}"
686         scp extra_debug.sh "${!OSIP}":/tmp
687         ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
688         scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
689         scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
690         scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
691         scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
692         scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
693         scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
694         scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
695         scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
696         scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
697         scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
698         scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
699         scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
700         scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
701         scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
702         scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
703         collect_files "${!OSIP}" "${NODE_FOLDER}"
704         ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
705         scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
706         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
707         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
708         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
709         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
710         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
711         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
712         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
713         mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
714         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
715     done
716
717     # Tempest
718     DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
719     TESTREPO=".stestr"
720     TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
721     # Look for tempest test results in the $TESTREPO dir and copy if found
722     if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
723         ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
724         ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
725         mkdir -p "${TEMPEST_LOGS_DIR}"
726         scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
727         scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
728     else
729         echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
730     fi
731 } # collect_netvirt_logs()
732
# Utility function for joining strings.
# Arguments:
#   $1  - first string (also used as the accumulator seed)
#   $@  - remaining strings, appended space-separated
# Outputs: the joined string on stdout.
function join() {
    # All work variables are local so the helper does not clobber
    # identically-named globals in scripts that source this file.
    local delim=' '
    local str
    local final=$1
    shift

    for str in "$@" ; do
        final=${final}${delim}${str}
    done

    echo "${final}"
}
744
function get_nodes_list() {
    # Assemble the space-separated list of ODL controller IPs from the
    # ODL_SYSTEM_<n>_IP environment variables.
    for ((i = 1; i <= NUM_ODL_SYSTEM; i++)); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        nodes[$i]=${!CONTROLLERIP}
    done

    # nodes_list is deliberately global: create_configuration_script()
    # expands it into the generated configuration script.
    nodes_list=$(join "${nodes[@]}")
    echo "${nodes_list}"
}
755
# Compute the karaf feature list to install on the controllers.
# Reads CONTROLLERSCOPE and CONTROLLERFEATURES; exports ACTUALFEATURES
# (comma-separated) and SPACE_SEPARATED_FEATURES, and bumps CONTROLLERMEM
# when the 'all' scope is selected.
function get_features() {
    if [[ "${CONTROLLERSCOPE}" == 'all' ]]; then
        ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
        export CONTROLLERMEM="3072m"
    else
        ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    fi

    # Add decanter features to allow JVM monitoring
    ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch"

    # Some versions of jenkins job builder produce a feature list containing
    # spaces and a trailing newline — strip all whitespace characters.
    ACTUALFEATURES=$(printf '%s' "${ACTUALFEATURES}" | tr -d ' \n\r')
    echo "ACTUALFEATURES: ${ACTUALFEATURES}"

    # When features are installed via the karaf shell, a space-separated
    # variant of the list is needed as well.
    SPACE_SEPARATED_FEATURES="${ACTUALFEATURES//,/ }"
    echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

    export ACTUALFEATURES
    export SPACE_SEPARATED_FEATURES
}
780
# Create the configuration script to be run on controllers.
# Writes ${WORKSPACE}/configuration-script.sh; the generated script downloads
# and unpacks the distribution, patches the pax-url-mvn repository list and the
# karaf feature repositories, optionally seeds featuresBoot, configures logging
# and JVM memory, and runs configure_cluster.sh with the member index ($1) and
# the node list.
# Globals read: WORKSPACE, BUNDLEFOLDER, ACTUAL_BUNDLE_URL, BUNDLE,
#   ACTUALFEATURES, USEFEATURESBOOT, KARAF_VERSION, BUNDLE_VERSION, REPO_URL,
#   CONTROLLERDEBUGMAP, JAVA_HOME, CONTROLLERMEM, MAVENCONF, FEATURESCONF,
#   MEMCONF, AKKACONF, MODULESCONF, MODULESHARDSCONF,
#   nodes_list (set by get_nodes_list).
# NOTE: the here-doc delimiter is unquoted, so unescaped $ expressions expand
# NOW (at generation time on the Jenkins minion); \$-escaped expressions expand
# LATER on the controller. Be careful when editing the template below.
function create_configuration_script() {
    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
   sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
cat ${FEATURESCONF}

configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"

set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"

echo "Listing all open ports on controller system..."
netstat -pnatu

# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
    echo "Custom shard config exists!!!"
    echo "Copying the shard config..."
    cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
fi

echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}

echo "Dump akka.conf"
cat ${AKKACONF}

echo "Dump modules.conf"
cat ${MODULESCONF}

echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
}
845
# Create the startup script to be run on controllers.
# Writes ${WORKSPACE}/startup-script.sh; the generated script redirects the
# karaf console output to karaf_console.log and launches the controller via
# the distribution's bin/start.
# Globals read: WORKSPACE, BUNDLEFOLDER.
# NOTE: the here-doc is unquoted, so ${BUNDLEFOLDER} expands at generation time.
function create_startup_script() {
    cat > "${WORKSPACE}"/startup-script.sh <<EOF
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
EOF
# cat > ${WORKSPACE}/startup-script.sh <<EOF
}
858
# Create the post-startup script to be run on controllers.
# Writes ${WORKSPACE}/post-startup-script.sh; the generated script waits for
# the karaf SSH port, refreshes the decanter bundles, optionally installs
# features one at a time, and then waits for the infrautils "System ready"
# marker in karaf.log, dumping diagnostics and exiting non-zero on timeout.
# Globals read: WORKSPACE, BUNDLEFOLDER, USEFEATURESBOOT, SPACE_SEPARATED_FEATURES.
# NOTE: the here-doc is unquoted — unescaped $ expressions expand at
# generation time, \$-escaped expressions expand later on the controller.
function create_post_startup_script() {
    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
# wait up to 60s for karaf port 8101 to be opened, polling every 5s
loop_count=0;
until [[ \$loop_count -ge 12 ]]; do
    netstat -na | grep 8101 && break;
    loop_count=\$[\$loop_count+1];
    sleep 5;
done

# This workaround is required for Karaf decanter to work proper
# The bundle:refresh command does not fail if the decanter bundles are not present
echo "ssh to karaf console to do bundle refresh of decanter jmx collector"
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                     -o UserKnownHostsFile=/dev/null \
                     -o LogLevel=error \
                     -p 8101 karaf@localhost \
                     "bundle:refresh org.apache.karaf.decanter.collector.jmx && bundle:refresh org.apache.karaf.decanter.appender.elasticsearch"

if [[ "$USEFEATURESBOOT" != "True" ]]; then

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
for i in {1..36}; do
    sleep 5;
    grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    if [ \$? -eq 0 ]; then
        echo "Controller is UP"
        break
    fi
done;

# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -ne 0 ]; then
    echo "Timeout Controller DOWN"
    echo "Dumping first 500K bytes of karaf log..."
    head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Dumping last 500K bytes of karaf log..."
    tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Listing all open ports on controller system"
    netstat -pnatu
    exit 1
fi

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
EOF
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF
}
940
# Copy over the configuration script and configuration files to each controller
# Execute the configuration script on each controller.
function copy_and_run_configuration_script() {
    local node_ip
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        node_ip="${!CONTROLLERIP}"
        echo "Configuring member-${i} with IP address ${node_ip}"
        scp "${WORKSPACE}/configuration-script.sh" "${node_ip}:/tmp/"
        # $i needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${node_ip}" "bash /tmp/configuration-script.sh ${i}"
    done
}
953
# Copy over the startup script to each controller and execute it.
function copy_and_run_startup_script() {
    local node_ip
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        node_ip="${!CONTROLLERIP}"
        echo "Starting member-${i} with IP address ${node_ip}"
        scp "${WORKSPACE}/startup-script.sh" "${node_ip}:/tmp/"
        ssh "${node_ip}" "bash /tmp/startup-script.sh"
    done
}
963
function copy_and_run_post_startup_script() {
    # Run the post-startup checks on every controller; the seed index passed
    # to the remote script wraps back to 1 after the last cluster member.
    local node_ip
    seed_index=1
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        node_ip="${!CONTROLLERIP}"
        echo "Execute the post startup script on controller ${node_ip}"
        scp "${WORKSPACE}/post-startup-script.sh" "${node_ip}:/tmp/"
        # $seed_index needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${node_ip}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
        if (( i % NUM_ODL_SYSTEM == 0 )); then
            seed_index=1
        fi
    done
}
978
# Collect a jstack thread dump from the karaf JVM on every controller.
# Globals read: NUM_ODL_SYSTEM, ODL_SYSTEM_<i>_IP, WORKSPACE, JAVA_HOME.
# Outputs: ps_before.log (overwritten per node) and
#   karaf_<i>_<pid>_threads_before.log under ${WORKSPACE}.
function dump_controller_threads() {
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Let's take the karaf thread dump"
        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
        # Single awk pass: robust against variable column spacing, skips the
        # awk process itself, and stops after the first match so a stray
        # second JVM cannot produce a multi-line pid.
        pid=$(awk '/org\.apache\.karaf\.main\.Main/ && !/awk/ {print $2; exit}' "${WORKSPACE}"/ps_before.log)
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        # ${pid} needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
    done
}
991
# Function to build OVS from git repo
# Generates a build script, runs it on the remote host and copies the
# resulting RPMs back.
# Arguments:
#   $1 - IP of the host to build on
#   $2 - OVS version/ref to build ("v2.6.1-nsh" triggers the nsh patch set)
#   $3 - local directory to receive the built RPMs
# Globals read: WORKSPACE, SSH.
# NOTE: the here-doc is unquoted — ${version} expands now; \$-escaped
# expressions expand on the remote host. The remote script targets
# CentOS/yum hosts (assumed provisioned that way — TODO confirm).
function build_ovs() {
    local -r ip=$1
    local -r version=$2
    local -r rpm_path="$3"

    echo "Building OVS ${version} on ${ip} ..."
    cat > "${WORKSPACE}"/build_ovs.sh << EOF
set -ex -o pipefail

echo '---> Building openvswitch version ${version}'

# Install running kernel devel packages
K_VERSION=\$(uname -r)
YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
# Install centos-release to update vault repos from which to fetch
# kernel devel packages
sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}

TMP=\$(mktemp -d)
pushd \${TMP}

git clone https://github.com/openvswitch/ovs.git
cd ovs

if [ "${version}" = "v2.6.1-nsh" ]; then
    git checkout v2.6.1
    echo "Will apply nsh patches for OVS version 2.6.1"
    git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
    git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
else
    git checkout ${version}
fi

# On early versions of OVS, flake warnings would fail the build.
# Remove it.
sudo pip uninstall -y flake8

# Get rid of sphinx dep as it conflicts with the already
# installed one (via pip). Docs wont be built.
sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in

sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
./boot.sh
./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
make rpm-fedora RPMBUILD_OPT="--without check"
# Build dkms only for now
# make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec

mkdir -p /tmp/ovs_rpms
cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/

popd
rm -rf \${TMP}
EOF

    # Ship the script to the build host, run it (logging to install_ovs.txt),
    # then pull the produced RPMs back and clean up the remote staging dir.
    scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
    scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
    ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
}
1059
# Install OVS RPMs from yum repo
# Generates an install script and runs it on the remote host.
# Arguments:
#   $1 - IP of the host to install on
#   $2 - yum repo URL (or file: path) offering the openvswitch packages
# Globals read: WORKSPACE, SSH.
# NOTE: the here-doc is unquoted — ${rpm_repo} expands now; \$-escaped
# expressions expand on the remote host. The remote script targets
# CentOS/yum hosts (assumed provisioned that way — TODO confirm).
function install_ovs_from_repo() {
    local -r ip=$1
    local -r rpm_repo="$2"

    echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
    cat > "${WORKSPACE}"/install_ovs.sh << EOF
set -ex -o pipefail

echo '---> Installing openvswitch from ${rpm_repo}'

# We need repoquery from yum-utils.
sudo yum -y install yum-utils

# Get openvswitch packages offered by custom repo.
# dkms package will have priority over kmod.
OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
[ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
[ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
[ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"

# Bail with error if custom repo was provided but we could not
# find suitable packages there.
[ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1

# Install kernel & devel packages for the openvswitch dkms package.
if [ -n "\${OVS_DKMS_PKG}" ]; then
    # install centos-release to update vault repos from which to fetch
    # kernel devel packages
    sudo yum -y install centos-release
    K_VERSION=\$(uname -r)
    YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
    sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
fi

PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')

# Install OVS offered by custom repo.
sudo yum-config-manager --add-repo "${rpm_repo}"
sudo yum -y versionlock delete openvswitch-*
sudo yum -y remove openvswitch-*
sudo yum -y --nogpgcheck install \${OVS_PKGS}
sudo yum -y versionlock add \${OVS_PKGS}

# Most recent OVS versions have some incompatibility with certain versions of iptables
# This below line will overcome that problem.
sudo modprobe openvswitch

# Start OVS and print details
sudo systemctl start openvswitch
sudo systemctl enable openvswitch
sudo ovs-vsctl --retry -t 5 show
sudo modinfo openvswitch

# dkms rpm install can fail silently (probably because the OVS version is
# incompatible with the running kernel), verify module was updated.
NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
[ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
EOF

    # Ship and run the install script remotely, logging to install_ovs.txt.
    scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
}
1127
# Install OVS RPMS from path
# Stages the RPMs found under the given local path into a throwaway yum repo
# on the remote host, then delegates to install_ovs_from_repo.
function install_ovs_from_path() {
    local -r node_ip=$1
    local -r rpms_dir="$2"

    echo "Creating OVS RPM repo on ${node_ip} ..."
    ${SSH} "${node_ip}" "mkdir -p /tmp/ovs_rpms"
    scp -r "${rpms_dir}"/* "${node_ip}":/tmp/ovs_rpms
    ${SSH} "${node_ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
    install_ovs_from_repo "${node_ip}" file:/tmp/ovs_rpms
}
1139
1140