204362b187361a9221871df795d952e66bf0697e
[releng/builder.git] / jjb / integration / common-functions.sh
1 #!/bin/bash
2
3 echo "common-functions.sh is being sourced"
4
5 BUNDLEFOLDER=$1
6
7 # Basic controller configuration settings
8 export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
9 export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
10 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
11 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
12 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
13 export CONTROLLERMEM=${CONTROLLERMAXMEM}
14
15 # Cluster specific configuration settings
16 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
17 export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
18 export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
19
function print_common_env() {
    # Dump the common-functions configuration variables for debugging.
    # Output format (one "NAME: value" line per variable, trailing blank
    # line) is kept identical to the historical heredoc version.
    printf '%s\n' \
        "common-functions environment:" \
        "MAVENCONF: ${MAVENCONF}" \
        "ACTUALFEATURES: ${ACTUALFEATURES}" \
        "FEATURESCONF: ${FEATURESCONF}" \
        "CUSTOMPROP: ${CUSTOMPROP}" \
        "LOGCONF: ${LOGCONF}" \
        "MEMCONF: ${MEMCONF}" \
        "CONTROLLERMEM: ${CONTROLLERMEM}" \
        "AKKACONF: ${AKKACONF}" \
        "MODULESCONF: ${MODULESCONF}" \
        "MODULESHARDSCONF: ${MODULESHARDSCONF}" \
        "SUITES: ${SUITES}" \
        ""
}
print_common_env
38
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
function set_java_vars() {
    # $1 - java home directory to use for this run
    # $2 - max heap value to substitute for JAVA_MAX_MEM (e.g. 2048m)
    # $3 - path to the karaf setenv file to edit
    local -r java_home=$1
    local -r controllermem=$2
    local -r memconf=$3

    echo "Configure"
    echo "    java home: ${java_home}"
    echo "    max memory: ${controllermem}"
    echo "    memconf: ${memconf}"

    # Uncomment/point JAVA_HOME and bump the max heap in the setenv file.
    # NOTE(review): GNU sed parses "-ie" as -i with backup suffix "e",
    # leaving a "${memconf}e" backup file behind -- confirm that is intended.
    # We do not want expressions to expand here.
    # shellcheck disable=SC2016
    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
    echo "cat ${memconf}"
    cat "${memconf}"

    echo "Set Java version"
    # Register and select the requested JDK system-wide via alternatives.
    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
    echo "JDK default version ..."
    java -version

    echo "Set JAVA_HOME"
    export JAVA_HOME="${java_home}"

    # shellcheck disable=SC2037
    JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
    echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
} # set_java_vars()
70
# shellcheck disable=SC2034
# foo appears unused. Verify it or export it.
function configure_karaf_log() {
    # Tune the Karaf logging config (${LOGCONF}) for CI: raise the log size
    # limits, quiet the noisy yangtools schema resolver and append any custom
    # logger levels requested via $2. Handles both log4j and log4j2 configs.
    #   $1 - karaf version (logged only)
    #   $2 - space separated "module:level" map, module without the
    #        "org.opendaylight." prefix
    local -r karaf_version=$1
    local -r controllerdebugmap=$2
    local logapi=log4j

    # Check what the logging.cfg file is using for the logging api: log4j or log4j2
    if grep "log4j2" "${LOGCONF}"; then
        logapi=log4j2
    fi

    echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
    if [ "${logapi}" == "log4j2" ]; then
        # FIXME: Make log size limit configurable from build parameter.
        # Increase default log file size to 1GB
        sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
        orgmodule_="${orgmodule//./_}"
        # log4j2 properties syntax: logger.<label>.name is the logger's
        # fully-qualified name, logger.<label>.level is its level.
        # (Previously .name was mistakenly set to "WARN".)
        echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
    else
        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
        # FIXME: Make log size limit configurable from build parameter.
        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
    fi

    # Add custom logging levels
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."
    unset IFS
    echo "controllerdebugmap: ${controllerdebugmap}"
    if [ -n "${controllerdebugmap}" ]; then
        for kv in ${controllerdebugmap}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                if [ "${logapi}" == "log4j2" ]; then
                    orgmodule_="${orgmodule//./_}"
                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                else
                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                fi
            fi
        done
    fi

    echo "cat ${LOGCONF}"
    cat "${LOGCONF}"
} # function configure_karaf_log()
127
function configure_karaf_log_for_apex() {
    # Inject custom ODL logger levels into the given control node's puppet
    # hiera data, which is consumed later by the puppet deploy.
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."
    #
    # TODO: add the extra steps to this function to do any extra work
    # in this apex environment like we do in our standard environment.
    # EX: log size, rollover, etc.

    local -r controller_ip=$1

    unset IFS
    # shellcheck disable=SC2153
    echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
    if [ -n "${CONTROLLERDEBUGMAP}" ]; then
        logging_config='\"opendaylight::log_levels\": {'
        for kv in ${CONTROLLERDEBUGMAP}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
            fi
        done
        # replace the trailing comma with a closing brace followed by trailing comma
        logging_config=${logging_config%,}" },"
        echo "$logging_config"

        # find a sane line number to inject the custom logging json; look it
        # up on the same node we are about to modify (previously this was
        # hard-coded to OPENSTACK_CONTROL_NODE_1_IP, breaking other nodes)
        lineno=$(ssh "$controller_ip" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
        # We purposely want these variables to expand client-side
        # shellcheck disable=SC2029
        ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
        ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
    fi
} # function configure_karaf_log_for_apex()
168
function configure_odl_features_for_apex() {

    # If the environment variable $ACTUALFEATURES is non-empty, rewrite the
    # puppet config file on the given controller so that
    # opendaylight::extra_features contains exactly the (comma separated)
    # features listed in that variable; otherwise this function is a noop.

    local -r controller_ip=$1
    local -r config_file=/etc/puppet/hieradata/service_configs.json

    # ACTUALFEATURES is expanded client-side (it is only guaranteed to be set
    # on this host) and unquoted so each feature becomes a separate word;
    # the remote loop variable \$feature stays escaped.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in ${ACTUALFEATURES//,/ }; do
    sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
done
echo "Modified puppet-opendaylight service_configs.json..."
cat $config_file
EOF

    echo "Feature configuration script..."
    cat /tmp/set_odl_features.sh

    if [ -n "${ACTUALFEATURES}" ]; then
        scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
        ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
    fi

} # function configure_odl_features_for_apex()
196
function get_os_deploy() {
    # Map the number of OpenStack systems to a topology descriptor string
    # ("<combo>cmb-<control>ctl-<compute>cmp") and export it.
    # $1 - system count (defaults to $NUM_OPENSTACK_SYSTEM).
    local -r system_count=${1:-$NUM_OPENSTACK_SYSTEM}
    local topology
    case "${system_count}" in
    1)
        topology="1cmb-0ctl-0cmp"
        ;;
    2)
        topology="1cmb-0ctl-1cmp"
        ;;
    *)
        # Three or more systems use the split control/compute layout.
        topology="0cmb-1ctl-2cmp"
        ;;
    esac
    export OPENSTACK_TOPO="${topology}"
}
212
function get_test_suites() {

    # Resolve the list of robot suites to run and assign the space separated
    # result to the variable whose NAME is passed in $1.
    #let the caller pick the name of the variable we will assign the suites to
    local __suite_list=$1

    echo "Locating test plan to use..."
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
    # Fall back to the generic test plan when no stream-specific one exists.
    if [ ! -f "${testplan_filepath}" ]; then
        testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
    fi

    # Append the JVM plots suite when JVM monitoring is enabled.
    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
        add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot"
        echo "${add_test}" >> "$testplan_filepath"
    fi

    echo "Changing the testplan path..."
    # Rewrite the leading "integration" path component to the workspace path.
    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
    cat testplan.txt

    # Use the testplan if specific SUITES are not defined.
    if [ -z "${SUITES}" ]; then
        # Strip comment and blank lines, then join the rest with spaces.
        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
    else
        suite_list=""
        workpath="${WORKSPACE}/test/csit/suites"
        for suite in ${SUITES}; do
            fullsuite="${workpath}/${suite}"
            if [ -z "${suite_list}" ]; then
                suite_list+=${fullsuite}
            else
                suite_list+=" "${fullsuite}
            fi
        done
    fi

    # Indirect assignment into the caller-named variable.
    eval "$__suite_list='$suite_list'"
}
251
# Locate the named plan ("script" or "config") and source each step listed
# in it. Falls back from the type-specific plan to STREAMTESTPLAN and then
# to TESTPLAN when the file does not exist.
function run_plan() {
    local -r type=$1

    # Pick the plan file name for this plan type.
    case ${type} in
    script)
        plan=$SCRIPTPLAN
        ;;
    config|*)
        plan=$CONFIGPLAN
        ;;
    esac

    printf "Locating %s plan to use...\\n" "${type}"
    plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
    if [ ! -f "${plan_filepath}" ]; then
        plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
        if [ ! -f "${plan_filepath}" ]; then
            plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
        fi
    fi

    if [ -f "${plan_filepath}" ]; then
        printf "%s plan exists!!!\\n" "${type}"
        printf "Changing the %s plan path...\\n" "${type}"
        # Rewrite the leading "integration" path component to the workspace.
        sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
        cat "${type}plan.txt"
        # Source every non-comment, non-blank line, in order.
        # shellcheck disable=SC2013
        for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
            printf "Executing %s...\\n" "${line}"
            # shellcheck source=${line} disable=SC1091
            source "${line}"
        done
    fi
    printf "Finished running %s plans\\n" "${type}"
} # function run_plan()
287
# Run scripts to support JVM monitoring.
function add_jvm_support() {
    # Nothing to do when monitoring is explicitly disabled.
    if [ "${ELASTICSEARCHATTRIBUTE}" == "disabled" ]; then
        return
    fi
    set_elasticsearch_attribute "${ELASTICSEARCHATTRIBUTE}"
    set_jvm_common_attribute
} # function add_jvm_support()
296
#Expected input parameter: long/short/a number
function set_elasticsearch_attribute()
{
# Configure the Karaf Decanter scheduler polling period and copy the config
# to every ODL controller. $1 selects the period in milliseconds:
# "short" (5000), "long" (120000), or a raw integer clamped to that range
# (anything else falls back to the default, i.e. short).
short=5000
long=120000
default=$short

case $1 in
short)
  period=$short
  ;;
long)
  period=$long
  ;;
*)
  # Accept only integers inside [short, long]; use the default otherwise.
  # (Two separate tests joined with && instead of the obsolescent "-a".)
  if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge $short ] && [ "$1" -le $long ]; then
      period=$1
  else
      period=$default
  fi
  ;;
esac

cat > "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg <<EOF
period=$period

EOF

echo "Copying config files to ODL Controller folder"

# shellcheck disable=SC2086
for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
        # CONTROLLERIP holds the NAME of the per-node variable; ${!CONTROLLERIP}
        # dereferences it to the actual IP.
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Set Decanter Polling Period to ${!CONTROLLERIP}"
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "mkdir -p \"/tmp/${BUNDLEFOLDER}/etc/opendaylight/karaf/\""
        scp "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
done
} #function set_elasticsearch_attribute
338
function set_jvm_common_attribute()
{
# Write the Decanter JMX collector configs and deploy them, together with a
# local Elasticsearch 1.7.5 instance, to every ODL controller.
cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg <<EOF
type=jmx-local
url=local
object.name=java.lang:type=*,name=*

EOF

# NOTE(review): this "jmx-others" config also uses type=jmx-local/url=local,
# differing from jmx-local only in the object.name filter -- confirm intended.
cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg <<EOF
type=jmx-local
url=local
object.name=java.lang:type=*

EOF

# shellcheck disable=SC2086
for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
    # CONTROLLERIP holds the NAME of the per-node variable; ${!CONTROLLERIP}
    # dereferences it to the actual IP.
    CONTROLLERIP=ODL_SYSTEM_${i}_IP

    cat > "${WORKSPACE}"/elasticsearch.yml <<EOF
    discovery.zen.ping.multicast.enabled: false

EOF

    # Startup script executed remotely: wipe any stale data dir, then start
    # Elasticsearch in the background.
    cat > "${WORKSPACE}"/elasticsearch_startup.sh <<EOF
    cd /tmp/elasticsearch/elasticsearch-1.7.5
    ls -al

    if [ -d "data" ]; then
        echo "data directory exists, deleting...."
        rm -r data
    else
        echo "data directory does not exist"
    fi

    cd /tmp/elasticsearch
    ls -al

    echo "Starting Elasticsearch node"
    sudo /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch > /dev/null 2>&1 &
    ls -al /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch

EOF
    echo "Setup ODL_SYSTEM_IP specific config files for ${!CONTROLLERIP} "
    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg
    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg
    cat "${WORKSPACE}"/elasticsearch.yml


    echo "Copying config files to ${!CONTROLLERIP}"
    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
    scp "${WORKSPACE}"/elasticsearch.yml "${!CONTROLLERIP}":/tmp/

    ssh "${!CONTROLLERIP}" "sudo ls -al /tmp/elasticsearch/"
    ssh "${!CONTROLLERIP}" "sudo mv /tmp/elasticsearch.yml /tmp/elasticsearch/elasticsearch-1.7.5/config/"
    ssh "${!CONTROLLERIP}" "cat /tmp/elasticsearch/elasticsearch-1.7.5/config/elasticsearch.yml"

    echo "Copying the elasticsearch_startup script to ${!CONTROLLERIP}"
    cat "${WORKSPACE}"/elasticsearch_startup.sh
    scp "${WORKSPACE}"/elasticsearch_startup.sh "${!CONTROLLERIP}":/tmp
    ssh "${!CONTROLLERIP}" 'bash /tmp/elasticsearch_startup.sh'
    ssh "${!CONTROLLERIP}" 'ps aux | grep elasticsearch'
done
} #function set_jvm_common_attribute
406
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
function timer()
{
    if [ $# -eq 0 ]; then
        # No argument: hand back a fresh timer (epoch seconds, no newline).
        printf "%s" "$(date "+%s")"
        return
    fi

    local start_time=$1
    end_time=$(date "+%s")

    # An empty start time is treated as "just started".
    [ -z "$start_time" ] && start_time=$end_time

    delta_time=$((end_time - start_time))
    dh=$((delta_time / 3600))
    dm=$(((delta_time / 60) % 60))
    ds=$((delta_time % 60))
    # Emit the elapsed time as H:MM:SS (no trailing newline).
    printf "%d:%02d:%02d" $dh $dm $ds
}
431
# convert commas in csv strings to spaces (ssv)
# Runs of spaces in the result are collapsed to a single space.
function csv2ssv() {
    local -r csv=$1
    # ssv must be local and initialized: previously it was a global that kept
    # the previous call's value, so csv2ssv "" leaked the last result.
    local ssv=""
    if [ -n "${csv}" ]; then
        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
441
function is_openstack_feature_enabled() {
    # Print 1 when $1 appears in the comma-separated ENABLE_OS_SERVICES
    # list, 0 otherwise.
    local -r wanted=$1
    local candidate
    for candidate in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
        if [ "${candidate}" == "${wanted}" ]; then
            echo 1
            return
        fi
    done
    echo 0
}
452
453 SSH="ssh -t -t"
454
# shellcheck disable=SC2153
function print_job_parameters() {
    # Dump every job-level parameter for debugging; collect_netvirt_logs
    # archives this output as params.txt.
    cat << EOF

Job parameters:
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
CONTROLLERMAXMEM: ${CONTROLLERMAXMEM}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
SUITES: ${SUITES}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}

EOF
}
501
# Start a background tcpdump capture on the given node.
#   $1 - filename prefix for the capture file
#   $2 - node IP to ssh into
#   $3 - tcpdump capture filter expression (may be empty)
function tcpdump_start() {
    local -r prefix=$1
    local -r ip=$2
    local -r filter=$3
    # Spaces in the filter are replaced so it can be embedded in the file name.
    filter_=${filter// /_}

    printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
    # $filter needs to be parsed client-side
    # shellcheck disable=SC2029
    ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
    ${SSH} "${ip}" "ps -ef | grep tcpdump"
}
514
# Stop tcpdump on the given node ($1) and xz-compress the capture files so
# the later log collection step (which gathers *.xz) picks them up.
function tcpdump_stop() {
    local -r ip=$1

    printf "node %s: stopping tcpdump\\n" "$ip"
    ${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
    ${SSH} "${ip}" "sudo pkill -f tcpdump"
    ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
    ${SSH} "${ip}" "sudo ls -al /tmp/*.pcap"
    # copy_logs will copy any *.xz files
}
525
# Collect the list of files on the hosts
# $1 - node IP, $2 - local destination folder.
# Produces find.tar.xz (remote find listings) and rsync.tar.xz (local
# rsync --list-only listings) inside ${folder}.
function collect_files() {
    local -r ip=$1
    local -r folder=$2
    finddir=/tmp/finder
    ${SSH} "${ip}" "mkdir -p ${finddir}"
    ${SSH} "${ip}" "sudo find /etc > ${finddir}/find.etc.txt"
    ${SSH} "${ip}" "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
    # NOTE(review): the next two commands both list /var; find2.txt looks
    # like a leftover duplicate of find.var.txt -- confirm before removing.
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find2.txt"
    ${SSH} "${ip}" "sudo find /var > ${finddir}/find.var.txt"
    ${SSH} "${ip}" "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
    scp "${ip}":/tmp/find.tar.xz "${folder}"
    mkdir -p "${finddir}"
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/etc/ > "${finddir}"/rsync.etc.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/opt/stack/ > "${finddir}"/rsync.opt.stack.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh "${ip}":/var/ > "${finddir}"/rsync.var.txt
    tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
    cp /tmp/rsync.tar.xz "${folder}"
}
545
546 # List of extra services to extract from journalctl
547 # Add new services on a separate line, in alpha order, add \ at the end
548 extra_services_cntl=" \
549     dnsmasq.service \
550     httpd.service \
551     libvirtd.service \
552     openvswitch.service \
553     ovs-vswitchd.service \
554     ovsdb-server.service \
555     rabbitmq-server.service \
556 "
557
558 extra_services_cmp=" \
559     libvirtd.service \
560     openvswitch.service \
561     ovs-vswitchd.service \
562     ovsdb-server.service \
563 "
564
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
#   $1 - node IP, $2 - destination folder, $3 - node type ("control"/other)
function collect_openstack_logs() {
    local -r ip=${1}
    local -r folder=${2}
    local -r node_type=${3}
    local oslogs="${folder}/oslogs"

    printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
    rm -rf "${oslogs}"
    mkdir -p "${oslogs}"
    # There are always some logs in /opt/stack/logs and this also covers the
    # pre-queens branches which always use /opt/stack/logs
    rsync -avhe ssh "${ip}":/opt/stack/logs/* "${oslogs}" # rsync to prevent copying of symbolic links

    # Starting with queens break out the logs from journalctl
    if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
        # node_type is expanded client-side below: it is a local of this
        # function and would be unset in the remote shell if left escaped
        # (which made the "control" branch unreachable before).
        cat > "${WORKSPACE}"/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"

function extract_from_journal() {
    local -r services=\${1}
    local -r folder=\${2}
    printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
    for service in \${services}; do
        # strip anything before @ and anything after .
        # devstack@g-api.service will end as g-api
        service_="\${service#*@}"
        service_="\${service_%.*}"
        sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
    done
}

rm -rf /tmp/oslogs
mkdir -p /tmp/oslogs
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "${node_type}" = "control" ]; then
    extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
else
    extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
fi
ls -al /tmp/oslogs
EOF
        printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
        cat "${WORKSPACE}"/collect_openstack_logs.sh
        scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
        ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
        rsync -avhe ssh "${ip}":/tmp/oslogs/* "${oslogs}"
        scp "${ip}":/tmp/collect_openstack_logs.log "${oslogs}"
    fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
}
622
623 function collect_netvirt_logs() {
624     set +e  # We do not want to create red dot just because something went wrong while fetching logs.
625
626     cat > extra_debug.sh << EOF
627 echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
628 /usr/sbin/lsmod | /usr/bin/grep openvswitch
629 echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
630 sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
631 echo -e "\\nsudo netstat -punta\\n"
632 sudo netstat -punta
633 echo -e "\\nsudo getenforce\\n"
634 sudo getenforce
635 echo -e "\\nsudo systemctl status httpd\\n"
636 sudo systemctl status httpd
637 echo -e "\\nenv\\n"
638 env
639 source /opt/stack/devstack/openrc admin admin
640 echo -e "\\nenv after openrc\\n"
641 env
642 echo -e "\\nsudo du -hs /opt/stack"
643 sudo du -hs /opt/stack
644 echo -e "\\nsudo mount"
645 sudo mount
646 echo -e "\\ndmesg -T > /tmp/dmesg.log"
647 dmesg -T > /tmp/dmesg.log
648 echo -e "\\njournalctl > /tmp/journalctl.log\\n"
649 sudo journalctl > /tmp/journalctl.log
650 echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
651 ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
652 EOF
653
654     # Since this log collection work is happening before the archive build macro which also
655     # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
656     # archives build step will essentially be a noop.
657     mkdir -p "${WORKSPACE}"/archives
658
659     mv /tmp/changes.txt "${WORKSPACE}"/archives
660     mv /tmp/validations.txt "${WORKSPACE}"/archives
661     mv "${WORKSPACE}"/rabbit.txt "${WORKSPACE}"/archives
662     mv "${WORKSPACE}"/haproxy.cfg "${WORKSPACE}"/archives
663     ssh "${OPENSTACK_HAPROXY_1_IP}" "sudo journalctl -u haproxy > /tmp/haproxy.log"
664     scp "${OPENSTACK_HAPROXY_1_IP}":/tmp/haproxy.log "${WORKSPACE}"/archives/
665
666     sleep 5
667     # FIXME: Do not create .tar and gzip before copying.
668     for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
669         CONTROLLERIP=ODL_SYSTEM_${i}_IP
670         echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
671         NODE_FOLDER="odl_${i}"
672         mkdir -p "${NODE_FOLDER}"
673         echo "Let's take the karaf thread dump again..."
674         ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
675         pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
676         echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
677         # $pid needs to be parsed client-side
678         # shellcheck disable=SC2029
679         ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_after.log" || true
680         echo "killing karaf process..."
681         # shellcheck disable=SC2016
682         ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
683         ${SSH} "${!CONTROLLERIP}" "sudo journalctl > /tmp/journalctl.log"
684         scp "${!CONTROLLERIP}":/tmp/journalctl.log "${NODE_FOLDER}"
685         ${SSH} "${!CONTROLLERIP}" "dmesg -T > /tmp/dmesg.log"
686         scp "${!CONTROLLERIP}":/tmp/dmesg.log "${NODE_FOLDER}"
687         ${SSH} "${!CONTROLLERIP}" "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
688         scp "${!CONTROLLERIP}":/tmp/etc.tar.xz "${NODE_FOLDER}"
689         ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
690         ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
691         scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${NODE_FOLDER}"
692         ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
693         scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
694         tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
695         grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
696         grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
697             "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
698         # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
699         sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
700         mv "/tmp/odl${i}_exceptions.txt" "${NODE_FOLDER}"
701         rm "${NODE_FOLDER}/odl${i}_karaf.log.tar"
702         mv -- *_threads* "${NODE_FOLDER}"
703         mv ps_* "${NODE_FOLDER}"
704         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
705     done
706
707     print_job_parameters > "${WORKSPACE}"/archives/params.txt
708
709     # Control Node
710     for i in $(seq 1 "${NUM_OPENSTACK_CONTROL_NODES}"); do
711         OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
712         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
713             echo "collect_logs: for openstack combo node ip: ${!OSIP}"
714             NODE_FOLDER="combo_${i}"
715         else
716             echo "collect_logs: for openstack control node ip: ${!OSIP}"
717             NODE_FOLDER="control_${i}"
718         fi
719         mkdir -p "${NODE_FOLDER}"
720         tcpdump_stop "${!OSIP}"
721         scp extra_debug.sh "${!OSIP}":/tmp
722         # Capture compute logs if this is a combo node
723         if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
724             scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
725             scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
726             scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
727             rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
728         fi
729         ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
730         scp "${!OSIP}":/etc/dnsmasq.conf "${NODE_FOLDER}"
731         scp "${!OSIP}":/etc/keystone/keystone.conf "${NODE_FOLDER}"
732         scp "${!OSIP}":/etc/keystone/keystone-uwsgi-admin.ini "${NODE_FOLDER}"
733         scp "${!OSIP}":/etc/keystone/keystone-uwsgi-public.ini "${NODE_FOLDER}"
734         scp "${!OSIP}":/etc/kuryr/kuryr.conf "${NODE_FOLDER}"
735         scp "${!OSIP}":/etc/neutron/dhcp_agent.ini "${NODE_FOLDER}"
736         scp "${!OSIP}":/etc/neutron/metadata_agent.ini "${NODE_FOLDER}"
737         scp "${!OSIP}":/etc/neutron/neutron.conf "${NODE_FOLDER}"
738         scp "${!OSIP}":/etc/neutron/neutron_lbaas.conf "${NODE_FOLDER}"
739         scp "${!OSIP}":/etc/neutron/plugins/ml2/ml2_conf.ini "${NODE_FOLDER}"
740         scp "${!OSIP}":/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini "${NODE_FOLDER}"
741         scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
742         scp "${!OSIP}":/etc/nova/nova-api-uwsgi.ini "${NODE_FOLDER}"
743         scp "${!OSIP}":/etc/nova/nova_cell1.conf "${NODE_FOLDER}"
744         scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
745         scp "${!OSIP}":/etc/nova/placement-uwsgi.ini "${NODE_FOLDER}"
746         scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
747         scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
748         scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
749         scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
750         scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
751         scp "${!OSIP}":/opt/stack/tempest/etc/tempest.conf "${NODE_FOLDER}"
752         scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"
753         scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
754         scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
755         scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
756         scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
757         scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
758         scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
759         scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
760         collect_files "${!OSIP}" "${NODE_FOLDER}"
761         ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
762         scp "${!OSIP}":/tmp/rabbitmq.tar.xz "${NODE_FOLDER}"
763         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
764         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/usr/lib/systemd/system/haproxy.service "${NODE_FOLDER}"
765         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
766         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone_access.log "${NODE_FOLDER}"
767         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/httpd/keystone.log "${NODE_FOLDER}"
768         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
769         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
770         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
771         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
772         mv "local.conf_control_${!OSIP}" "${NODE_FOLDER}/local.conf"
773         # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
774         tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
775         mv /tmp/qdhcp.tar.xz "${NODE_FOLDER}"
776         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
777     done
778
779     # Compute Nodes
780     for i in $(seq 1 "${NUM_OPENSTACK_COMPUTE_NODES}"); do
781         OSIP="OPENSTACK_COMPUTE_NODE_${i}_IP"
782         echo "collect_logs: for openstack compute node ip: ${!OSIP}"
783         NODE_FOLDER="compute_${i}"
784         mkdir -p "${NODE_FOLDER}"
785         tcpdump_stop "${!OSIP}"
786         scp extra_debug.sh "${!OSIP}":/tmp
787         ${SSH} "${!OSIP}" "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
788         scp "${!OSIP}":/etc/nova/nova.conf "${NODE_FOLDER}"
789         scp "${!OSIP}":/etc/nova/nova-cpu.conf "${NODE_FOLDER}"
790         scp "${!OSIP}":/etc/openstack/clouds.yaml "${NODE_FOLDER}"
791         scp "${!OSIP}":/opt/stack/devstack/.stackenv "${NODE_FOLDER}"
792         scp "${!OSIP}":/opt/stack/devstack/nohup.out "${NODE_FOLDER}"/stack.log
793         scp "${!OSIP}":/opt/stack/devstack/openrc "${NODE_FOLDER}"
794         scp "${!OSIP}":/opt/stack/requirements/upper-constraints.txt "${NODE_FOLDER}"
795         scp "${!OSIP}":/tmp/*.xz "${NODE_FOLDER}"/
796         scp "${!OSIP}":/tmp/dmesg.log "${NODE_FOLDER}"
797         scp "${!OSIP}":/tmp/extra_debug.log "${NODE_FOLDER}"
798         scp "${!OSIP}":/tmp/get_devstack.sh.txt "${NODE_FOLDER}"
799         scp "${!OSIP}":/tmp/install_ovs.txt "${NODE_FOLDER}"
800         scp "${!OSIP}":/tmp/journalctl.log "${NODE_FOLDER}"
801         scp "${!OSIP}":/tmp/ovsdb-tool.log "${NODE_FOLDER}"
802         scp "${!OSIP}":/tmp/tcpdump_start.log "${NODE_FOLDER}"
803         collect_files "${!OSIP}" "${NODE_FOLDER}"
804         ${SSH} "${!OSIP}" "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
805         scp "${!OSIP}":/tmp/libvirt.tar.xz "${NODE_FOLDER}"
806         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/etc/hosts "${NODE_FOLDER}"
807         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/audit/audit.log "${NODE_FOLDER}"
808         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/messages* "${NODE_FOLDER}"
809         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/nova-agent.log "${NODE_FOLDER}"
810         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovs-vswitchd.log "${NODE_FOLDER}"
811         rsync --rsync-path="sudo rsync" -avhe ssh "${!OSIP}":/var/log/openvswitch/ovsdb-server.log "${NODE_FOLDER}"
812         collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
813         mv "local.conf_compute_${!OSIP}" "${NODE_FOLDER}"/local.conf
814         mv "${NODE_FOLDER}" "${WORKSPACE}"/archives/
815     done
816
817     # Tempest
818     DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
819     TESTREPO=".stestr"
820     TEMPEST_LOGS_DIR="${WORKSPACE}/archives/tempest"
821     # Look for tempest test results in the $TESTREPO dir and copy if found
822     if ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
823         ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
824         ${SSH} "${OPENSTACK_CONTROL_NODE_1_IP}" "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
825         mkdir -p "${TEMPEST_LOGS_DIR}"
826         scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html" "${TEMPEST_LOGS_DIR}"
827         scp "${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log" "${TEMPEST_LOGS_DIR}"
828     else
829         echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
830     fi
831 } # collect_netvirt_logs()
832
# Utility function for joining strings.
#
# Arguments: $1..$N - strings to join with a single space
# Outputs:   the joined string on stdout
#
# Fix: delim/final/str were previously assigned without 'local', so every
# call leaked them into the sourcing shell's global scope.
function join() {
    local delim=' '
    local final=$1; shift

    local str
    for str in "$@" ; do
        final=${final}${delim}${str}
    done

    echo "${final}"
}
844
# Print a space-separated list of all ODL controller IP addresses.
#
# Reads NUM_ODL_SYSTEM and the indirect ODL_SYSTEM_<n>_IP variables. The
# global 'nodes' array and 'nodes_list' string are left populated for
# callers that use them (the configuration script embeds ${nodes_list}).
function get_nodes_list() {
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        nodes[$i]=${!CONTROLLERIP}
    done

    nodes_list=$(join "${nodes[@]}")
    echo "${nodes_list}"
}
855
# Compute the Karaf feature lists that the controller should install.
#
# Reads:   CONTROLLERSCOPE, CONTROLLERFEATURES, CONTROLLERMEM,
#          ELASTICSEARCHATTRIBUTE
# Exports: ACTUALFEATURES           - comma-separated feature list
#          SPACE_SEPARATED_FEATURES - same list, space separated (needed
#                                     when installing via the karaf shell)
#          CONTROLLERMEM            - bumped to 3072m for the "all" scope
#                                     when it still holds the 2048m default
function get_features() {
    case "${CONTROLLERSCOPE}" in
        all)
            ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
            # "all" features can be heavy, so grow the heap unless a
            # custom job already overrode the default 2G setting.
            if [ "${CONTROLLERMEM}" == "2048m" ]; then
                export CONTROLLERMEM="3072m"
            fi
            ;;
        *)
            ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
            ;;
    esac

    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
        # Add decanter features to allow JVM monitoring
        ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch-rest"
    fi

    # Some versions of jenkins job builder leave spaces and a trailing
    # newline in the feature list; strip them via parameter expansion.
    ACTUALFEATURES="${ACTUALFEATURES//[$'\n\r' ]/}"
    echo "ACTUALFEATURES: ${ACTUALFEATURES}"

    # Space separated copy for one-at-a-time karaf shell installs.
    SPACE_SEPARATED_FEATURES="${ACTUALFEATURES//,/ }"
    echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

    export ACTUALFEATURES
    export SPACE_SEPARATED_FEATURES
}
887
# Create the configuration script to be run on controllers.
#
# Writes ${WORKSPACE}/configuration-script.sh. In the heredoc below,
# unescaped expansions (${BUNDLEFOLDER}, ${ACTUAL_BUNDLE_URL}, ...) are
# substituted NOW on the Jenkins minion, while backslash-escaped ones
# (\$1, \${FEATURE_TEST_STRING}) are left for the controller to expand at
# run time. The generated script downloads and unpacks the distribution,
# patches the maven/feature configs, sets JVM memory, and runs
# configure_cluster.sh with the member index passed as its first argument.
function create_configuration_script() {
    # NOTE(review): heredoc content is written verbatim to the remote
    # script; do not edit it casually — escaping is significant.
    cat > "${WORKSPACE}"/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.2.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
   sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
cat ${FEATURESCONF}

configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"

set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"

echo "Listing all open ports on controller system..."
netstat -pnatu

# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
    echo "Custom shard config exists!!!"
    echo "Copying the shard config..."
    cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
fi

echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}

echo "Dump akka.conf"
cat ${AKKACONF}

echo "Dump modules.conf"
cat ${MODULESCONF}

echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
}
952
# Create the startup script to be run on controllers.
#
# Writes ${WORKSPACE}/startup-script.sh, which redirects the karaf console
# output to karaf_console.log and launches the controller via bin/start.
# ${BUNDLEFOLDER} is expanded now; the generated script is static.
function create_startup_script() {
    cat > "${WORKSPACE}"/startup-script.sh <<EOF
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
EOF
# cat > ${WORKSPACE}/startup-script.sh <<EOF
}
965
# Create the post-startup script to be run on controllers.
#
# Writes ${WORKSPACE}/post-startup-script.sh. The generated script waits
# for the karaf ssh port (8101), refreshes the decanter bundles, optionally
# installs features one at a time (when USEFEATURESBOOT != "True"), waits
# up to 3 minutes for the infrautils "System ready" log line, and aborts
# with diagnostics if known fatal messages appear in karaf.log.
# Backslash-escaped expansions run on the controller; unescaped ones
# (${BUNDLEFOLDER}, ${SPACE_SEPARATED_FEATURES}, $USEFEATURESBOOT) are
# baked in now on the Jenkins minion.
function create_post_startup_script() {
    cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
# wait up to 60s for karaf port 8101 to be opened, polling every 5s
loop_count=0;
until [[ \$loop_count -ge 12 ]]; do
    netstat -na | grep ":::8101" && break;
    loop_count=\$[\$loop_count+1];
    sleep 5;
done

# This workaround is required for Karaf decanter to work proper
# The bundle:refresh command does not fail if the decanter bundles are not present
echo "ssh to karaf console to do bundle refresh of decanter jmx collector"
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                     -o UserKnownHostsFile=/dev/null \
                     -o LogLevel=error \
                     -p 8101 karaf@localhost \
                     "bundle:refresh org.apache.karaf.decanter.collector.jmx && bundle:refresh org.apache.karaf.decanter.api"

if [[ "$USEFEATURESBOOT" != "True" ]]; then

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
for i in {1..36}; do
    sleep 5;
    grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    if [ \$? -eq 0 ]; then
        echo "Controller is UP"
        break
    fi
done;

# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -ne 0 ]; then
    echo "Timeout Controller DOWN"
    echo "Dumping first 500K bytes of karaf log..."
    head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Dumping last 500K bytes of karaf log..."
    tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Listing all open ports on controller system"
    netstat -pnatu
    exit 1
fi

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
EOF
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF
}
1047
# Copy over the configuration script and configuration files to each controller
# Execute the configuration script on each controller, passing the member
# index so each node can configure itself for its clustering role.
function copy_and_run_configuration_script() {
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        ip="${!CONTROLLERIP}"
        echo "Configuring member-${i} with IP address ${ip}"
        scp "${WORKSPACE}"/configuration-script.sh "${ip}":/tmp/
        # $i needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${ip}" "bash /tmp/configuration-script.sh ${i}"
    done
}
1060
# Copy over the startup script to each controller and execute it.
function copy_and_run_startup_script() {
    for (( i = 1; i <= NUM_ODL_SYSTEM; i++ )); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        ip="${!CONTROLLERIP}"
        echo "Starting member-${i} with IP address ${ip}"
        scp "${WORKSPACE}"/startup-script.sh "${ip}":/tmp/
        ssh "${ip}" "bash /tmp/startup-script.sh"
    done
}
1070
# Copy the post-startup script to each controller and execute it.
#
# seed_index is the argument handed to post-startup-script.sh; it is
# incremented per controller and reset to 1 after the last node
# (i % NUM_ODL_SYSTEM == 0 only holds when i == NUM_ODL_SYSTEM here,
# since i ranges 1..NUM_ODL_SYSTEM).
function copy_and_run_post_startup_script() {
    seed_index=1
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Execute the post startup script on controller ${!CONTROLLERIP}"
        scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
        # $seed_index needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
        if [ $(( i % NUM_ODL_SYSTEM )) == 0 ]; then
            seed_index=1
        fi
    done
}
1085
# Take a jstack thread dump of the karaf JVM on every controller.
#
# Writes ${WORKSPACE}/ps_before.log (overwritten on each iteration, so only
# the last controller's ps output is kept afterwards) and one
# karaf_<i>_<pid>_threads_before.log per controller. jstack failures are
# tolerated (|| true) so log collection continues.
function dump_controller_threads() {
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP="ODL_SYSTEM_${i}_IP"
        echo "Let's take the karaf thread dump"
        ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
        # Find the karaf JVM by its main class; pid is the second ps column
        # after squeezing repeated spaces.
        pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        # $i needs to be parsed client-side
        # shellcheck disable=SC2029
        ssh "${!CONTROLLERIP}" "${JAVA_HOME}/bin/jstack -l ${pid}" > "${WORKSPACE}/karaf_${i}_${pid}_threads_before.log" || true
    done
}
1098
# Function to build OVS from git repo
#
# Arguments:
#   $1 - IP of the node to build on
#   $2 - OVS version/tag to check out ("v2.6.1-nsh" additionally applies
#        the yyang13 NSH patch set on top of v2.6.1)
#   $3 - local directory to copy the built RPMs back into
#
# The heredoc below is the remote build script: unescaped ${version}
# expands now, while \$-escaped variables (K_VERSION, TMP, YUM_OPTS, ...)
# expand on the build node at run time.
function build_ovs() {
    local -r ip=$1
    local -r version=$2
    local -r rpm_path="$3"

    echo "Building OVS ${version} on ${ip} ..."
    cat > "${WORKSPACE}"/build_ovs.sh << EOF
set -ex -o pipefail

echo '---> Building openvswitch version ${version}'

# Install running kernel devel packages
K_VERSION=\$(uname -r)
YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
# Install centos-release to update vault repos from which to fetch
# kernel devel packages
sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}

TMP=\$(mktemp -d)
pushd \${TMP}

git clone https://github.com/openvswitch/ovs.git
cd ovs

if [ "${version}" = "v2.6.1-nsh" ]; then
    git checkout v2.6.1
    echo "Will apply nsh patches for OVS version 2.6.1"
    git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
    git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
else
    git checkout ${version}
fi

# On early versions of OVS, flake warnings would fail the build.
# Remove it.
sudo pip uninstall -y flake8

# Get rid of sphinx dep as it conflicts with the already
# installed one (via pip). Docs wont be built.
sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in

sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
./boot.sh
./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
make rpm-fedora RPMBUILD_OPT="--without check"
# Build dkms only for now
# make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec

mkdir -p /tmp/ovs_rpms
cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/

popd
rm -rf \${TMP}
EOF

    # Ship the build script, run it remotely (output appended to
    # /tmp/install_ovs.txt on the node), then pull the RPMs back locally.
    scp "${WORKSPACE}"/build_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
    scp -r "${ip}":/tmp/ovs_rpms/* "${rpm_path}/"
    ${SSH} "${ip}" "rm -rf /tmp/ovs_rpms"
}
1166
# Install OVS RPMs from yum repo
#
# Arguments:
#   $1 - IP of the node to install on
#   $2 - repo URL (http://... or file:...) offering openvswitch packages
#
# The heredoc is the remote install script: unescaped ${rpm_repo} expands
# now, \$-escaped variables expand on the target node. It prefers the dkms
# package over kmod, version-locks what it installs, and fails if the
# kernel module was not actually replaced.
function install_ovs_from_repo() {
    local -r ip=$1
    local -r rpm_repo="$2"

    echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
    cat > "${WORKSPACE}"/install_ovs.sh << EOF
set -ex -o pipefail

echo '---> Installing openvswitch from ${rpm_repo}'

# We need repoquery from yum-utils.
sudo yum -y install yum-utils

# Get openvswitch packages offered by custom repo.
# dkms package will have priority over kmod.
OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
[ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
[ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
[ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"

# Bail with error if custom repo was provided but we could not
# find suitable packages there.
[ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1

# Install kernel & devel packages for the openvswitch dkms package.
if [ -n "\${OVS_DKMS_PKG}" ]; then
    # install centos-release to update vault repos from which to fetch
    # kernel devel packages
    sudo yum -y install centos-release
    K_VERSION=\$(uname -r)
    YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
    sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
fi

PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')

# Install OVS offered by custom repo.
sudo yum-config-manager --add-repo "${rpm_repo}"
sudo yum -y versionlock delete openvswitch-*
sudo yum -y remove openvswitch-*
sudo yum -y --nogpgcheck install \${OVS_PKGS}
sudo yum -y versionlock add \${OVS_PKGS}

# Most recent OVS versions have some incompatibility with certain versions of iptables
# This below line will overcome that problem.
sudo modprobe openvswitch

# Start OVS and print details
sudo systemctl start openvswitch
sudo systemctl enable openvswitch
sudo ovs-vsctl --retry -t 5 show
sudo modinfo openvswitch

# dkms rpm install can fail silently (probably because the OVS version is
# incompatible with the running kernel), verify module was updated.
NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
[ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
EOF

    # Ship the script and run it remotely, appending output to the node's
    # /tmp/install_ovs.txt for later log collection.
    scp "${WORKSPACE}"/install_ovs.sh "${ip}":/tmp
    ${SSH} "${ip}" "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
}
1234
# Install OVS RPMS from path
#
# Copies locally built RPMs to the node, turns them into a yum repo there
# with createrepo, and installs from that repo.
#   $1 - node IP
#   $2 - local directory holding the RPMs
function install_ovs_from_path() {
    local -r node_ip="$1"
    local -r src_rpms="$2"

    echo "Creating OVS RPM repo on ${node_ip} ..."
    ${SSH} "${node_ip}" "mkdir -p /tmp/ovs_rpms"
    scp -r "${src_rpms}"/* "${node_ip}":/tmp/ovs_rpms
    ${SSH} "${node_ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
    install_ovs_from_repo "${node_ip}" file:/tmp/ovs_rpms
}
1246
1247