Add tweakability features for ODL in apex job
[releng/builder.git] / jjb / integration / common-functions.sh
1 #!/bin/bash
2
3 echo "common-functions.sh is being sourced"
4
5 BUNDLEFOLDER=$1
6
7 # Basic controller configuration settings
8 export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
9 export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
10 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
11 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
12 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
13 export CONTROLLERMEM="2048m"
14
15 # Cluster specific configuration settings
16 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
17 export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
18 export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
19
# Dump the common-functions configuration to stdout so every job log shows
# exactly which paths and settings are in effect.
function print_common_env() {
    printf '%s\n' \
        "common-functions environment:" \
        "MAVENCONF: ${MAVENCONF}" \
        "ACTUALFEATURES: ${ACTUALFEATURES}" \
        "FEATURESCONF: ${FEATURESCONF}" \
        "CUSTOMPROP: ${CUSTOMPROP}" \
        "LOGCONF: ${LOGCONF}" \
        "MEMCONF: ${MEMCONF}" \
        "CONTROLLERMEM: ${CONTROLLERMEM}" \
        "AKKACONF: ${AKKACONF}" \
        "MODULESCONF: ${MODULESCONF}" \
        "MODULESHARDSCONF: ${MODULESHARDSCONF}" \
        "SUITES: ${SUITES}" \
        ""
}
print_common_env
38
# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
# Arguments: $1 - JDK install root, $2 - max heap (e.g. 2048m),
#            $3 - path to the karaf bin/setenv file
function set_java_vars() {
    local -r java_home=$1
    local -r controllermem=$2
    local -r memconf=$3

    # NOTE: plain echo does not interpret \n; use printf so the summary
    # actually prints on separate lines instead of showing literal "\n".
    printf 'Configure\n    java home: %s\n    max memory: %s\n    memconf: %s\n' \
        "${java_home}" "${controllermem}" "${memconf}"

    # Uncomment JAVA_HOME in setenv and point it at the requested JDK
    # (keeping any pre-set JAVA_HOME via the :- default).
    # NOTE: the old "sed -ie" form made a stray "<file>e" backup; -i -e does not.
    sed -i -e 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
    sed -i -e 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
    echo "cat ${memconf}"
    cat "${memconf}"

    # Make the requested JDK the system default java.
    echo "Set Java version"
    sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
    sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
    echo "JDK default version ..."
    java -version

    echo "Set JAVA_HOME"
    export JAVA_HOME="${java_home}"

    # shellcheck disable=SC2037
    JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
    echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
} # set_java_vars()
65
# shellcheck disable=SC2034
# Configure karaf logging in ${LOGCONF}: bump the rollover size limit and
# append per-module log levels.
# Arguments: $1 - karaf version (informational), $2 - space separated
#            "module:level" pairs; module is relative to org.opendaylight.
# Globals (read/write): LOGCONF
function configure_karaf_log() {
    local -r karaf_version=$1
    local -r controllerdebugmap=$2
    local logapi=log4j

    # Check what the logging.cfg file is using for the logging api: log4j or log4j2
    # (keep the grep output visible in the console log for debugging).
    if grep "log4j2" "${LOGCONF}"; then
        logapi=log4j2
    fi

    echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
    if [ "${logapi}" == "log4j2" ]; then
        # FIXME: Make log size limit configurable from build parameter.
        sed -i -e 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
        orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
        orgmodule_="${orgmodule//./_}"
        # BUGFIX: .name must be the logger's module name (it used to be WARN,
        # which defined a nonsense logger called "WARN"); only .level is WARN.
        echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
    else
        sed -i -e 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
        # FIXME: Make log size limit configurable from build parameter.
        sed -i -e 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
    fi

    # Add custom logging levels
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."
    unset IFS
    echo "controllerdebugmap: ${controllerdebugmap}"
    if [ -n "${controllerdebugmap}" ]; then
        for kv in ${controllerdebugmap}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                if [ "${logapi}" == "log4j2" ]; then
                    orgmodule_="${orgmodule//./_}"
                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
                else
                    echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
                fi
            fi
        done
    fi

    echo "cat ${LOGCONF}"
    cat "${LOGCONF}"
} # function configure_karaf_log()
122
# Configure ODL log levels for an apex deployment by injecting an
# "opendaylight::log_levels" map into the control node's puppet hiera data.
# Arguments: $1 - IP of the control node to modify
# Globals (read): CONTROLLERDEBUGMAP
function configure_karaf_log_for_apex() {
    # TODO: add the extra steps to this function to do any extra work
    # in this apex environment like we do in our standard environment.
    # EX: log size, rollover, etc.

    # Modify ODL Log Levels, if needed, for new distribution. This will modify
    # the control nodes hiera data which will be used during the puppet deploy
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated
    # values like "module:level module2:level2" where module is abbreviated and
    # does not include "org.opendaylight."

    local -r controller_ip=$1

    unset IFS
    # shellcheck disable=SC2153
    echo "CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}"
    if [ -n "${CONTROLLERDEBUGMAP}" ]; then
        logging_config='\"opendaylight::log_levels\": {'
        for kv in ${CONTROLLERDEBUGMAP}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                logging_config="${logging_config} \\\"${orgmodule}\\\": \\\"${level}\\\","
            fi
        done
        # replace the trailing comma with a closing brace followed by trailing comma
        logging_config=${logging_config%,}" },"
        echo "${logging_config}"

        # find a sane line number to inject the custom logging json
        # BUGFIX: was querying the hardcoded global OPENSTACK_CONTROL_NODE_1_IP;
        # use the controller_ip parameter like the rest of this function.
        lineno=$(ssh "${controller_ip}" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
        ssh "${controller_ip}" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
        ssh "${controller_ip}" "sudo cat /etc/puppet/hieradata/service_configs.json"
    fi
} # function configure_karaf_log_for_apex()
161
# Rewrite the "opendaylight::extra_features" list in the apex puppet hiera
# data on the given controller to match ${ACTUALFEATURES}.
# Arguments: $1 - IP of the control node to modify
# Globals (read): ACTUALFEATURES (comma separated feature list)
function configure_odl_features_for_apex() {

    # if the environment variable $ACTUALFEATURES is not null, then rewrite
    # the puppet config file with the features given in that variable, otherwise
    # this function is a noop

    local -r controller_ip=$1
    local -r config_file=/etc/puppet/hieradata/service_configs.json

# NOTE: unquoted EOF, so $config_file and the $(echo $ACTUALFEATURES ...) list
# expand HERE, on the Jenkins side; only \$feature/\$jq_arg stay literal and
# are evaluated when the script runs on the remote node.
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
for feature in $(echo $ACTUALFEATURES | sed "s/,/ /g"); do
    sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
done
echo "Modified puppet-opendaylight service_configs.json..."
cat $config_file
EOF

    echo "Feature configuration script..."
    cat /tmp/set_odl_features.sh

    # Only ship and run the script when there is actually something to set.
    if [ -n "${ACTUALFEATURES}" ]; then
        scp /tmp/set_odl_features.sh $controller_ip:/tmp/set_odl_features.sh
        ssh $controller_ip "sudo bash /tmp/set_odl_features.sh"
    fi

} # function configure_odl_features_for_apex()
189
# Map the number of available systems to an OpenStack topology string and
# export it as OPENSTACK_TOPO (NcmB = combo, ctl = control, cmp = compute).
# Arguments: $1 - system count (defaults to ${NUM_OPENSTACK_SYSTEM})
function get_os_deploy() {
    local -r count=${1:-$NUM_OPENSTACK_SYSTEM}

    case "${count}" in
    1) OPENSTACK_TOPO="1cmb-0ctl-0cmp" ;;
    2) OPENSTACK_TOPO="1cmb-0ctl-1cmp" ;;
    *) OPENSTACK_TOPO="0cmb-1ctl-2cmp" ;;  # 3 or more systems
    esac
    export OPENSTACK_TOPO
}
205
# Resolve the list of robot suites to run, either from the stream/test plan
# files or from an explicit ${SUITES} override, and assign the space separated
# result to the variable named by the caller.
# Arguments: $1 - NAME of the variable to assign the suite list to
# Globals (read): WORKSPACE, STREAMTESTPLAN, TESTPLAN, SUITES
function get_test_suites() {

    # let the caller pick the name of the variable we will assign the suites to
    local -r __suite_list=$1

    echo "Locating test plan to use..."
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
    if [ ! -f "${testplan_filepath}" ]; then
        testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
    fi

    echo "Changing the testplan path..."
    sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
    cat testplan.txt

    # Use the testplan if specific SUITES are not defined.
    if [ -z "${SUITES}" ]; then
        # Drop comment and blank lines, then join the remainder with spaces.
        suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
    else
        suite_list=""
        workpath="${WORKSPACE}/test/csit/suites"
        for suite in ${SUITES}; do
            fullsuite="${workpath}/${suite}"
            if [ -z "${suite_list}" ]; then
                suite_list+=${fullsuite}
            else
                suite_list+=" "${fullsuite}
            fi
        done
    fi

    # Safer indirect assignment than the previous eval.
    printf -v "${__suite_list}" '%s' "${suite_list}"
}
239
# Locate and execute a script/config plan: each non-comment line of the plan
# file is a shell file that gets sourced (so plans can mutate this shell's
# environment).
# Arguments: $1 - plan type, "script" or "config" (anything else acts as config)
# Globals (read): WORKSPACE, SCRIPTPLAN, CONFIGPLAN, STREAMTESTPLAN, TESTPLAN
function run_plan() {
    local -r type=$1

    case ${type} in
    script)
        plan=$SCRIPTPLAN
        ;;
    config|*)
        plan=$CONFIGPLAN
        ;;
    esac

    printf "Locating %s plan to use...\n" "${type}"
    # Fall back: requested plan -> stream test plan -> default test plan.
    plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
    if [ ! -f "${plan_filepath}" ]; then
        plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
        if [ ! -f "${plan_filepath}" ]; then
            plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
        fi
    fi

    if [ -f "${plan_filepath}" ]; then
        printf "%s plan exists!!!\n" "${type}"
        printf "Changing the %s plan path...\n" "${type}"
        # Rewrite repo-relative "integration" paths to absolute workspace paths.
        cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
        cat ${type}plan.txt
        # Skip comment/blank lines; source everything else in order.
        for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
            printf "Executing %s...\n" "${line}"
            # shellcheck source=${line} disable=SC1091
            source ${line}
        done
    fi
    printf "Finished running %s plans\n" "${type}"
} # function run_plan()
274
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
function timer()
{
    if [ $# -eq 0 ]; then
        # No timer token given: emit the current epoch seconds as a new timer.
        date "+%s" | tr -d '\n'
        return
    fi

    local start_time=$1
    end_time=$(date "+%s")

    # An empty token means "start now", i.e. zero elapsed time.
    [ -z "$start_time" ] && start_time=$end_time

    delta_time=$((end_time - start_time))
    ds=$((delta_time % 60))
    dm=$(((delta_time / 60) % 60))
    dh=$((delta_time / 3600))
    # return the elapsed time formatted as H:MM:SS
    printf "%d:%02d:%02d" "$dh" "$dm" "$ds"
}
299
# convert commas in csv strings to spaces (ssv)
# Arguments: $1 - comma separated string; Output: space separated string
function csv2ssv() {
    local csv=$1
    # BUGFIX: ssv must be local and reset each call; it used to be a global,
    # so calling with an empty csv echoed the result of the PREVIOUS call.
    local ssv=""
    if [ -n "${csv}" ]; then
        # Replace commas with spaces, then collapse runs of spaces.
        ssv=$(echo "${csv}" | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
309
# Echo 1 if the given devstack feature appears in the ENABLE_OS_SERVICES csv
# list, 0 otherwise.
# Arguments: $1 - feature name (e.g. n-cpu)
# Globals (read): ENABLE_OS_SERVICES
function is_openstack_feature_enabled() {
    local -r feature=$1
    local enabled_feature
    # Quote the global so an empty/odd value cannot glob or word-split oddly.
    for enabled_feature in $(csv2ssv "${ENABLE_OS_SERVICES}"); do
        if [ "${enabled_feature}" == "${feature}" ]; then
           echo 1
           return
        fi
    done
    echo 0
}
320
321 SSH="ssh -t -t"
322
# shellcheck disable=SC2153
# Print every job parameter this library consumes, for job-log debugging and
# for archiving via "print_job_parameters > params.txt".
function print_job_parameters() {
    cat << EOF

Job parameters:
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
SUITES: ${SUITES}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}

EOF
}
368
# Start a background tcpdump capture on a remote node; the pcap lands in
# /tmp on that node and is later compressed/collected by tcpdump_stop.
# Arguments: $1 - file name prefix, $2 - node IP, $3 - tcpdump filter expression
function tcpdump_start() {
    local -r prefix=$1
    local -r ip=$2
    local -r filter=$3
    # Spaces are not safe in the pcap filename; swap them for underscores.
    filter_=${filter// /_}

    printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
    # NOTE(review): plain ssh (not ${SSH}) here, presumably so the nohup'd
    # tcpdump is not tied to a forced tty that dies with the session -- confirm.
    ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
    ${SSH} ${ip} "ps -ef | grep tcpdump"
}
379
# Stop any tcpdump on a remote node and xz-compress its pcaps in /tmp so the
# later log-collection pass (which grabs /tmp/*.xz) picks them up.
# Arguments: $1 - node IP
function tcpdump_stop() {
    local -r ip=$1

    printf "node %s: stopping tcpdump\n" "$ip"
    ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
    ${SSH} ${ip} "sudo pkill -f tcpdump"
    # -k keeps the original pcap alongside the .xz output.
    ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
    ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
    # copy_logs will copy any *.xz files
}
390
# Collect the list of files on the hosts
# Produces two listings of /etc, /opt/stack and /var on the remote node (via
# find and via rsync --list-only), tars them up and copies both archives into
# the given local folder.
# Arguments: $1 - node IP, $2 - destination folder on the Jenkins host
function collect_files() {
    local -r ip=$1
    local -r folder=$2
    finddir=/tmp/finder
    ${SSH} ${ip} "mkdir -p ${finddir}"
    ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
    ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
    # NOTE(review): /var is listed twice (find2.txt and find.var.txt);
    # find2.txt looks like a legacy name kept for older consumers -- confirm.
    ${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
    ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
    ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
    scp ${ip}:/tmp/find.tar.xz ${folder}
    # Same listings again, produced locally via rsync --list-only.
    mkdir -p ${finddir}
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
    tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
    cp /tmp/rsync.tar.xz ${folder}
}
410
411 # List of extra services to extract from journalctl
412 # Add new services on a separate line, in alpha order, add \ at the end
413 extra_services_cntl=" \
414     dnsmasq.service \
415     httpd.service \
416     libvirtd.service \
417     openvswitch.service \
418     ovs-vswitchd.service \
419     ovsdb-server.service \
420     rabbitmq-server.service \
421 "
422
423 extra_services_cmp=" \
424     libvirtd.service \
425     openvswitch.service \
426     ovs-vswitchd.service \
427     ovsdb-server.service \
428 "
429
# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
# Arguments: $1 - node IP, $2 - local destination folder, $3 - "control" or "compute"
# Globals (read): OPENSTACK_BRANCH, WORKSPACE, SSH, extra_services_cntl, extra_services_cmp
function collect_openstack_logs() {
    local -r ip=${1}
    local -r folder=${2}
    local -r node_type=${3}
    local oslogs="${folder}/oslogs"

    printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
    rm -rf "${oslogs}"
    mkdir -p "${oslogs}"
    # There are always some logs in /opt/stack/logs and this also covers the
    # pre-queens branches which always use /opt/stack/logs
    rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links

    # Starting with queens break out the logs from journalctl
    # NOTE: unquoted EOF, so unescaped ${...} expands locally; \${...} stays
    # literal and is evaluated on the remote node.
    # BUGFIX: node_type used to be written escaped (\${node_type}), so it was
    # always empty on the remote node and the control branch never ran; embed
    # the local value instead.
    if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
        cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"

function extract_from_journal() {
    local -r services=\${1}
    local -r folder=\${2}
    local -r node_type=\${3}
    printf "extract_from_journal folder: \${folder}, services: \${services}\n"
    for service in \${services}; do
        # strip anything before @ and anything after .
        # devstack@g-api.service will end as g-api
        service_="\${service#*@}"
        service_="\${service_%.*}"
        sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
    done
}

rm -rf /tmp/oslogs
mkdir -p /tmp/oslogs
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "${node_type}" = "control" ]; then
    extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
else
    extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
fi
ls -al /tmp/oslogs
EOF
        printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
        cat ${WORKSPACE}/collect_openstack_logs.sh
        scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
        ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
        rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
        scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
    fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
}
487
# Collect all NetVirt CSIT logs and configuration into ${WORKSPACE}/archives:
# ODL controller dumps/logs, OpenStack control and compute node configs/logs,
# and any tempest results. Best-effort by design.
# Globals (read): NUM_ODL_SYSTEM, NUM_OPENSTACK_CONTROL_NODES,
#   NUM_OPENSTACK_COMPUTE_NODES, ODL_SYSTEM_*_IP, OPENSTACK_*_NODE_*_IP,
#   OPENSTACK_HAPROXY_1_IP, WORKSPACE, BUNDLEFOLDER, JAVA_HOME, SSH
function collect_netvirt_logs() {
    set +e  # We do not want to create red dot just because something went wrong while fetching logs.

    # Script shipped to each openstack node to capture extra debug state
    # (ovs kernel module, conntrack support, sockets, selinux, journal, ...).
    cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
echo -e "\nsudo netstat -punta\n"
sudo netstat -punta
echo -e "\nsudo getenforce\n"
sudo getenforce
echo -e "\nsudo systemctl status httpd\n"
sudo systemctl status httpd
echo -e "\nenv\n"
env
source /opt/stack/devstack/openrc admin admin
echo -e "\nenv after openrc\n"
env
echo -e "\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
echo -e "\nsudo mount"
sudo mount
echo -e "\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
echo -e "\njournalctl > /tmp/journalctl.log\n"
sudo journalctl > /tmp/journalctl.log
echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
EOF

    # Since this log collection work is happening before the archive build macro which also
    # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
    # archives build step will essentially be a noop.
    mkdir -p ${WORKSPACE}/archives

    mv /tmp/changes.txt ${WORKSPACE}/archives
    mv /tmp/validations.txt ${WORKSPACE}/archives
    mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
    mv ${WORKSPACE}/haproxy.cfg ${WORKSPACE}/archives
    ssh ${OPENSTACK_HAPROXY_1_IP} "sudo journalctl -u haproxy > /tmp/haproxy.log"
    scp ${OPENSTACK_HAPROXY_1_IP}:/tmp/haproxy.log ${WORKSPACE}/archives/

    sleep 5
    # FIXME: Do not create .tar and gzip before copying.
    # --- ODL controllers: thread dumps, journal, dmesg, etc/, karaf logs ---
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        # Indirect expansion: resolve ODL_SYSTEM_<i>_IP.
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
        NODE_FOLDER="odl_${i}"
        mkdir -p ${NODE_FOLDER}
        echo "Lets's take the karaf thread dump again..."
        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
        echo "killing karaf process..."
        ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
        ${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
        scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
        scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
        scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
        scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
        scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
        tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
        # Pull out error-only and error/warn/exception views of the karaf log.
        grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
            ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
        # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
        mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
        rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
        mv *_threads* ${NODE_FOLDER}
        mv ps_* ${NODE_FOLDER}
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    print_job_parameters > ${WORKSPACE}/archives/params.txt

    # Control Node
    for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
        OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
        # A control node running n-cpu is a combo (control+compute) node.
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            echo "collect_logs: for openstack combo node ip: ${!OSIP}"
            NODE_FOLDER="combo_${i}"
        else
            echo "collect_logs: for openstack control node ip: ${!OSIP}"
            NODE_FOLDER="control_${i}"
        fi
        mkdir -p ${NODE_FOLDER}
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh ${!OSIP}:/tmp
        # Capture compute logs if this is a combo node
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
            scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
            scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
            rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
        fi
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        # OpenStack service configuration files.
        scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
        # Artifacts produced earlier on the node (tcpdump_stop, extra_debug.sh, ...).
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
        scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
        mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
        # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
        tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
        mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Compute Nodes
    for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
        OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
        echo "collect_logs: for openstack compute node ip: ${!OSIP}"
        NODE_FOLDER="compute_${i}"
        mkdir -p ${NODE_FOLDER}
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh ${!OSIP}:/tmp
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
        scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
        mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Tempest
    DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
    TESTREPO=".stestr"
    TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
    # Look for tempest test results in the $TESTREPO dir and copy if found
    if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
        # Convert each numbered subunit stream to v2 and render an HTML report.
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
        mkdir -p ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
    else
        echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
    fi
} # collect_netvirt_logs()
694
695 # Utility function for joining strings.
function join() {
    # Join all arguments into a single space-separated string on stdout.
    # Arguments: one or more strings. Outputs: the joined string.
    local -r delim=' '
    local final=$1; shift
    local str

    for str in "$@"; do
        final=${final}${delim}${str}
    done

    # Quoted so the result is not re-split or glob-expanded by the shell.
    echo "${final}"
}
706
function get_nodes_list() {
    # Build a space-separated list of all ODL controller IPs.
    # Globals: reads NUM_ODL_SYSTEM and ODL_SYSTEM_<i>_IP (indirectly);
    # writes the global nodes[] array and nodes_list (nodes_list is also
    # consumed at generation time by create_configuration_script).
    # Outputs: the space-separated IP list on stdout.
    local i
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        nodes[$i]=${!CONTROLLERIP}
    done

    nodes_list=$(join "${nodes[@]}")
    echo "${nodes_list}"
}
717
function get_features() {
    # Compute the karaf feature list to install based on CONTROLLERSCOPE.
    # Globals: reads CONTROLLERSCOPE and CONTROLLERFEATURES; exports
    # ACTUALFEATURES, SPACE_SEPARATED_FEATURES and (for scope 'all')
    # CONTROLLERMEM.
    # [[ ]] avoids a "[: ==: unary operator expected" error when
    # CONTROLLERSCOPE is unset or empty (the old unquoted [ test broke).
    if [[ "${CONTROLLERSCOPE}" == 'all' ]]; then
        ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
        # Installing every compatible feature needs a larger karaf heap.
        export CONTROLLERMEM="3072m"
    else
        ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    fi

    # Some versions of jenkins job builder result in feature list containing spaces
    # and ending in newline. Remove all that.
    ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
    echo "ACTUALFEATURES: ${ACTUALFEATURES}"

    # In the case that we want to install features via karaf shell, a space separated list of
    # ACTUALFEATURES IS NEEDED
    SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
    echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

    export ACTUALFEATURES
    export SPACE_SEPARATED_FEATURES
}
739
740 # Create the configuration script to be run on controllers.
function create_configuration_script() {
    # Write ${WORKSPACE}/configuration-script.sh, which is later scp'd to each
    # controller and executed there (see copy_and_run_configuration_script).
    # In the unquoted heredoc below, unescaped ${VAR}/$VAR expand NOW at
    # generation time (e.g. ACTUAL_BUNDLE_URL, ACTUALFEATURES, USEFEATURESBOOT,
    # nodes_list), while \$... / \${...} are left literal for the
    # controller-side run. The heredoc body is intentionally untouched here.
    cat > ${WORKSPACE}/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
   sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
cat ${FEATURESCONF}

configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"

set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"

echo "Listing all open ports on controller system..."
netstat -pnatu

# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
    echo "Custom shard config exists!!!"
    echo "Copying the shard config..."
    cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
fi

echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}

echo "Dump akka.conf"
cat ${AKKACONF}

echo "Dump modules.conf"
cat ${MODULESCONF}

echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
}
804
805 # Create the startup script to be run on controllers.
function create_startup_script() {
    # Generate ${WORKSPACE}/startup-script.sh. ${WORKSPACE} and ${BUNDLEFOLDER}
    # are expanded here, at generation time; the resulting script redirects the
    # karaf console output to karaf_console.log and starts the distribution.
    cat <<EOF > ${WORKSPACE}/startup-script.sh
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
EOF
}
817
function create_post_startup_script() {
    # Write ${WORKSPACE}/post-startup-script.sh, later copied to each controller
    # and run there (see copy_and_run_post_startup_script). In the unquoted
    # heredoc, ${BUNDLEFOLDER}, $USEFEATURESBOOT and ${SPACE_SEPARATED_FEATURES}
    # expand NOW at generation time, while \$-escaped expansions are left for
    # the controller-side run.
    # Fix: the post-loop readiness re-check used an unescaped \$?, which was
    # substituted at generation time (compare the escaped \$? in the wait loop),
    # leaving the "Timeout Controller DOWN" branch permanently dead. It is now
    # escaped so the generated script tests the exit status of its own grep.
    cat > ${WORKSPACE}/post-startup-script.sh <<EOF
if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$[\$loop_count+1];
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
for i in {1..36}; do
    sleep 5;
    grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    if [ \$? -eq 0 ]; then
        echo "Controller is UP"
        break
    fi
done;

# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -ne 0 ]; then
    echo "Timeout Controller DOWN"
    echo "Dumping first 500K bytes of karaf log..."
    head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Dumping last 500K bytes of karaf log..."
    tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Listing all open ports on controller system"
    netstat -pnatu
    exit 1
fi

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
EOF
}
890
891 # Copy over the configuration script and configuration files to each controller
892 # Execute the configuration script on each controller.
function copy_and_run_configuration_script() {
    # Push configuration-script.sh to every controller and run it there,
    # passing the 1-based member index so each node configures its cluster role.
    # Globals: reads NUM_ODL_SYSTEM, WORKSPACE and ODL_SYSTEM_<i>_IP.
    local i
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
        scp "${WORKSPACE}/configuration-script.sh" "${!CONTROLLERIP}:/tmp/"
        ssh "${!CONTROLLERIP}" "bash /tmp/configuration-script.sh ${i}"
    done
}
901
902 # Copy over the startup script to each controller and execute it.
function copy_and_run_startup_script() {
    # Push startup-script.sh to every controller and start karaf there.
    # Globals: reads NUM_ODL_SYSTEM, WORKSPACE and ODL_SYSTEM_<i>_IP.
    local i
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
        scp "${WORKSPACE}/startup-script.sh" "${!CONTROLLERIP}:/tmp/"
        ssh "${!CONTROLLERIP}" "bash /tmp/startup-script.sh"
    done
}
911
function copy_and_run_post_startup_script() {
    # Push post-startup-script.sh to every controller and run it, passing a
    # 1-based seed index that wraps around after the last member.
    # Globals: reads NUM_ODL_SYSTEM, WORKSPACE and ODL_SYSTEM_<i>_IP.
    local i
    seed_index=1
    for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Execute the post startup script on controller ${!CONTROLLERIP}"
        scp "${WORKSPACE}/post-startup-script.sh" "${!CONTROLLERIP}:/tmp"
        # $(( seed_index++ )) yields the current index locally, then increments.
        ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
        # i % NUM_ODL_SYSTEM == 0 only on the last member: reset for reuse.
        if (( i % NUM_ODL_SYSTEM == 0 )); then
            seed_index=1
        fi
    done
}
924
function create_controller_variables() {
    # Append a "-v ODL_SYSTEM_<i>_IP:<ip>" robot variable per controller to the
    # global odl_variables string, and take a pre-test karaf thread dump from
    # each controller node.
    # Globals: reads NUM_ODL_SYSTEM, WORKSPACE, JAVA_HOME and ODL_SYSTEM_<i>_IP;
    # appends to odl_variables.
    echo "Generating controller variables..."
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
        echo "Lets's take the karaf thread dump"
        # NOTE(review): ps_before.log is overwritten on every iteration, so the
        # saved copy only reflects the last controller — confirm if intended.
        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
        # Extract the karaf JVM pid: 2nd whitespace-separated field of the ps
        # line mentioning the karaf main class.
        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        # jstack may fail (e.g. pid empty or process gone); '|| true' keeps the
        # job going since the thread dump is best-effort diagnostics.
        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
    done
}
937
938 # Function to build OVS from git repo
function build_ovs() {
    # Build OVS RPMs of the requested version on a remote node, then copy the
    # resulting RPMs back locally.
    # Arguments:
    #   $1 - ip: remote build host
    #   $2 - version: OVS git tag; the special value "v2.6.1-nsh" checks out
    #        v2.6.1 and applies the NSH patch set
    #   $3 - rpm_path: local directory to receive the built RPMs
    # In the unquoted heredoc, unescaped ${...} expand NOW at generation time
    # (version), while \$-escaped expansions run on the remote host. The
    # heredoc body (the remote build script) is intentionally untouched here.
    local -r ip=$1
    local -r version=$2
    local -r rpm_path="$3"

    echo "Building OVS ${version} on ${ip} ..."
    cat > ${WORKSPACE}/build_ovs.sh << EOF
set -ex -o pipefail

echo '---> Building openvswitch version ${version}'

# Install running kernel devel packages
K_VERSION=\$(uname -r)
YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
# Install centos-release to update vault repos from which to fetch
# kernel devel packages
sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}

TMP=\$(mktemp -d)
pushd \${TMP}

git clone https://github.com/openvswitch/ovs.git
cd ovs

if [ "${version}" = "v2.6.1-nsh" ]; then
    git checkout v2.6.1
    echo "Will apply nsh patches for OVS version 2.6.1"
    git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
    git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
else
    git checkout ${version}
fi

# On early versions of OVS, flake warnings would fail the build.
# Remove it.
sudo pip uninstall -y flake8

# Get rid of sphinx dep as it conflicts with the already
# installed one (via pip). Docs wont be built.
sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in

sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
./boot.sh
./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
make rpm-fedora RPMBUILD_OPT="--without check"
# Build dkms only for now
# make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec

mkdir -p /tmp/ovs_rpms
cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/

popd
rm -rf \${TMP}
EOF

    # Run the build remotely (output captured in /tmp/install_ovs.txt on the
    # node), then pull the RPMs back and clean up the remote staging dir.
    scp ${WORKSPACE}/build_ovs.sh ${ip}:/tmp
    ${SSH} ${ip} " bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
    scp -r ${ip}:/tmp/ovs_rpms/* "${rpm_path}/"
    ${SSH} ${ip} "rm -rf /tmp/ovs_rpms"
}
1005
1006 # Install OVS RPMs from yum repo
function install_ovs_from_repo() {
    # Install OVS on a remote node from a yum repo URL, replacing any existing
    # openvswitch packages, and verify the kernel module was actually updated.
    # Arguments:
    #   $1 - ip: remote host to install on
    #   $2 - rpm_repo: yum repo URL (http(s)://... or file:/...) with OVS RPMs
    # In the unquoted heredoc, unescaped ${rpm_repo} expands NOW at generation
    # time; \$-escaped expansions run on the remote host. The heredoc body
    # (the remote install script) is intentionally untouched here.
    local -r ip=$1
    local -r rpm_repo="$2"

    echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
    cat > ${WORKSPACE}/install_ovs.sh << EOF
set -ex -o pipefail

echo '---> Installing openvswitch from ${rpm_repo}'

# We need repoquery from yum-utils.
sudo yum -y install yum-utils

# Get openvswitch packages offered by custom repo.
# dkms package will have priority over kmod.
OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
[ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
[ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
[ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"

# Bail with error if custom repo was provided but we could not
# find suitable packages there.
[ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1

# Install kernel & devel packages for the openvswitch dkms package.
if [ -n "\${OVS_DKMS_PKG}" ]; then
    # install centos-release to update vault repos from which to fetch
    # kernel devel packages
    sudo yum -y install centos-release
    K_VERSION=\$(uname -r)
    YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
    sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
fi

PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')

# Install OVS offered by custom repo.
sudo yum-config-manager --add-repo "${rpm_repo}"
sudo yum -y versionlock delete openvswitch-*
sudo yum -y remove openvswitch-*
sudo yum -y --nogpgcheck install \${OVS_PKGS}
sudo yum -y versionlock add \${OVS_PKGS}

# Most recent OVS versions have some incompatibility with certain versions of iptables
# This below line will overcome that problem.
sudo modprobe openvswitch

# Start OVS and print details
sudo systemctl start openvswitch
sudo systemctl enable openvswitch
sudo ovs-vsctl --retry -t 5 show
sudo modinfo openvswitch

# dkms rpm install can fail silently (probably because the OVS version is
# incompatible with the running kernel), verify module was updated.
NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
[ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
EOF

    # Run the install remotely; output is appended to /tmp/install_ovs.txt on
    # the node for later collection.
    scp ${WORKSPACE}/install_ovs.sh ${ip}:/tmp
    ${SSH} ${ip} "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
}
1073
1074 # Install OVS RPMS from path
function install_ovs_from_path() {
    # Install locally-built OVS RPMs on a remote node.
    # Arguments:
    #   $1 - ip: remote host to install on
    #   $2 - rpm_path: local directory containing the RPMs
    local -r ip=$1
    local -r rpm_path="$2"

    # Stage the RPMs on the node, turn them into a yum repo with createrepo,
    # then delegate the actual install to install_ovs_from_repo.
    echo "Creating OVS RPM repo on ${ip} ..."
    ${SSH} ${ip} "mkdir -p /tmp/ovs_rpms"
    scp -r "${rpm_path}"/* "${ip}:/tmp/ovs_rpms"
    ${SSH} ${ip} "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
    install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms
}
1085
1086