#!/bin/bash

echo "common-functions.sh is being sourced"

BUNDLEFOLDER=$1

# Basic controller configuration settings
export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
export CONTROLLERMEM="2048m"

# Cluster specific configuration settings
export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf

function print_common_env() {
    cat << EOF
common-functions environment:
MAVENCONF: ${MAVENCONF}
FEATURESCONF: ${FEATURESCONF}
CUSTOMPROP: ${CUSTOMPROP}
LOGCONF: ${LOGCONF}
MEMCONF: ${MEMCONF}
CONTROLLERMEM: ${CONTROLLERMEM}
AKKACONF: ${AKKACONF}
MODULESCONF: ${MODULESCONF}
MODULESHARDSCONF: ${MODULESHARDSCONF}

EOF
}
print_common_env

# Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
function set_java_vars() {
    local -r java_home=$1
    local -r controllermem=$2
    local -r memconf=$3

    echo -e "Configure\n    java home: ${java_home}\n    max memory: ${controllermem}\n    memconf: ${memconf}"

    sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
    sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
    echo "cat ${memconf}"
    cat ${memconf}

    echo "Set Java version"
    sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
    sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
    echo "JDK default version ..."
    java -version

    echo "Set JAVA_HOME"
    export JAVA_HOME="${java_home}"

    # shellcheck disable=SC2037
    JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
    echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
} # set_java_vars()
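
# Example usage (a sketch; the java home path and memory value below are
# illustrative, real jobs pass them in from the job environment):
#   set_java_vars "/usr/lib/jvm/java-1.8.0-openjdk" "2048m" "${MEMCONF}"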

# shellcheck disable=SC2034
# foo appears unused. Verify it or export it.
function configure_karaf_log() {
    local -r karaf_version=$1
    local -r controllerdebugmap=$2
    local logapi=log4j

    # Check what the logging.cfg file is using for the logging api: log4j or log4j2
    if grep "log4j2" ${LOGCONF}; then
        logapi=log4j2
    fi

    echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
    if [ "${logapi}" == "log4j2" ]; then
        # FIXME: Make log size limit configurable from build parameter.
        sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
        orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
        orgmodule_="${orgmodule//./_}"
        echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
        echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
    else
        sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
        # FIXME: Make log size limit configurable from build parameter.
        sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
        echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
    fi

    # Add custom logging levels
    # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated values like "module:level module2:level2"
    # where module is abbreviated and does not include "org.opendaylight."
    unset IFS
    echo "controllerdebugmap: ${controllerdebugmap}"
    if [ -n "${controllerdebugmap}" ]; then
        for kv in ${controllerdebugmap}; do
            module="${kv%%:*}"
            level="${kv#*:}"
            echo "module: $module, level: $level"
            # shellcheck disable=SC2157
            if [ -n "${module}" ] && [ -n "${level}" ]; then
                orgmodule="org.opendaylight.${module}"
                if [ "${logapi}" == "log4j2" ]; then
                    orgmodule_="${orgmodule//./_}"
                    echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
                    echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
                else
                    echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
                fi
            fi
        done
    fi

    echo "cat ${LOGCONF}"
    cat ${LOGCONF}
} # function configure_karaf_log()
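
# Example usage (a sketch; the module:level pairs shown are illustrative):
#   configure_karaf_log "karaf4" "netvirt:TRACE ovsdb:DEBUG"
# which appends logger entries for org.opendaylight.netvirt and
# org.opendaylight.ovsdb to ${LOGCONF}, using whichever log4j flavor was detected.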

function get_os_deploy() {
    local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
    case ${num_systems} in
    1)
        OPENSTACK_TOPO="1cmb-0ctl-0cmp"
        ;;
    2)
        OPENSTACK_TOPO="1cmb-0ctl-1cmp"
        ;;
    3|*)
        OPENSTACK_TOPO="0cmb-1ctl-2cmp"
        ;;
    esac
    export OPENSTACK_TOPO
}
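
# Example usage (NUM_OPENSTACK_SYSTEM is normally set by the job):
#   get_os_deploy 3   # exports OPENSTACK_TOPO="0cmb-1ctl-2cmp"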

function run_plan() {
    local -r type=$1

    case ${type} in
    script)
        plan=$SCRIPTPLAN
        ;;
    config|*)
        plan=$CONFIGPLAN
        ;;
    esac

    printf "Locating ${type} plan to use...\n"
    plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
    if [ ! -f "${plan_filepath}" ]; then
        plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
        if [ ! -f "${plan_filepath}" ]; then
            plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
        fi
    fi

    if [ -f "${plan_filepath}" ]; then
        printf "${type} plan exists!!!\n"
        printf "Changing the ${type} plan path...\n"
        sed "s:integration:${WORKSPACE}:" ${plan_filepath} > ${type}plan.txt
        cat ${type}plan.txt
        for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
            printf "Executing ${line}...\n"
            # shellcheck source=${line} disable=SC1091
            source ${line}
        done
    fi
    printf "Finished running ${type} plans\n"
} # function run_plan()
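
# Example usage (plan names come from job parameters such as SCRIPTPLAN and
# CONFIGPLAN; the calls below are only illustrative):
#   run_plan "script"
#   run_plan "config"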

# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
function timer()
{
    if [ $# -eq 0 ]; then
        # return the current time
        printf "$(date "+%s")"
    else
        local start_time=$1
        end_time=$(date "+%s")

        if [ -z "$start_time" ]; then
            start_time=$end_time;
        fi

        delta_time=$((end_time - start_time))
        ds=$((delta_time % 60))
        dm=$(((delta_time / 60) % 60))
        dh=$((delta_time / 3600))
        # return the elapsed time
        printf "%d:%02d:%02d" $dh $dm $ds
    fi
}
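
# Example usage:
#   start=$(timer)                  # capture the start timestamp
#   sleep 65                        # ... do some work ...
#   echo "took $(timer ${start})"   # prints an H:MM:SS duration such as 0:01:05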

# convert commas in csv strings to spaces (ssv)
function csv2ssv() {
    local csv=$1
    local ssv=""
    if [ -n "${csv}" ]; then
        ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
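
# Example usage:
#   csv2ssv "odl-netvirt-openstack,odl-restconf"   # -> "odl-netvirt-openstack odl-restconf"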

function is_openstack_feature_enabled() {
    local feature=$1
    for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
        if [ "${enabled_feature}" == "${feature}" ]; then
           echo 1
           return
        fi
    done
    echo 0
}
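
# Example usage (assumes ENABLE_OS_SERVICES holds the job's comma separated
# service list, e.g. "n-cpu,q-svc"):
#   if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
#       echo "this node also runs nova-compute"
#   fi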

SSH="ssh -t -t"

# shellcheck disable=SC2153
function print_job_parameters() {
    cat << EOF

Job parameters:
DISTROBRANCH: ${DISTROBRANCH}
DISTROSTREAM: ${DISTROSTREAM}
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
TESTPLAN: ${TESTPLAN}
SUITES: ${SUITES}
PATCHREFSPEC: ${PATCHREFSPEC}
OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
DEVSTACK_HASH: ${DEVSTACK_HASH}
ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}

EOF
}

function tcpdump_start() {
    local -r prefix=$1
    local -r ip=$2
    local -r filter=$3
    filter_=${filter// /_}

    printf "node ${ip}, ${prefix}_${ip}__${filter}: starting tcpdump\n"
    ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
    ${SSH} ${ip} "ps -ef | grep tcpdump"
}
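
# Example usage (the IP and capture filter below are illustrative):
#   tcpdump_start "controller" "10.30.170.10" "port 6653"
# The capture is written on the remote node as
# /tmp/tcpdump_controller_10.30.170.10__port_6653.pcap and later compressed
# by tcpdump_stop so copy_logs can pick it up.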

function tcpdump_stop() {
    local -r ip=$1

    printf "node $ip: stopping tcpdump\n"
    ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
    ${SSH} ${ip} "sudo pkill -f tcpdump"
    ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
    ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
    # copy_logs will copy any *.xz files
}

# Collect the list of files on the hosts
function collect_files() {
    local -r ip=$1
    local -r folder=$2
    finddir=/tmp/finder
    ${SSH} ${ip} "mkdir -p ${finddir}"
    ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
    ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
    ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
    ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
    scp ${ip}:/tmp/find.tar.xz ${folder}
    mkdir -p ${finddir}
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
    rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
    tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
    cp /tmp/rsync.tar.xz ${folder}
}
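
# Example usage (typically called from the log collection loops below with the
# node's resolved IP and its archive folder):
#   collect_files "${!OSIP}" "${NODE_FOLDER}"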

# List of extra services to extract from journalctl
# Add new services on a separate line, in alpha order, add \ at the end
extra_services_cntl=" \
    dnsmasq.service \
    httpd.service \
    libvirtd.service \
    openvswitch.service \
    ovs-vswitchd.service \
    ovsdb-server.service \
    rabbitmq-server.service \
"

extra_services_cmp=" \
    libvirtd.service \
    openvswitch.service \
    ovs-vswitchd.service \
    ovsdb-server.service \
"

# Collect the logs for the openstack services
# First get all the services started by devstack which would have devstack@ as a prefix
# Next get all the extra services
function collect_openstack_logs() {
    local -r ip=${1}
    local -r folder=${2}
    local -r node_type=${3}
    local oslogs="${folder}/oslogs"

    printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}\n"
    rm -rf ${oslogs}
    mkdir -p ${oslogs}
    # There are always some logs in /opt/stack/logs and this also covers the
    # pre-queens branches which always use /opt/stack/logs
    rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links

    # Starting with queens break out the logs from journalctl
    if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
        cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
extra_services_cntl="${extra_services_cntl}"
extra_services_cmp="${extra_services_cmp}"

function extract_from_journal() {
    local -r services=\${1}
    local -r folder=\${2}
    local -r node_type=\${3}
    printf "extract_from_journal folder: \${folder}, services: \${services}\n"
    for service in \${services}; do
        # strip anything before @ and anything after .
        # devstack@g-api.service will end as g-api
        service_="\${service#*@}"
        service_="\${service_%.*}"
        sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
    done
}

rm -rf /tmp/oslogs
mkdir -p /tmp/oslogs
systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
extract_from_journal "\${svcs}" "/tmp/oslogs"
if [ "${node_type}" = "control" ]; then
    extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
else
    extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
fi
ls -al /tmp/oslogs
EOF
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
        printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}, executing script\n"
        cat ${WORKSPACE}/collect_openstack_logs.sh
        scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
        ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
        rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
        scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
    fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
}

function collect_netvirt_logs() {
    set +e  # We do not want to create red dot just because something went wrong while fetching logs.

    cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\nsudo grep 'Datapath supports' /var/log/openvswitch/ovs-vswitchd.log\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
echo -e "\nsudo netstat -punta\n"
sudo netstat -punta
echo -e "\nsudo getenforce\n"
sudo getenforce
echo -e "\nsudo systemctl status httpd\n"
sudo systemctl status httpd
echo -e "\nenv\n"
env
source /opt/stack/devstack/openrc admin admin
echo -e "\nenv after openrc\n"
env
echo -e "\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
echo -e "\nsudo mount"
sudo mount
echo -e "\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
echo -e "\njournalctl > /tmp/journalctl.log\n"
sudo journalctl > /tmp/journalctl.log
echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
EOF

    # Since this log collection work is happening before the archive build macro which also
    # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
    # archives build step will essentially be a noop.
    mkdir -p ${WORKSPACE}/archives

    mv /tmp/changes.txt ${WORKSPACE}/archives
    mv /tmp/validations.txt ${WORKSPACE}/archives
    mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
    mv ${WORKSPACE}/haproxy.cfg ${WORKSPACE}/archives
    ssh ${OPENSTACK_HAPROXY_1_IP} "sudo journalctl -u haproxy > /tmp/haproxy.log"
    scp ${OPENSTACK_HAPROXY_1_IP}:/tmp/haproxy.log ${WORKSPACE}/archives/

    sleep 5
    # FIXME: Do not create .tar and gzip before copying.
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
        NODE_FOLDER="odl_${i}"
        mkdir -p ${NODE_FOLDER}
        echo "Let's take the karaf thread dump again..."
        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
        echo "killing karaf process..."
        ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
        ${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
        scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
        scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
        scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
        scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
        ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
        scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
        tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
        grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
            ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
        # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
        sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
        mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
        rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
        mv *_threads* ${NODE_FOLDER}
        mv ps_* ${NODE_FOLDER}
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    print_job_parameters > ${WORKSPACE}/archives/params.txt

    # Control Node
    for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
        OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            echo "collect_logs: for openstack combo node ip: ${!OSIP}"
            NODE_FOLDER="combo_${i}"
        else
            echo "collect_logs: for openstack control node ip: ${!OSIP}"
            NODE_FOLDER="control_${i}"
        fi
        mkdir -p ${NODE_FOLDER}
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh ${!OSIP}:/tmp
        # Capture compute logs if this is a combo node
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
            scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
            scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
            rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
        fi
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
        scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
        mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
        # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
        tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
        mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Compute Nodes
    for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
        OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
        echo "collect_logs: for openstack compute node ip: ${!OSIP}"
        NODE_FOLDER="compute_${i}"
        mkdir -p ${NODE_FOLDER}
        tcpdump_stop "${!OSIP}"
        scp extra_debug.sh ${!OSIP}:/tmp
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
        scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/install_ovs.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
        collect_files "${!OSIP}" "${NODE_FOLDER}"
        ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
        scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
        mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Tempest
    DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
    TESTREPO=".stestr"
    TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
    # Look for tempest test results in the $TESTREPO dir and copy if found
    if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
        mkdir -p ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
    else
        echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
    fi
} # collect_netvirt_logs()

# Utility function for joining strings.
function join() {
    delim=' '
    final=$1; shift

    for str in "$@" ; do
        final=${final}${delim}${str}
    done

    echo ${final}
}
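
# Example usage:
#   join "10.30.170.10" "10.30.170.11" "10.30.170.12"
#   # -> "10.30.170.10 10.30.170.11 10.30.170.12"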

function get_nodes_list() {
    # Create the string for nodes
    for i in `seq 1 ${NUM_ODL_SYSTEM}` ; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        nodes[$i]=${!CONTROLLERIP}
    done

    nodes_list=$(join "${nodes[@]}")
    echo ${nodes_list}
}
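
# Example usage (assumes NUM_ODL_SYSTEM and the ODL_SYSTEM_<n>_IP variables are
# provided by the job environment; the IPs shown are illustrative):
#   nodes_list=$(get_nodes_list)   # e.g. "10.30.170.10 10.30.170.11 10.30.170.12"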

function get_features() {
    if [ "${CONTROLLERSCOPE}" == 'all' ]; then
        ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
        export CONTROLLERMEM="3072m"
    else
        ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    fi

    # Some versions of jenkins job builder result in feature list containing spaces
    # and ending in newline. Remove all that.
    ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
    echo "ACTUALFEATURES: ${ACTUALFEATURES}"

    # In the case that we want to install features via the karaf shell, a space
    # separated list of the features is needed as well.
    SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
    echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

    export ACTUALFEATURES
    export SPACE_SEPARATED_FEATURES
}
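
# Example usage (a sketch; CONTROLLERSCOPE and CONTROLLERFEATURES normally come
# from the job definition, the values below are illustrative):
#   CONTROLLERSCOPE="only" CONTROLLERFEATURES="odl-netvirt-openstack" get_features
#   # -> ACTUALFEATURES="odl-infrautils-ready,odl-netvirt-openstack"
#   # -> SPACE_SEPARATED_FEATURES="odl-infrautils-ready odl-netvirt-openstack"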

# Create the configuration script to be run on controllers.
function create_configuration_script() {
    cat > ${WORKSPACE}/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
KARAF_VERSION=${KARAF_VERSION:-karaf4}
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
   sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
cat ${FEATURESCONF}

configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"

set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"

echo "Listing all open ports on controller system..."
netstat -pnatu

# Copy shard file if exists
if [ -f /tmp/custom_shard_config.txt ]; then
    echo "Custom shard config exists!!!"
    echo "Copying the shard config..."
    cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
fi

echo "Configuring cluster"
/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}

echo "Dump akka.conf"
cat ${AKKACONF}

echo "Dump modules.conf"
cat ${MODULESCONF}

echo "Dump module-shards.conf"
cat ${MODULESHARDSCONF}
EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF
}

# Create the startup script to be run on controllers.
function create_startup_script() {
    cat > ${WORKSPACE}/startup-script.sh <<EOF
echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start
EOF
# cat > ${WORKSPACE}/startup-script.sh <<EOF
}

function create_post_startup_script() {
    cat > ${WORKSPACE}/post-startup-script.sh <<EOF
if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$((loop_count+1));
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
for i in {1..36}; do
    sleep 5;
    grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    if [ \$? -eq 0 ]; then
        echo "Controller is UP"
        break
    fi
done;

# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
if [ \$? -ne 0 ]; then
    echo "Timeout Controller DOWN"
    echo "Dumping first 500K bytes of karaf log..."
    head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Dumping last 500K bytes of karaf log..."
    tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
    echo "Listing all open ports on controller system"
    netstat -pnatu
    exit 1
fi

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'
EOF
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF
}

# Copy over the configuration script and configuration files to each controller
# Execute the configuration script on each controller.
function copy_and_run_configuration_script() {
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
        scp ${WORKSPACE}/configuration-script.sh ${!CONTROLLERIP}:/tmp/
        ssh ${!CONTROLLERIP} "bash /tmp/configuration-script.sh ${i}"
    done
}

# Copy over the startup script to each controller and execute it.
function copy_and_run_startup_script() {
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
        scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp/
        ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
    done
}

function copy_and_run_post_startup_script() {
    seed_index=1
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Execute the post startup script on controller ${!CONTROLLERIP}"
        scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
        ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
        if [ $(( $i % ${NUM_ODL_SYSTEM} )) == 0 ]; then
            seed_index=1
        fi
    done
}

function create_controller_variables() {
    echo "Generating controller variables..."
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
        echo "Let's take the karaf thread dump"
        ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
        pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
        echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
        ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
    done
}

# Function to build OVS from git repo
function build_ovs() {
    local -r ip=$1
    local -r version=$2
    local -r rpm_path="$3"

    echo "Building OVS ${version} on ${ip} ..."
    cat > ${WORKSPACE}/build_ovs.sh << EOF
set -ex -o pipefail

echo '---> Building openvswitch version ${version}'

# Install running kernel devel packages
K_VERSION=\$(uname -r)
YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
# Install centos-release to update vault repos from which to fetch
# kernel devel packages
sudo yum \${YUM_OPTS} install centos-release yum-utils @'Development Tools' rpm-build
sudo yum \${YUM_OPTS} install kernel-{devel,headers}-\${K_VERSION}

TMP=\$(mktemp -d)
pushd \${TMP}

git clone https://github.com/openvswitch/ovs.git
cd ovs

if [ "${version}" = "v2.6.1-nsh" ]; then
    git checkout v2.6.1
    echo "Will apply nsh patches for OVS version 2.6.1"
    git clone https://github.com/yyang13/ovs_nsh_patches.git ../ovs_nsh_patches
    git apply ../ovs_nsh_patches/v2.6.1_centos7/*.patch
else
    git checkout ${version}
fi

# On early versions of OVS, flake warnings would fail the build.
# Remove it.
sudo pip uninstall -y flake8

# Get rid of sphinx dep as it conflicts with the already
# installed one (via pip). Docs won't be built.
sed -i "/BuildRequires:.*sphinx.*/d" rhel/openvswitch-fedora.spec.in

sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/ovs.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-kmod-fedora.spec.in > /tmp/ovs-kmod.spec
sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-dkms.spec.in > /tmp/ovs-dkms.spec
sudo yum-builddep \${YUM_OPTS} /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
rm /tmp/ovs.spec /tmp/ovs-kmod.spec /tmp/ovs-dkms.spec
./boot.sh
./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --with-linux=/lib/modules/\${K_VERSION}/build --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --enable-libcapng --enable-ssl --with-pkidir=/var/lib/openvswitch/pki PYTHON=/usr/bin/python2
make rpm-fedora RPMBUILD_OPT="--without check"
# Build dkms only for now
# make rpm-fedora-kmod RPMBUILD_OPT='-D "kversion \${K_VERSION}"'
rpmbuild -D "_topdir \$(pwd)/rpm/rpmbuild" -bb --without check rhel/openvswitch-dkms.spec

mkdir -p /tmp/ovs_rpms
cp -r rpm/rpmbuild/RPMS/* /tmp/ovs_rpms/

popd
rm -rf \${TMP}
EOF

    scp ${WORKSPACE}/build_ovs.sh ${ip}:/tmp
    ${SSH} ${ip} "bash /tmp/build_ovs.sh >> /tmp/install_ovs.txt 2>&1"
    scp -r ${ip}:/tmp/ovs_rpms/* "${rpm_path}/"
    ${SSH} ${ip} "rm -rf /tmp/ovs_rpms"
}
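
# Example usage (a sketch; the IP, tag and destination path are placeholders):
#   build_ovs "10.30.170.20" "v2.9.2" "/tmp/ovs_rpms"
# builds OVS from the named upstream git tag on the remote node and copies the
# resulting RPMs back into the given path on the Jenkins minion.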

# Install OVS RPMs from yum repo
function install_ovs_from_repo() {
    local -r ip=$1
    local -r rpm_repo="$2"

    echo "Installing OVS from repo ${rpm_repo} on ${ip} ..."
    cat > ${WORKSPACE}/install_ovs.sh << EOF
set -ex -o pipefail

echo '---> Installing openvswitch from ${rpm_repo}'

# We need repoquery from yum-utils.
sudo yum -y install yum-utils

# Get openvswitch packages offered by custom repo.
# dkms package will have priority over kmod.
OVS_REPO_OPTS="--repofrompath=ovs-repo,${rpm_repo} --disablerepo=* --enablerepo=ovs-repo"
OVS_PKGS=\$(repoquery \${OVS_REPO_OPTS} openvswitch)
OVS_SEL_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-selinux-policy)
OVS_DKMS_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-dkms)
OVS_KMOD_PKG=\$(repoquery \${OVS_REPO_OPTS} openvswitch-kmod)
[ -n "\${OVS_SEL_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_SEL_PKG}"
[ -n "\${OVS_DKMS_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_DKMS_PKG}"
[ -z "\${OVS_DKMS_PKG}" ] && [ -n "\${OVS_KMOD_PKG}" ] && OVS_PKGS="\${OVS_PKGS} \${OVS_KMOD_PKG}"

# Bail with error if custom repo was provided but we could not
# find suitable packages there.
[ -z "\${OVS_PKGS}" ] && echo "No OVS packages found in custom repo." && exit 1

# Install kernel & devel packages for the openvswitch dkms package.
if [ -n "\${OVS_DKMS_PKG}" ]; then
    # install centos-release to update vault repos from which to fetch
    # kernel devel packages
    sudo yum -y install centos-release
    K_VERSION=\$(uname -r)
    YUM_OPTS="-y --disablerepo=* --enablerepo=base,updates,extra,C*-base,C*-updates,C*-extras"
    sudo yum \${YUM_OPTS} install kernel-{headers,devel}-\${K_VERSION} @'Development Tools' python-six
fi

PREV_MOD=\$(sudo modinfo -n openvswitch || echo '')

# Install OVS offered by custom repo.
sudo yum-config-manager --add-repo "${rpm_repo}"
sudo yum -y versionlock delete openvswitch-*
sudo yum -y remove openvswitch-*
sudo yum -y --nogpgcheck install \${OVS_PKGS}
sudo yum -y versionlock add \${OVS_PKGS}

# Most recent OVS versions have some incompatibility with certain versions of iptables
# This below line will overcome that problem.
sudo modprobe openvswitch

# Start OVS and print details
sudo systemctl start openvswitch
sudo systemctl enable openvswitch
sudo ovs-vsctl --retry -t 5 show
sudo modinfo openvswitch

# dkms rpm install can fail silently (probably because the OVS version is
# incompatible with the running kernel), verify module was updated.
NEW_MOD=\$(sudo modinfo -n openvswitch || echo '')
[ "\${PREV_MOD}" != "\${NEW_MOD}" ] || (echo "Kernel module was not updated" && exit 1)
EOF

    scp ${WORKSPACE}/install_ovs.sh ${ip}:/tmp
    ${SSH} ${ip} "bash /tmp/install_ovs.sh >> /tmp/install_ovs.txt 2>&1"
}
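
# Example usage (the repo URL is a placeholder, not a real artifact location):
#   install_ovs_from_repo "10.30.170.20" "http://example.org/ovs/rpms"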

# Install OVS RPMS from path
function install_ovs_from_path() {
    local -r ip=$1
    local -r rpm_path="$2"

    echo "Creating OVS RPM repo on ${ip} ..."
    ${SSH} ${ip} "mkdir -p /tmp/ovs_rpms"
    scp -r "${rpm_path}"/* ${ip}:/tmp/ovs_rpms
    ${SSH} ${ip} "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms"
    install_ovs_from_repo ${ip} file:/tmp/ovs_rpms
}
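
# Example usage (typically paired with build_ovs; the IPs and path are placeholders):
#   build_ovs "10.30.170.20" "v2.9.2" "/tmp/ovs_rpms"
#   install_ovs_from_path "10.30.170.21" "/tmp/ovs_rpms"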