# Shared helper library for ODL CSIT jobs; sourced (not executed) by other scripts.
# NOTE(review): all paths below assume the Karaf distribution is unpacked under
# /tmp/${BUNDLEFOLDER}; BUNDLEFOLDER is presumably provided by the sourcing
# script (see "source /tmp/common-functions.sh ${BUNDLEFOLDER}" later in this
# file) -- confirm.
3 echo "common-functions.sh is being sourced"
7 # Basic controller configuration settings
# Exported so generated child scripts (configuration/startup scripts) see them.
8 export MAVENCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.url.mvn.cfg
9 export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
10 export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
11 export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
12 export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
13 export CONTROLLERMEM="2048m"
15 # Cluster specific configuration settings
16 export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
17 export MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
18 export MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
# Echo the common configuration environment for debugging/archiving.
# The body below is heredoc-style output text in the original file; it is
# emitted verbatim, so no comments are added inside it.
20 function print_common_env() {
22 common-functions environment:
23 MAVENCONF: ${MAVENCONF}
24 FEATURESCONF: ${FEATURESCONF}
25 CUSTOMPROP: ${CUSTOMPROP}
28 CONTROLLERMEM: ${CONTROLLERMEM}
30 MODULESCONF: ${MODULESCONF}
31 MODULESHARDSCONF: ${MODULESHARDSCONF}
37 # Setup JAVA_HOME and MAX_MEM Value in ODL startup config file
# Arguments: $1 = java_home (presumably; its assignment is not visible here --
# confirm), $2 = controller max memory (e.g. "2048m"), $3 = memconf path
# (used below; assignment not visible here -- confirm).
38 function set_java_vars() {
40 local -r controllermem=$2
# NOTE(review): plain echo does not interpret "\n"; printf would be needed for
# real newlines -- left as-is.
43 echo "Configure\n java home: ${java_home}\n max memory: ${controllermem}\n memconf: ${memconf}"
# Point JAVA_HOME and JAVA_MAX_MEM in the Karaf setenv file at our values.
45 sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
46 sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
50 echo "Set Java version"
# Register and select the requested JDK system-wide via alternatives.
51 sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
52 sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
53 echo "JDK default version ..."
57 export JAVA_HOME="${java_home}"
# Resolve symlinks so the log shows the actual java binary in use.
59 # shellcheck disable=SC2037
60 JAVA_RESOLVED=$(readlink -e "${java_home}/bin/java")
61 echo "Java binary pointed at by JAVA_HOME: ${JAVA_RESOLVED}"
64 # shellcheck disable=SC2034
65 # foo appears unused. Verify it or export it.
# Tune Karaf logging: raise the rolling log size limit, quiet the noisy
# YangTextSchemaContextResolver logger, then apply per-module debug levels.
# Arguments: $1 = karaf version, $2 = CONTROLLERDEBUGMAP-style "module:level ..." map.
66 function configure_karaf_log() {
67 local -r karaf_version=$1
68 local -r controllerdebugmap=$2
71 # Check what the logging.cfg file is using for the logging api: log4j or log4j2
# NOTE(review): ${logapi} used below is presumably derived from this grep in
# lines not shown here -- confirm.
72 grep "log4j2" ${LOGCONF}
77 echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
78 if [ "${logapi}" == "log4j2" ]; then
79 # FIXME: Make log size limit configurable from build parameter.
80 sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
81 orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
# Dots become underscores in the log4j2 logger property id, presumably to keep
# the property key unambiguous -- confirm against pax-logging docs.
82 orgmodule_="${orgmodule//./_}"
# NOTE(review): ".name = WARN" is inconsistent with the pattern used further
# below (".name = ${orgmodule}", ".level = <level>"); likely should be
# "= ${orgmodule}" -- flagged, not changed.
83 echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
84 echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
86 sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
87 # FIXME: Make log size limit configurable from build parameter.
88 sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
89 echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
92 # Add custom logging levels
93 # CONTROLLERDEBUGMAP is expected to be a key:value map of space separated values like "module:level module2:level2"
94 # where module is abbreviated and does not include "org.opendaylight."
96 echo "controllerdebugmap: ${controllerdebugmap}"
97 if [ -n "${controllerdebugmap}" ]; then
98 for kv in ${controllerdebugmap}; do
# NOTE(review): ${module}/${level} are presumably split from ${kv} in lines not
# shown here -- confirm.
101 echo "module: $module, level: $level"
102 # shellcheck disable=SC2157
103 if [ -n "${module}" ] && [ -n "${level}" ]; then
104 orgmodule="org.opendaylight.${module}"
105 if [ "${logapi}" == "log4j2" ]; then
106 orgmodule_="${orgmodule//./_}"
107 echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
108 echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
110 echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
116 echo "cat ${LOGCONF}"
118 } # function configure_karaf_log()
# Map the number of OpenStack systems to a topology string
# (<combo>cmb-<control>ctl-<compute>cmp). Defaults to NUM_OPENSTACK_SYSTEM
# when no argument is given; exports OPENSTACK_TOPO for callers.
120 function get_os_deploy() {
121 local -r num_systems=${1:-$NUM_OPENSTACK_SYSTEM}
122 case ${num_systems} in
124 OPENSTACK_TOPO="1cmb-0ctl-0cmp"
127 OPENSTACK_TOPO="1cmb-0ctl-1cmp"
130 OPENSTACK_TOPO="0cmb-1ctl-2cmp"
133 export OPENSTACK_TOPO
# Locate and run a ${type} test plan: try the job-specific plan first, then
# STREAMTESTPLAN, then TESTPLAN; rewrite "integration" paths to ${WORKSPACE}
# and (per the shellcheck source directive) presumably source each
# non-comment line of the resulting plan file.
# NOTE(review): ${type} and ${plan} assignments are not visible here -- confirm.
136 function run_plan() {
148 printf "Locating ${type} plan to use...\n"
149 plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
150 if [ ! -f "${plan_filepath}" ]; then
151 plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
152 if [ ! -f "${plan_filepath}" ]; then
153 plan_filepath="${WORKSPACE}/test/csit/${type}plans/${TESTPLAN}"
157 if [ -f "${plan_filepath}" ]; then
158 printf "${type} plan exists!!!\n"
159 printf "Changing the ${type} plan path...\n"
160 cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
# Execute each non-blank, non-comment line of the rewritten plan.
162 for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
163 printf "Executing ${line}...\n"
164 # shellcheck source=${line} disable=SC1091
168 printf "Finished running ${type} plans\n"
169 } # function run_plan()
171 # Return elapsed time. Usage:
172 # - Call first time with no arguments and a new timer is returned.
173 # - Next call with the first argument as the timer and the elapsed time is returned.
176 if [ $# -eq 0 ]; then
177 # return the current time
# Epoch seconds serve as the "timer" token handed back to the caller.
178 printf "$(date "+%s")"
181 end_time=$(date "+%s")
# NOTE(review): start_time is presumably taken from $1 in lines not shown here.
# Defaulting it to end_time yields a 0:00:00 result for an empty timer.
183 if [ -z "$start_time" ]; then
184 start_time=$end_time;
187 delta_time=$((end_time - start_time))
188 ds=$((delta_time % 60))
189 dm=$(((delta_time / 60) % 60))
190 dh=$((delta_time / 3600))
191 # return the elapsed time
192 printf "%d:%02d:%02d" $dh $dm $ds
196 # convert commas in csv strings to spaces (ssv)
# NOTE(review): only part of csv2ssv is visible here; ${csv} is presumably
# assigned from $1 in lines not shown. Runs of spaces are collapsed to one.
199 if [ -n "${csv}" ]; then
200 ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
# Check whether ${1} is listed in the comma-separated ENABLE_OS_SERVICES list.
# Callers compare this command's stdout to "1" (see collect_netvirt_logs), so
# it presumably echoes 1 when the feature is enabled -- confirm.
206 function is_openstack_feature_enabled() {
208 for enabled_feature in $(csv2ssv ${ENABLE_OS_SERVICES}); do
209 if [ "${enabled_feature}" == "${feature}" ]; then
# Dump all job parameters (for archiving as params.txt). The body below is
# heredoc output text in the original file and is emitted verbatim, so no
# comments are added inside it.
219 # shellcheck disable=SC2153
220 function print_job_parameters() {
224 DISTROBRANCH: ${DISTROBRANCH}
225 DISTROSTREAM: ${DISTROSTREAM}
226 BUNDLE_URL: ${BUNDLE_URL}
227 CONTROLLERFEATURES: ${CONTROLLERFEATURES}
228 CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
229 SCRIPTPLAN: ${SCRIPTPLAN}
230 CONFIGPLAN: ${CONFIGPLAN}
231 STREAMTESTPLAN: ${STREAMTESTPLAN}
232 TESTPLAN: ${TESTPLAN}
234 PATCHREFSPEC: ${PATCHREFSPEC}
235 OPENSTACK_BRANCH: ${OPENSTACK_BRANCH}
236 DEVSTACK_HASH: ${DEVSTACK_HASH}
237 ODL_ML2_DRIVER_REPO: ${ODL_ML2_DRIVER_REPO}
238 ODL_ML2_BRANCH: ${ODL_ML2_BRANCH}
239 ODL_ML2_DRIVER_VERSION: ${ODL_ML2_DRIVER_VERSION}
240 ODL_ML2_PORT_BINDING: ${ODL_ML2_PORT_BINDING}
241 DEVSTACK_KUBERNETES_PLUGIN_REPO: ${DEVSTACK_KUBERNETES_PLUGIN_REPO}
242 DEVSTACK_LBAAS_PLUGIN_REPO: ${DEVSTACK_LBAAS_PLUGIN_REPO}
243 DEVSTACK_NETWORKING_SFC_PLUGIN_REPO: ${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO}
244 IPSEC_VXLAN_TUNNELS_ENABLED: ${IPSEC_VXLAN_TUNNELS_ENABLED}
245 PUBLIC_BRIDGE: ${PUBLIC_BRIDGE}
246 ENABLE_HAPROXY_FOR_NEUTRON: ${ENABLE_HAPROXY_FOR_NEUTRON}
247 ENABLE_OS_SERVICES: ${ENABLE_OS_SERVICES}
248 ENABLE_OS_COMPUTE_SERVICES: ${ENABLE_OS_COMPUTE_SERVICES}
249 ENABLE_OS_NETWORK_SERVICES: ${ENABLE_OS_NETWORK_SERVICES}
250 ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
251 DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
252 TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
253 SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
254 ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
255 PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
256 ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
257 CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
258 LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
259 ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
260 ODL_SNAT_MODE: ${ODL_SNAT_MODE}
# Start a background tcpdump on a remote node; capture file name encodes
# prefix, ip and filter. NOTE(review): ${prefix}/${ip}/${filter} assignments
# are not visible here -- presumably $1/$2/$3; confirm.
265 function tcpdump_start() {
# Spaces in the filter become "_" so the pcap file name is shell-safe.
269 filter_=${filter// /_}
271 printf "node ${ip}, ${prefix}_${ip}__${filter}: starting tcpdump\n"
272 ssh ${ip} "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
273 ${SSH} ${ip} "ps -ef | grep tcpdump"
# Stop tcpdump on a remote node and compress captured pcaps so log collection
# (which copies *.xz files) picks them up.
276 function tcpdump_stop() {
279 printf "node $ip: stopping tcpdump\n"
280 ${SSH} ${ip} "ps -ef | grep tcpdump.sh"
281 ${SSH} ${ip} "sudo pkill -f tcpdump"
# -9e = maximum compression, -k keeps the originals for the ls check below.
282 ${SSH} ${ip} "sudo xz -9ekvvf /tmp/*.pcap"
283 ${SSH} ${ip} "sudo ls -al /tmp/*.pcap"
284 # copy_logs will copy any *.xz files
287 # Collect the list of files on the hosts
# Produces find and rsync --list-only listings of /etc, /opt/stack and /var on
# the remote host, then archives them into ${folder}.
# NOTE(review): ${ip}/${folder}/${finddir} assignments are not visible here --
# presumably $1/$2 plus a derived dir; confirm.
288 function collect_files() {
292 ${SSH} ${ip} "mkdir -p ${finddir}"
293 ${SSH} ${ip} "sudo find /etc > ${finddir}/find.etc.txt"
294 ${SSH} ${ip} "sudo find /opt/stack > ${finddir}/find.opt.stack.txt"
# NOTE(review): the next two lines both list /var; find2.txt looks like a
# leftover duplicate of find.var.txt -- candidate for removal.
295 ${SSH} ${ip} "sudo find /var > ${finddir}/find2.txt"
296 ${SSH} ${ip} "sudo find /var > ${finddir}/find.var.txt"
297 ${SSH} ${ip} "sudo tar -cf - -C /tmp finder | xz -T 0 > /tmp/find.tar.xz"
298 scp ${ip}:/tmp/find.tar.xz ${folder}
300 rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/etc/ > ${finddir}/rsync.etc.txt
301 rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/opt/stack/ > ${finddir}/rsync.opt.stack.txt
302 rsync --rsync-path="sudo rsync" --list-only -arvhe ssh ${ip}:/var/ > ${finddir}/rsync.var.txt
303 tar -cf - -C /tmp finder | xz -T 0 > /tmp/rsync.tar.xz
304 cp /tmp/rsync.tar.xz ${folder}
307 # List of extra services to extract from journalctl
308 # Add new services on a separate line, in alpha order, add \ at the end
# Control-node service list below, compute-node list after it. These are
# backslash-continued string assignments: do not insert lines between entries.
309 extra_services_cntl=" \
313 openvswitch.service \
314 ovs-vswitchd.service \
315 ovsdb-server.service \
316 rabbitmq-server.service \
319 extra_services_cmp=" \
321 openvswitch.service \
322 ovs-vswitchd.service \
323 ovsdb-server.service \
326 # Collect the logs for the openstack services
327 # First get all the services started by devstack which would have devstack@ as a prefix
328 # Next get all the extra services
# Arguments: $3 = node type ("control"/"compute"); $1/$2 are presumably
# ip/folder (assignments not visible here -- confirm).
329 function collect_openstack_logs() {
332 local -r node_type=${3}
333 local oslogs="${folder}/oslogs"
335 printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}\n"
338 # There are always some logs in /opt/stack/logs and this also covers the
339 # pre-queens branches which always use /opt/stack/logs
340 rsync -avhe ssh ${ip}:/opt/stack/logs/* ${oslogs} # rsync to prevent copying of symbolic links
342 # Starting with queens break out the logs from journalctl
343 if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Generate a helper script to run remotely: "\$"-escaped expansions evaluate on
# the remote host, unescaped ones here. Heredoc content: no comments inside.
344 cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
345 extra_services_cntl="${extra_services_cntl}"
346 extra_services_cmp="${extra_services_cmp}"
348 function extract_from_journal() {
349 local -r services=\${1}
350 local -r folder=\${2}
351 local -r node_type=\${3}
352 printf "extract_from_journal folder: \${folder}, services: \${services}\n"
353 for service in \${services}; do
354 # strip anything before @ and anything after .
355 # devstack@g-api.service will end as g-api
356 service_="\${service#*@}"
357 service_="\${service_%.*}"
358 sudo journalctl -u "\${service}" > "\${folder}/\${service_}.log"
364 systemctl list-unit-files --all > /tmp/oslogs/systemctl.units.log 2>&1
365 svcs=\$(grep devstack@ /tmp/oslogs/systemctl.units.log | awk '{print \$1}')
366 extract_from_journal "\${svcs}" "/tmp/oslogs"
367 if [ "\${node_type}" = "control" ]; then
368 extract_from_journal "\${extra_services_cntl}" "/tmp/oslogs"
370 extract_from_journal "\${extra_services_cmp}" "/tmp/oslogs"
374 # cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
375 printf "collect_openstack_logs for ${node_type} node: ${ip} into ${oslogs}, executing script\n"
376 cat ${WORKSPACE}/collect_openstack_logs.sh
# Ship the generated script to the node, run it, and pull back the results.
377 scp ${WORKSPACE}/collect_openstack_logs.sh ${ip}:/tmp
378 ${SSH} ${ip} "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
379 rsync -avhe ssh ${ip}:/tmp/oslogs/* ${oslogs}
380 scp ${ip}:/tmp/collect_openstack_logs.log ${oslogs}
381 fi # if [ "${OPENSTACK_BRANCH}" = "stable/queens" ]; then
# Collect logs/configs from all ODL controllers and OpenStack control/compute
# nodes into ${WORKSPACE}/archives. Best-effort: set +e so collection failures
# do not fail the job.
384 function collect_netvirt_logs() {
385 set +e # We do not want to create red dot just because something went wrong while fetching logs.
# Debug helper script executed later on each OpenStack node; heredoc content
# is emitted verbatim (no comments added inside it).
387 cat > extra_debug.sh << EOF
388 echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
389 /usr/sbin/lsmod | /usr/bin/grep openvswitch
390 echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
391 sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
392 echo -e "\nsudo netstat -punta\n"
394 echo -e "\nsudo getenforce\n"
396 echo -e "\nsudo systemctl status httpd\n"
397 sudo systemctl status httpd
400 source /opt/stack/devstack/openrc admin admin
401 echo -e "\nenv after openrc\n"
403 echo -e "\nsudo du -hs /opt/stack"
404 sudo du -hs /opt/stack
405 echo -e "\nsudo mount"
407 echo -e "\ndmesg -T > /tmp/dmesg.log"
408 dmesg -T > /tmp/dmesg.log
409 echo -e "\njournalctl > /tmp/journalctl.log\n"
410 sudo journalctl > /tmp/journalctl.log
411 echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
412 ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
415 # Since this log collection work is happening before the archive build macro which also
416 # creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
417 # archives build step will essentially be a noop.
418 mkdir -p ${WORKSPACE}/archives
420 mv /tmp/changes.txt ${WORKSPACE}/archives
421 mv ${WORKSPACE}/rabbit.txt ${WORKSPACE}/archives
424 # FIXME: Do not create .tar and gzip before copying.
# Per-controller collection; ${!CONTROLLERIP} is indirect expansion of the
# ODL_SYSTEM_<i>_IP variable.
425 for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
426 CONTROLLERIP=ODL_SYSTEM_${i}_IP
427 echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
428 NODE_FOLDER="odl_${i}"
429 mkdir -p ${NODE_FOLDER}
430 echo "Lets's take the karaf thread dump again..."
431 ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
432 pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
433 echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
434 ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
435 echo "killing karaf process..."
436 ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
437 ${SSH} ${!CONTROLLERIP} "sudo journalctl > /tmp/journalctl.log"
438 scp ${!CONTROLLERIP}:/tmp/journalctl.log ${NODE_FOLDER}
439 ${SSH} ${!CONTROLLERIP} "dmesg -T > /tmp/dmesg.log"
440 scp ${!CONTROLLERIP}:/tmp/dmesg.log ${NODE_FOLDER}
441 ${SSH} ${!CONTROLLERIP} "tar -cf - -C /tmp/${BUNDLEFOLDER} etc | xz -T 0 > /tmp/etc.tar.xz"
442 scp ${!CONTROLLERIP}:/tmp/etc.tar.xz ${NODE_FOLDER}
443 ${SSH} ${!CONTROLLERIP} "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
444 ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
445 scp ${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar ${NODE_FOLDER}
446 ${SSH} ${!CONTROLLERIP} "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
447 scp ${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar ${NODE_FOLDER}
448 tar -xvf ${NODE_FOLDER}/odl${i}_karaf.log.tar -C ${NODE_FOLDER} --strip-components 2 --transform s/karaf/odl${i}_karaf/g
# Derive error/warn/exception summaries from the extracted karaf log.
449 grep "ROBOT MESSAGE\| ERROR " ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err.log
450 grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
451 ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_err_warn_exception.log
452 # Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
453 sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' ${NODE_FOLDER}/odl${i}_karaf.log > ${NODE_FOLDER}/odl${i}_exception.log
454 mv /tmp/odl${i}_exceptions.txt ${NODE_FOLDER}
455 rm ${NODE_FOLDER}/odl${i}_karaf.log.tar
456 mv *_threads* ${NODE_FOLDER}
457 mv ps_* ${NODE_FOLDER}
458 mv ${NODE_FOLDER} ${WORKSPACE}/archives/
461 print_job_parameters > ${WORKSPACE}/archives/params.txt
# OpenStack control (or combo, when n-cpu is enabled) node collection.
464 for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
465 OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
466 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
467 echo "collect_logs: for openstack combo node ip: ${!OSIP}"
468 NODE_FOLDER="combo_${i}"
470 echo "collect_logs: for openstack control node ip: ${!OSIP}"
471 NODE_FOLDER="control_${i}"
473 mkdir -p ${NODE_FOLDER}
474 tcpdump_stop "${!OSIP}"
475 scp extra_debug.sh ${!OSIP}:/tmp
476 # Capture compute logs if this is a combo node
477 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
478 scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
479 scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
480 scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
481 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
483 ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
484 scp ${!OSIP}:/etc/dnsmasq.conf ${NODE_FOLDER}
485 scp ${!OSIP}:/etc/keystone/keystone.conf ${NODE_FOLDER}
486 scp ${!OSIP}:/etc/keystone/keystone-uwsgi-admin.ini ${NODE_FOLDER}
487 scp ${!OSIP}:/etc/keystone/keystone-uwsgi-public.ini ${NODE_FOLDER}
488 scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
489 scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
490 scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
491 scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
492 scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
493 scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
494 scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
495 scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
496 scp ${!OSIP}:/etc/nova/nova-api-uwsgi.ini ${NODE_FOLDER}
497 scp ${!OSIP}:/etc/nova/nova_cell1.conf ${NODE_FOLDER}
498 scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
499 scp ${!OSIP}:/etc/nova/placement-uwsgi.ini ${NODE_FOLDER}
500 scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
501 scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
502 scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
503 scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
504 scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
505 scp ${!OSIP}:/opt/stack/tempest/etc/tempest.conf ${NODE_FOLDER}
506 scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
507 scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
508 scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
509 scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
510 scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
511 scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
512 scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
513 collect_files "${!OSIP}" "${NODE_FOLDER}"
514 ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log rabbitmq | xz -T 0 > /tmp/rabbitmq.tar.xz "
515 scp ${!OSIP}:/tmp/rabbitmq.tar.xz ${NODE_FOLDER}
516 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
517 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
518 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
519 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone_access.log ${NODE_FOLDER}
520 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/httpd/keystone.log ${NODE_FOLDER}
521 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
522 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
523 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
524 collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "control"
525 mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
526 # qdhcp files are created by robot tests and copied into /tmp/qdhcp during the test
527 tar -cf - -C /tmp qdhcp | xz -T 0 > /tmp/qdhcp.tar.xz
528 mv /tmp/qdhcp.tar.xz ${NODE_FOLDER}
529 mv ${NODE_FOLDER} ${WORKSPACE}/archives/
# OpenStack compute node collection.
533 for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
534 OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
535 echo "collect_logs: for openstack compute node ip: ${!OSIP}"
536 NODE_FOLDER="compute_${i}"
537 mkdir -p ${NODE_FOLDER}
538 tcpdump_stop "${!OSIP}"
539 scp extra_debug.sh ${!OSIP}:/tmp
540 ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log 2>&1"
541 scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
542 scp ${!OSIP}:/etc/nova/nova-cpu.conf ${NODE_FOLDER}
543 scp ${!OSIP}:/etc/openstack/clouds.yaml ${NODE_FOLDER}
544 scp ${!OSIP}:/opt/stack/devstack/.stackenv ${NODE_FOLDER}
545 scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
546 scp ${!OSIP}:/opt/stack/devstack/openrc ${NODE_FOLDER}
547 scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
548 scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
549 scp ${!OSIP}:/tmp/dmesg.log ${NODE_FOLDER}
550 scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
551 scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
552 scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
553 scp ${!OSIP}:/tmp/ovsdb-tool.log ${NODE_FOLDER}
554 scp ${!OSIP}:/tmp/tcpdump_start.log ${NODE_FOLDER}
555 collect_files "${!OSIP}" "${NODE_FOLDER}"
556 ${SSH} ${!OSIP} "sudo tar -cf - -C /var/log libvirt | xz -T 0 > /tmp/libvirt.tar.xz "
557 scp ${!OSIP}:/tmp/libvirt.tar.xz ${NODE_FOLDER}
558 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
559 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
560 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages* ${NODE_FOLDER}
561 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
562 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
563 rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
564 collect_openstack_logs "${!OSIP}" "${NODE_FOLDER}" "compute"
565 mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
566 mv ${NODE_FOLDER} ${WORKSPACE}/archives/
# Tempest results, if present on control node 1: convert subunit stream to HTML
# and copy results + log into the archives.
570 DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
572 TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
573 # Look for tempest test results in the $TESTREPO dir and copy if found
574 if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
575 ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
576 ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
577 mkdir -p ${TEMPEST_LOGS_DIR}
578 scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
579 scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
581 echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
583 } # collect_netvirt_logs()
585 # Utility function for joining strings.
# NOTE(review): only the accumulation line of join() is visible here;
# delim/str/final are presumably set in surrounding (unshown) lines -- confirm.
591 final=${final}${delim}${str}
# Build nodes_list (used by the cluster configuration script) by joining the
# ODL_SYSTEM_<i>_IP addresses.
597 function get_nodes_list() {
598 # Create the string for nodes
# ${!CONTROLLERIP} is indirect expansion of the per-node IP variable.
599 for i in `seq 1 ${NUM_ODL_SYSTEM}` ; do
600 CONTROLLERIP=ODL_SYSTEM_${i}_IP
601 nodes[$i]=${!CONTROLLERIP}
604 nodes_list=$(join "${nodes[@]}")
# Compute and export ACTUALFEATURES / SPACE_SEPARATED_FEATURES from
# CONTROLLERSCOPE and CONTROLLERFEATURES; bumps CONTROLLERMEM for scope "all".
608 function get_features() {
609 if [ ${CONTROLLERSCOPE} == 'all' ]; then
610 ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
# The "all" feature set needs more heap than the default 2048m.
611 export CONTROLLERMEM="3072m"
613 ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
616 # Some versions of jenkins job builder result in feature list containing spaces
617 # and ending in newline. Remove all that.
618 ACTUALFEATURES=`echo "${ACTUALFEATURES}" | tr -d '\n \r'`
619 echo "ACTUALFEATURES: ${ACTUALFEATURES}"
621 # In the case that we want to install features via karaf shell, a space separated list of
622 # ACTUALFEATURES IS NEEDED
623 SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
624 echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"
626 export ACTUALFEATURES
627 export SPACE_SEPARATED_FEATURES
630 # Create the configuration script to be run on controllers.
# Generates ${WORKSPACE}/configuration-script.sh: it downloads/extracts the
# distribution, patches Maven repos, boot features and logging, sets Java vars,
# and configures clustering (the member index arrives as the remote \$1).
# Heredoc body below is emitted verbatim; no comments are added inside it.
631 function create_configuration_script() {
632 cat > ${WORKSPACE}/configuration-script.sh <<EOF
634 source /tmp/common-functions.sh ${BUNDLEFOLDER}
636 echo "Changing to /tmp"
639 echo "Downloading the distribution from ${ACTUAL_BUNDLE_URL}"
640 wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'
642 echo "Extracting the new controller..."
645 echo "Adding external repositories..."
646 sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot@id=opendaylight-snapshot@snapshots, https://nexus.opendaylight.org/content/repositories/public@id=opendaylight-mirror, http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
649 if [[ "$USEFEATURESBOOT" == "True" ]]; then
650 echo "Configuring the startup features..."
651 sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
654 FEATURE_TEST_STRING="features-integration-test"
655 KARAF_VERSION=${KARAF_VERSION:-karaf4}
656 if [[ "$KARAF_VERSION" == "karaf4" ]]; then
657 FEATURE_TEST_STRING="features-test"
660 sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
661 if [[ ! -z "${REPO_URL}" ]]; then
662 sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
666 configure_karaf_log "${KARAF_VERSION}" "${CONTROLLERDEBUGMAP}"
668 set_java_vars "${JAVA_HOME}" "${CONTROLLERMEM}" "${MEMCONF}"
670 echo "Listing all open ports on controller system..."
673 # Copy shard file if exists
674 if [ -f /tmp/custom_shard_config.txt ]; then
675 echo "Custom shard config exists!!!"
676 echo "Copying the shard config..."
677 cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
680 echo "Configuring cluster"
681 /tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${nodes_list}
683 echo "Dump akka.conf"
686 echo "Dump modules.conf"
689 echo "Dump module-shards.conf"
690 cat ${MODULESHARDSCONF}
692 # cat > ${WORKSPACE}/configuration-script.sh <<EOF
695 # Create the startup script to be run on controllers.
# Generates ${WORKSPACE}/startup-script.sh which starts Karaf with console
# output redirected. Heredoc body below is emitted verbatim; no comments are
# added inside it.
696 function create_startup_script() {
697 cat > ${WORKSPACE}/startup-script.sh <<EOF
698 echo "Redirecting karaf console output to karaf_console.log"
699 export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
700 mkdir -p /tmp/${BUNDLEFOLDER}/data/log
702 echo "Starting controller..."
703 /tmp/${BUNDLEFOLDER}/bin/start
705 # cat > ${WORKSPACE}/startup-script.sh <<EOF
# Generate ${WORKSPACE}/post-startup-script.sh: waits for the karaf ssh port
# when features are installed via the shell, installs features one by one,
# waits for the infrautils "System ready" marker, and aborts on known fatal
# log messages. "\$"-escaped expansions are evaluated on the controller.
# Heredoc body below is emitted verbatim; no comments are added inside it.
708 function create_post_startup_script() {
709 cat > ${WORKSPACE}/post-startup-script.sh <<EOF
710 if [[ "$USEFEATURESBOOT" != "True" ]]; then
712 # wait up to 60s for karaf port 8101 to be opened, polling every 5s
714 until [[ \$loop_count -ge 12 ]]; do
715 netstat -na | grep 8101 && break;
716 loop_count=\$[\$loop_count+1];
720 echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
721 for feature in ${SPACE_SEPARATED_FEATURES}; do
722 sshpass -p karaf ssh -o StrictHostKeyChecking=no \
723 -o UserKnownHostsFile=/dev/null \
725 -p 8101 karaf@localhost \
726 feature:install --no-auto-refresh \$feature;
729 echo "ssh to karaf console to list -i installed features"
730 sshpass -p karaf ssh -o StrictHostKeyChecking=no \
731 -o UserKnownHostsFile=/dev/null \
733 -p 8101 karaf@localhost \
737 echo "Waiting up to 3 minutes for controller to come up, checking every 5 seconds..."
740 grep 'org.opendaylight.infrautils.ready-impl.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
741 if [ \$? -eq 0 ]; then
742 echo "Controller is UP"
747 # if we ended up not finding ready status in the above loop, we can output some debugs
748 grep 'org.opendaylight.infrautils.ready-impl.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
749 if [ $? -ne 0 ]; then
750 echo "Timeout Controller DOWN"
751 echo "Dumping first 500K bytes of karaf log..."
752 head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
753 echo "Dumping last 500K bytes of karaf log..."
754 tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
755 echo "Listing all open ports on controller system"
760 echo "Listing all open ports on controller system..."
763 function exit_on_log_file_message {
764 echo "looking for \"\$1\" in log file"
765 if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
766 echo ABORTING: found "\$1"
767 echo "Dumping first 500K bytes of karaf log..."
768 head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
769 echo "Dumping last 500K bytes of karaf log..."
770 tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
775 exit_on_log_file_message 'BindException: Address already in use'
776 exit_on_log_file_message 'server is unhealthy'
778 # cat > ${WORKSPACE}/post-startup-script.sh <<EOF
781 # Copy over the configuration script and configuration files to each controller
782 # Execute the configuration script on each controller.
783 function copy_and_run_configuration_script() {
784 for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
785 CONTROLLERIP=ODL_SYSTEM_${i}_IP
786 echo "Configuring member-${i} with IP address ${!CONTROLLERIP}"
787 scp ${WORKSPACE}/configuration-script.sh ${!CONTROLLERIP}:/tmp/
# Pass the member index; the remote script forwards it to configure_cluster.sh.
788 ssh ${!CONTROLLERIP} "bash /tmp/configuration-script.sh ${i}"
792 # Copy over the startup script to each controller and execute it.
793 function copy_and_run_startup_script() {
794 for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
795 CONTROLLERIP=ODL_SYSTEM_${i}_IP
796 echo "Starting member-${i} with IP address ${!CONTROLLERIP}"
797 scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp/
798 ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
# Copy the post-startup script to each controller and run it, passing an
# incrementing seed index to each node.
802 function copy_and_run_post_startup_script() {
804 for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
805 CONTROLLERIP=ODL_SYSTEM_${i}_IP
806 echo "Execute the post startup script on controller ${!CONTROLLERIP}"
807 scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
# NOTE(review): seed_index is presumably initialized in a line not shown here.
808 ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
809 if [ $(( $i % ${NUM_ODL_SYSTEM} )) == 0 ]; then
815 function create_controller_variables() {
816 echo "Generating controller variables..."
817 for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
818 CONTROLLERIP=ODL_SYSTEM_${i}_IP
819 odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
820 echo "Lets's take the karaf thread dump"
821 ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
822 pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
823 echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
824 ssh ${!CONTROLLERIP} "${JAVA_HOME}/bin/jstack -l ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true