# --- Preamble: activate the Robot Framework virtualenv and report which
# --- gerrit changes made it into the distribution under test.
# NOTE(review): this file is a numbered listing with gaps — the number at the
# start of each line is the original line number, and jumps (e.g. 3 -> 5)
# mean lines are elided from this view. Comments describe only what is visible.
1 #@IgnoreInspection BashAddShebang
2 # Activate robotframework virtualenv
3 # ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
5 source ${ROBOT_VENV}/bin/activate
7 # TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
8 echo "showing recent changes that made it in to the distribution used by this job"
# urllib3 is upgraded first — presumably changes.py needs a newer version than
# the venv ships with; TODO confirm.
9 pip install --upgrade urllib3
# List recent changes contained in the distribution. '|| true' is deliberate:
# this is informational only and must not fail the job.
10 python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
11 -u ${ACTUALBUNDLEURL} -b ${DISTROBRANCH} \
12 -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true
14 echo "#################################################"
15 echo "## Deploy Openstack 3-node ##"
16 echo "#################################################"
#######################################
# Build the devstack local.conf for the OpenStack control node.
# Globals (read): WORKSPACE, ODL_ML2_BRANCH, DISABLE_OS_SERVICES,
#   ENABLE_OS_SERVICES, OPENSTACK_CONTROL_NODE_IP, TENANT_NETWORK_TYPE,
#   ODL_ML2_DRIVER_REPO, ENABLE_NETWORKING_L2GW, NETWORKING_L2GW_DRIVER,
#   ODL_ML2_DRIVER_VERSION, NUM_ODL_SYSTEM, ODL_SYSTEM_<n>_IP,
#   ENABLE_HAPROXY_FOR_NEUTRON, NUM_OPENSTACK_SYSTEM, CONTROLLERFEATURES,
#   PUBLIC_BRIDGE, ODL_ENABLE_L3_FWD, DISABLE_ODL_L3_PLUGIN
# Outputs: writes ${WORKSPACE}/local.conf_control, then cats it to stdout.
# NOTE(review): this excerpt elides lines (original numbering jumps), so the
# heredoc 'EOF' terminators, 'do'/'done', 'else'/'fi' and the closing '}' are
# not visible here; comments are placed only at points that are shell context
# in the original file.
21 function create_control_node_local_conf {
22 local_conf_file_name=${WORKSPACE}/local.conf_control
# Branch-specific setup; the body of this branch check is elided in this
# excerpt — presumably it picks logging variables, TODO confirm.
24 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
# Start a fresh local.conf ('>' truncates any previous run's file).
29 cat > ${local_conf_file_name} << EOF
32 SCREEN_LOGDIR=/opt/stack/data/log
# One disable_service line per entry in DISABLE_OS_SERVICES (the unquoted
# expansion is intentional: word-splitting yields the list of service names).
38 for service_name in ${DISABLE_OS_SERVICES}
40 cat >> ${local_conf_file_name} << EOF
41 disable_service ${service_name}
# Likewise, one enable_service line per entry in ENABLE_OS_SERVICES.
44 for service_name in ${ENABLE_OS_SERVICES}
46 cat >> ${local_conf_file_name} << EOF
47 enable_service ${service_name}
# Core devstack settings. This node hosts the shared services; the \$-escaped
# references (e.g. \$SERVICE_HOST) are expanded later by devstack, not here.
52 cat >> ${local_conf_file_name} << EOF
53 HOST_IP=$OPENSTACK_CONTROL_NODE_IP
54 SERVICE_HOST=\$HOST_IP
56 NEUTRON_CREATE_INITIAL_NETWORKS=False
58 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
61 ENABLE_TENANT_TUNNELS=True
63 MYSQL_HOST=\$SERVICE_HOST
64 RABBIT_HOST=\$SERVICE_HOST
65 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
66 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
67 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
70 RABBIT_PASSWORD=rabbit
72 SERVICE_PASSWORD=admin
75 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
# Optional: enable the networking-l2gw plugin with the ODL L2GW service driver.
83 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
84 cat >> ${local_conf_file_name} << EOF
86 enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
87 NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
88 ENABLED_SERVICES+=,neutron,q-svc,nova,q-meta
# Select the v2 networking-odl driver when requested.
93 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
94 echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
# Clustered ODL (>1 controller): build a comma-separated list of controller
# IPs for ODL_OVS_MANAGERS via indirect expansion (${!odlip}), and choose the
# manager IP — the HAProxy node (which occupies the last
# OPENSTACK_COMPUTE_NODE_<n>_IP slot) when HAProxy is enabled, otherwise the
# first controller.
97 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
98 odl_list=${ODL_SYSTEM_1_IP}
99 for i in `seq 2 ${NUM_ODL_SYSTEM}`
101 odlip=ODL_SYSTEM_${i}_IP
102 odl_list=${odl_list},${!odlip}
104 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
105 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
106 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
107 odl_mgr_ip=${!odlmgrip}
109 odl_mgr_ip=${ODL_SYSTEM_1_IP}
111 cat >> ${local_conf_file_name} << EOF
112 ODL_OVS_MANAGERS=${odl_list}
113 ODL_MGR_IP=${odl_mgr_ip}
# Single-controller case: point everything at ODL_SYSTEM_1_IP.
116 cat >> ${local_conf_file_name} << EOF
117 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
121 # if we are using the old netvirt impl, as determined by the feature name
122 # odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
123 # want ODL_L3 to be True. New impl wants it False
124 if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
130 # if we are using the new netvirt impl, as determined by the feature name
131 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
132 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
133 # external networks in legacy netvirt
134 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
135 ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
137 ODL_PROVIDER_MAPPINGS=
# External (L3) forwarding: wire the public bridge / physnet into local.conf.
140 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
141 cat >> ${local_conf_file_name} << EOF
142 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
143 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
144 ML2_VLAN_RANGES=physnet1
145 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
148 PUBLIC_INTERFACE=br100
# On stable/mitaka the ODL L3 router service plugin must be set explicitly in
# neutron.conf, unless disabled via DISABLE_ODL_L3_PLUGIN.
151 if [ -z ${DISABLE_ODL_L3_PLUGIN} ] || [ "${DISABLE_ODL_L3_PLUGIN}" == "no" ]; then
152 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
153 cat >> ${local_conf_file_name} << EOF
156 [[post-config|\$NEUTRON_CONF]]
158 service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
161 fi #check for ODL_ML2_BRANCH
162 fi #check for DISABLE_ODL_L3_PLUGIN
164 fi #ODL_ENABLE_L3_FWD check
# Final post-config stanzas: ml2 polling/MTU tweaks, DHCP-agent metadata
# settings, and the nova config-drive override.
166 cat >> ${local_conf_file_name} << EOF
167 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
169 minimize_polling=True
172 # Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
173 # MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phynset1(1500)
174 physical_network_mtus = physnet1:1440
176 [[post-config|/etc/neutron/dhcp_agent.ini]]
178 force_metadata = True
179 enable_isolated_metadata = True
181 [[post-config|/etc/nova/nova.conf]]
183 force_config_drive = False
187 echo "local.conf Created...."
188 cat ${local_conf_file_name}
#######################################
# Build the devstack local.conf for one OpenStack compute node.
# Globals (read): WORKSPACE, ODL_ML2_BRANCH, OPENSTACK_CONTROL_NODE_IP,
#   ODL_ML2_DRIVER_REPO, NUM_ODL_SYSTEM, ODL_SYSTEM_<n>_IP,
#   ENABLE_HAPROXY_FOR_NEUTRON, NUM_OPENSTACK_SYSTEM, CONTROLLERFEATURES,
#   PUBLIC_BRIDGE, ODL_ENABLE_L3_FWD
# Outputs: writes ${WORKSPACE}/local.conf_compute_${HOSTIP} and cats it.
# NOTE(review): ${HOSTIP} is used but its assignment is elided from this
# excerpt — presumably set from "$1" (the caller passes the compute IP as the
# first argument); TODO confirm. Heredoc terminators, 'done'/'fi' and the
# closing '}' are likewise elided here.
191 function create_compute_node_local_conf {
194 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
199 local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
# Compute nodes run only the nova compute service (n-cpu) and point all
# shared-service endpoints at the control node. The \$-escaped references
# are expanded later by devstack, not by this script.
200 cat > ${local_conf_file_name} << EOF
204 SCREEN_LOGDIR=/opt/stack/data/log
207 NOVA_VNC_ENABLED=True
209 ENABLED_SERVICES=n-cpu
212 SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}
215 ENABLE_TENANT_TUNNELS=True
216 Q_ML2_TENANT_NETWORK_TYPE=vxlan
218 Q_HOST=\$SERVICE_HOST
219 MYSQL_HOST=\$SERVICE_HOST
220 RABBIT_HOST=\$SERVICE_HOST
221 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
222 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
223 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
226 RABBIT_PASSWORD=rabbit
227 SERVICE_TOKEN=service
228 SERVICE_PASSWORD=admin
231 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
# Clustered ODL: same manager-selection logic as the control-node variant —
# list all controller IPs, and use the HAProxy node (last
# OPENSTACK_COMPUTE_NODE_<n>_IP slot) as manager when HAProxy is enabled.
236 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
237 odl_list=${ODL_SYSTEM_1_IP}
238 for i in `seq 2 ${NUM_ODL_SYSTEM}`
240 odlip=ODL_SYSTEM_${i}_IP
241 odl_list=${odl_list},${!odlip}
243 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
244 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
245 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
246 odl_mgr_ip=${!odlmgrip}
248 odl_mgr_ip=${ODL_SYSTEM_1_IP}
250 cat >> ${local_conf_file_name} << EOF
251 ODL_OVS_MANAGERS=${odl_list}
252 ODL_MGR_IP=${odl_mgr_ip}
# Single-controller case.
255 cat >> ${local_conf_file_name} << EOF
256 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
260 # if we are using the new netvirt impl, as determined by the feature name
261 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
262 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
263 # external networks in legacy netvirt
264 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
265 ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
267 ODL_PROVIDER_MAPPINGS=
# External (L3) forwarding settings for computes, mirroring the control node.
270 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
271 cat >> ${local_conf_file_name} << EOF
272 # Uncomment lines below if odl-compute is to be used for l3 forwarding
275 PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
276 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
277 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
278 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
281 echo "local.conf Created...."
282 cat ${local_conf_file_name}
#######################################
# Install and configure HAProxy to load-balance ODL northbound requests
# (ports 8080 and 8181) across all ODL controllers.
# The HAProxy host is the machine occupying the last
# OPENSTACK_COMPUTE_NODE_<n>_IP slot (index NUM_OPENSTACK_SYSTEM), resolved
# via indirect expansion.
# Globals (read): NUM_OPENSTACK_SYSTEM, NUM_ODL_SYSTEM, ODL_SYSTEM_<n>_IP,
#   WORKSPACE, SSH
# Side effects: writes install_ha_proxy.sh, haproxy.cfg and deploy_ha_proxy.sh
#   under ${WORKSPACE}, copies them to the HAProxy host and runs them there.
# NOTE(review): heredoc 'EOF' terminators, 'do'/'done' and the closing '}'
# are elided from this excerpt (original numbering jumps).
285 function configure_haproxy_for_neutron_requests () {
286 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
287 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
288 ha_proxy_ip=${!odlmgrip}
# Installer script run remotely: stop firewalld and install haproxy plus the
# SELinux policy tools needed for 'semanage' below.
290 cat > ${WORKSPACE}/install_ha_proxy.sh<< EOF
291 sudo systemctl stop firewalld
292 sudo yum -y install policycoreutils-python haproxy
# Generate haproxy.cfg: global/timeout settings (partly elided here) and a
# listener on port 8080 backed by every ODL controller.
295 cat > ${WORKSPACE}/haproxy.cfg << EOF
301 pidfile /tmp/haproxy.pid
309 timeout http-request 10s
317 bind ${ha_proxy_ip}:8080
321 for i in `seq 1 ${NUM_ODL_SYSTEM}`
323 odlip=ODL_SYSTEM_${i}_IP
324 cat >> ${WORKSPACE}/haproxy.cfg << EOF
325 server controller-$i ${!odlip}:8080 check fall 5 inter 2000 rise 2
# Second listener: RESTCONF on port 8181, same backend set.
329 cat >> ${WORKSPACE}/haproxy.cfg << EOF
330 listen opendaylight_rest
331 bind ${ha_proxy_ip}:8181
335 for i in `seq 1 ${NUM_ODL_SYSTEM}`
337 odlip=ODL_SYSTEM_${i}_IP
338 cat >> ${WORKSPACE}/haproxy.cfg << EOF
339 server controller-rest-$i ${!odlip}:8181 check fall 5 inter 2000 rise 2
# Deployment script run remotely: point the systemd unit at /tmp/haproxy.cfg
# (the sed rewrites the config path inside haproxy.service), make the
# haproxy_t SELinux domain permissive, then (re)start the service.
343 cat > ${WORKSPACE}/deploy_ha_proxy.sh<< EOF
344 sudo chown haproxy:haproxy /tmp/haproxy.cfg
345 sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
346 sudo /usr/sbin/semanage permissive -a haproxy_t
347 sudo systemctl restart haproxy
350 sudo systemctl status haproxy
# Ship the three files to the HAProxy host and execute install then deploy.
353 scp ${WORKSPACE}/install_ha_proxy.sh ${ha_proxy_ip}:/tmp
354 ${SSH} ${ha_proxy_ip} "sudo bash /tmp/install_ha_proxy.sh"
355 scp ${WORKSPACE}/haproxy.cfg ${ha_proxy_ip}:/tmp
356 scp ${WORKSPACE}/deploy_ha_proxy.sh ${ha_proxy_ip}:/tmp
357 ${SSH} ${ha_proxy_ip} "sudo bash /tmp/deploy_ha_proxy.sh"
#######################################
# Best-effort log collection from all ODL controllers and OpenStack nodes,
# gathered into ${WORKSPACE}/archives for the Jenkins archive step.
# Globals (read): NUM_ODL_SYSTEM, ODL_SYSTEM_<n>_IP, BUNDLEFOLDER, WORKSPACE,
#   SSH, OPENSTACK_CONTROL_NODE_IP, NUM_OPENSTACK_SYSTEM,
#   OPENSTACK_COMPUTE_NODE_<n>_IP
# NOTE(review): despite the name, no 'exit' is visible in this excerpt —
# presumably it lives in elided lines or at call sites; TODO confirm.
# 'do'/'done', 'fi' and the closing '}' are elided (original numbering jumps).
360 function collect_logs_and_exit (){
361 set +e # We do not want to create red dot just because something went wrong while fetching logs.
# Stop karaf on every controller so its logs are quiescent before copying.
362 for i in `seq 1 ${NUM_ODL_SYSTEM}`
364 CONTROLLERIP=ODL_SYSTEM_${i}_IP
365 echo "killing karaf process..."
366 ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
# Small debug script executed later on each OpenStack node: dump the
# openvswitch kernel module state and conntrack-related ovs-vswitchd lines.
369 cat > extra_debug.sh << EOF
370 echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
371 /usr/sbin/lsmod | /usr/bin/grep openvswitch
372 echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
373 grep ct_ /var/log/openvswitch/ovs-vswitchd.log
# Per-controller: tar up karaf logs remotely, copy them back, unpack with a
# per-controller prefix, and extract ERROR/WARN/Exception summaries.
377 # FIXME: Do not create .tar and gzip before copying.
378 for i in `seq 1 ${NUM_ODL_SYSTEM}`
380 CONTROLLERIP=ODL_SYSTEM_${i}_IP
381 ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
382 ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
383 scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
384 tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
385 grep "ROBOT MESSAGE\| ERROR " odl${i}_karaf.log > odl${i}_err.log
386 grep "ROBOT MESSAGE\|Exception" odl${i}_karaf.log > odl${i}_exception.log
387 grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" odl${i}_karaf.log > odl${i}_err_warn_exception.log
388 rm ${WORKSPACE}/odl${i}_karaf.log.tar
391 # Since this log collection work is happening before the archive build macro which also
392 # creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
393 # archives build step will essentially be a noop.
394 mkdir -p ${WORKSPACE}/archives
# Control-node logs: devstack stack output, OVS log, /opt/stack/logs tree
# (rsync'd to avoid copying symlinks), plus the extra_debug output and the
# generated local.conf.
397 OS_CTRL_FOLDER="control"
398 mkdir -p ${OS_CTRL_FOLDER}
399 scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
400 scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
401 rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
402 scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
403 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
404 scp ${OPENSTACK_CONTROL_NODE_IP}:/tmp/extra_debug.log ${OS_CTRL_FOLDER}/extra_debug.log
405 mv local.conf_control ${OS_CTRL_FOLDER}/local.conf
406 mv ${OS_CTRL_FOLDER} ${WORKSPACE}/archives/
# Same collection per compute node; the last OPENSTACK_COMPUTE_NODE slot is
# skipped (it may be the HAProxy host), hence NUM_OPENSTACK_SYSTEM - 1.
409 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
411 OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
412 OS_COMPUTE_FOLDER="compute_${i}"
413 mkdir -p ${OS_COMPUTE_FOLDER}
414 scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
415 scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
416 rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
417 scp extra_debug.sh ${!OSIP}:/tmp
418 ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
419 scp ${!OSIP}:/tmp/extra_debug.log ${OS_COMPUTE_FOLDER}/extra_debug.log
420 mv local.conf_compute_${!OSIP} ${OS_COMPUTE_FOLDER}/local.conf
421 mv ${OS_COMPUTE_FOLDER} ${WORKSPACE}/archives/
# Rename any leftover local.conf* so Jenkins archives them as .log files.
424 ls local.conf* | xargs -I % mv % %.log
# Tempest results, when present: convert the subunit stream to v2, render an
# HTML report remotely, then copy report and log into archives/tempest.
427 DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
428 if $(ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/.testrepository/0 ]'"); then # if Tempest results exist
429 ssh ${OPENSTACK_CONTROL_NODE_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/.testrepository/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/.testrepository/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
430 ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
431 TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
432 mkdir -p ${TEMPEST_LOGS_DIR}
433 scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
434 scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
435 mv ${WORKSPACE}/tempest_output* ${TEMPEST_LOGS_DIR}
# ============================================================================
# Main flow: prepare helper scripts, stack the control and compute nodes via
# devstack, wait for stacking to finish, disable firewalls, verify the
# hypervisor count, wire up vxlan tunnels for the external network, then run
# the Robot test suites and collect logs.
# NOTE(review): this excerpt elides lines (original numbering jumps), so
# heredoc 'EOF' terminators, 'do'/'done', 'else'/'fi' and some commands (e.g.
# sleeps, 'cd devstack') are not visible here.
# ============================================================================
# Script pushed to nodes later to turn off firewalld/iptables.
439 cat > ${WORKSPACE}/disable_firewall.sh << EOF
440 sudo systemctl stop firewalld
441 sudo systemctl stop iptables
# Node-preparation script: stop firewall/NetworkManager, patch /etc/hosts as a
# mysql workaround, create br100, open up /opt/stack, and clone devstack at
# the requested OPENSTACK_BRANCH.
445 cat > ${WORKSPACE}/get_devstack.sh << EOF
446 sudo systemctl stop firewalld
447 sudo yum install bridge-utils -y
448 sudo systemctl stop NetworkManager
449 #Disable NetworkManager and kill dhclient and dnsmasq
450 sudo systemctl stop NetworkManager
451 sudo killall dhclient
453 #Workaround for mysql failure
454 echo "127.0.0.1 localhost \${HOSTNAME}" > /tmp/hosts
455 echo "::1 localhost \${HOSTNAME}" >> /tmp/hosts
456 sudo mv /tmp/hosts /etc/hosts
457 sudo /usr/sbin/brctl addbr br100
458 #sudo ifconfig eth0 mtu 2000
459 sudo mkdir /opt/stack
460 sudo chmod 777 /opt/stack
462 git clone https://git.openstack.org/openstack-dev/devstack
464 git checkout $OPENSTACK_BRANCH
467 echo "Create HAProxy if needed"
468 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
469 echo "Need to configure HAProxy"
470 configure_haproxy_for_neutron_requests
# --- Stack the control node: prepare devstack, install local.conf, then run
# --- stack.sh in the background (nohup) so computes can stack in parallel.
474 echo "Stack the Control Node"
475 scp ${WORKSPACE}/get_devstack.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
476 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/get_devstack.sh"
477 create_control_node_local_conf
478 scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf
# Helper to pip-install a specific git tag of a package (args elided here:
# presumably repo URL, package name and version; TODO confirm).
480 cat > "${WORKSPACE}/manual_install_package.sh" << EOF
485 sudo python setup.py install
490 # Workaround for successful stacking with Mitaka
491 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
493 # Workaround for problems with latest versions/specified versions in requirements of openstack
494 # Openstacksdk,libvirt-python -> the current version does not work with Mitaka due to some requirements
495 # conflict and breaks when trying to stack
496 # paramiko -> Problems with tempest tests due to paramiko incompatibility with pycrypto.
497 # the problem has been solved with version 1.17. If the latest version of paramiko is used, it causes
498 # other timeout problems
499 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt; sed -i /paramiko/d upper-constraints.txt"
500 scp "${WORKSPACE}/manual_install_package.sh" "${OPENSTACK_CONTROL_NODE_IP}:/tmp"
501 ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install deprecation"
502 # Fix for recent requirements update in the master branch of the sdk.The section must be replaced with a better fix.
503 ssh "${OPENSTACK_CONTROL_NODE_IP}" "sh /tmp/manual_install_package.sh https://github.com/openstack/python-openstacksdk python-openstacksdk 0.9.14"
504 ssh "${OPENSTACK_CONTROL_NODE_IP}" "sh /tmp/manual_install_package.sh https://github.com/paramiko/paramiko paramiko 1.17"
# Kick off stacking in the background; os_node_list tracks every node whose
# stack.sh completion we must poll below.
507 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
508 ssh ${OPENSTACK_CONTROL_NODE_IP} "ps -ef | grep stack.sh"
509 ssh ${OPENSTACK_CONTROL_NODE_IP} "ls -lrt /opt/stack/devstack/nohup.out"
510 os_node_list+=(${OPENSTACK_CONTROL_NODE_IP})
512 #Workaround for stable/newton jobs
513 if [ "${ODL_ML2_BRANCH}" == "stable/newton" ]; then
514 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/newton; sed -i /appdirs/d upper-constraints.txt"
# --- Stack each compute node (the last slot is reserved — possibly for
# --- HAProxy — hence NUM_OPENSTACK_SYSTEM - 1).
518 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
520 COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
521 scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
522 ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh"
523 create_compute_node_local_conf ${!COMPUTEIP}
524 scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
525 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
526 ssh ${!COMPUTEIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /libvirt-python/d upper-constraints.txt"
528 ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
529 ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
530 os_node_list+=(${!COMPUTEIP})
# Probe script run on each node: writes Still Stacking / Stacking Complete /
# Stacking Failed into /tmp/stack_progress based on whether stack.sh is still
# running and whether nohup.out shows the success marker.
533 cat > ${WORKSPACE}/check_stacking.sh << EOF
534 > /tmp/stack_progress
535 ps -ef | grep "stack.sh" | grep -v grep
537 if [ \${ret} -eq 1 ]; then
538 grep "This is your host IP address:" /opt/stack/devstack/nohup.out
539 if [ \$? -eq 0 ]; then
540 echo "Stacking Complete" > /tmp/stack_progress
542 echo "Stacking Failed" > /tmp/stack_progress
544 elif [ \${ret} -eq 0 ]; then
545 echo "Still Stacking" > /tmp/stack_progress
# Poll every node until all report Stacking Complete; any failure, or an hour
# of iterations, collects logs and aborts.
549 #the checking is repeated for an hour
552 while [ ${in_progress} -eq 1 ]; do
553 iteration=$(($iteration + 1))
554 for index in ${!os_node_list[@]}
556 echo "Check the status of stacking in ${os_node_list[index]}"
557 scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
558 ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
559 scp ${os_node_list[index]}:/tmp/stack_progress .
562 stacking_status=`cat stack_progress`
563 if [ "$stacking_status" == "Still Stacking" ]; then
565 elif [ "$stacking_status" == "Stacking Failed" ]; then
566 collect_logs_and_exit
568 elif [ "$stacking_status" == "Stacking Complete" ]; then
569 unset os_node_list[index]
570 if [ ${#os_node_list[@]} -eq 0 ]; then
575 echo "sleep for a minute before the next check"
577 if [ ${iteration} -eq 60 ]; then
578 collect_logs_and_exit
583 #Need to disable firewalld and iptables in control node
584 echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
585 scp ${WORKSPACE}/disable_firewall.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
586 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo bash /tmp/disable_firewall.sh"
587 echo "sleep for a minute and print hypervisor-list"
589 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
590 # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors
591 # will be the same as the number of openstack systems. However, if we are doing multinode openstack then the
592 # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
593 # is one less than the total number of openstack systems
594 if [ "${NUM_OPENSTACK_SYSTEM}" -eq 1 ]; then
595 expected_num_hypervisors=1
597 expected_num_hypervisors=$((NUM_OPENSTACK_SYSTEM - 1))
# Verify every compute registered as a hypervisor; otherwise collect logs and
# abort. (tr -d "\r" strips the carriage return ssh leaves on the count.)
599 num_hypervisors=$(${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
600 if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
601 echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
602 collect_logs_and_exit
606 #Need to disable firewalld and iptables in compute nodes as well
607 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
609 OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
610 scp ${WORKSPACE}/disable_firewall.sh "${!OSIP}:/tmp"
611 ${SSH} "${!OSIP}" "sudo bash /tmp/disable_firewall.sh"
614 # upgrading pip, urllib3 and httplib2 so that tempest tests can be run on ${OPENSTACK_CONTROL_NODE_IP}
615 # this needs to happen after devstack runs because it seems devstack is pulling in specific versions
616 # of these libs that are not working for tempest.
617 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install --upgrade pip"
618 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install urllib3 --upgrade"
619 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install httplib2 --upgrade"
# Collect compute IPs into an array for the tunnel-wiring loops below.
621 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
623 IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
624 COMPUTE_IPS[$((i-1))]=${!IP_VAR}
628 echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
# Recreate PUBLIC_BRIDGE cleanly on every node with a unique hwaddr suffix
# (devstack_index increments per node; its initialization is elided here).
630 for ip in ${OPENSTACK_CONTROL_NODE_IP} ${COMPUTE_IPS[*]}
632 # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
633 ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
634 ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
637 # Control Node - PUBLIC_BRIDGE will act as the external router
638 GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
639 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
# Full-mesh-ish vxlan: one tunnel from the controller to each compute, and one
# back from each compute to the controller (dst_port 9876 to avoid clashing
# with the tenant vxlan port).
641 for compute_ip in ${COMPUTE_IPS[*]}
643 # Tunnel from controller to compute
644 PORT_NAME=compute$((compute_index++))_vxlan
645 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="${OPENSTACK_CONTROL_NODE_IP}" options:remote_ip="$compute_ip" options:dst_port=9876 options:key=flow"
647 # Tunnel from compute to controller
648 PORT_NAME=control_vxlan
649 ${SSH} ${compute_ip} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="$compute_ip" options:remote_ip="${OPENSTACK_CONTROL_NODE_IP}" options:dst_port=9876 options:key=flow"
# HA_PROXY_IP handed to Robot: the HAProxy host when clustered, else the
# single controller.
652 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
653 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
654 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
655 HA_PROXY_IP=${!odlmgrip}
657 HA_PROXY_IP=${ODL_SYSTEM_IP}
# Pick the stream-specific test plan when it exists, else the generic one,
# and rewrite the suite paths to absolute WORKSPACE paths.
659 echo "Locating test plan to use..."
660 testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
661 if [ ! -f "${testplan_filepath}" ]; then
662 testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
665 echo "Changing the testplan path..."
666 cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
# Strip comments/blank lines and join suite paths onto one line for pybot.
669 SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
671 echo "Starting Robot test suites ${SUITES} ..."
672 # please add pybot -v arguments on a single line and alphabetized
673 pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude \
674 -v BUNDLEFOLDER:${BUNDLEFOLDER} \
675 -v BUNDLE_URL:${ACTUALBUNDLEURL} \
676 -v CONTROLLER_USER:${USER} \
677 -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
678 -v HA_PROXY_IP:${HA_PROXY_IP} \
679 -v JDKVERSION:${JDKVERSION} \
680 -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
681 -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
682 -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
683 -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
684 -v ODL_STREAM:${DISTROSTREAM} \
685 -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
686 -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
687 -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
688 -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
689 -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_IP} \
690 -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
691 -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
692 -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
694 -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
695 -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
696 -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
697 -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
698 -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
699 -v USER_HOME:${HOME} \
701 ${TESTOPTIONS} ${SUITES} || true
# Post-run sanity: show karaf log sizes, then gather everything.
703 echo "Examining the files in data/log and checking filesize"
704 ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
705 ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"
707 echo "Tests Executed"
708 collect_logs_and_exit
710 true # perhaps Jenkins is testing last exit code
711 # vim: ts=4 sw=4 sts=4 et ft=sh :