#@IgnoreInspection BashAddShebang
# This file is an include fragment (no shebang on purpose): it is concatenated
# into a larger Jenkins job script before execution.

# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
source ${ROBOT_VENV}/bin/activate

echo "#################################################"
echo "## Deploy Openstack 3-node ##"
echo "#################################################"
# Generate the devstack local.conf for the OpenStack control node and write it
# to ${WORKSPACE}/local.conf_control.
# Globals read: WORKSPACE, ODL_ML2_BRANCH, DISABLE_OS_SERVICES, ENABLE_OS_SERVICES,
#   OPENSTACK_CONTROL_NODE_IP, TENANT_NETWORK_TYPE, ODL_ML2_DRIVER_REPO,
#   ODL_ML2_DRIVER_VERSION, NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, NUM_OPENSTACK_SYSTEM,
#   ENABLE_HAPROXY_FOR_NEUTRON, CONTROLLERFEATURES, PUBLIC_BRIDGE, ODL_ENABLE_L3_FWD
# Outputs: the generated local.conf is echoed to stdout for the job log.
function create_control_node_local_conf {
local_conf_file_name=${WORKSPACE}/local.conf_control

# NOTE(review): the mitaka-branch special case body was lost in extraction;
# reconstructed here as the RECLONE toggle — confirm against the original job script.
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
   RECLONE=no
else
   RECLONE=yes
fi

# NOTE(review): [[local|localrc]] header lines reconstructed — confirm.
cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
SCREEN_LOGDIR=/opt/stack/data/log
LOG_COLOR=False
RECLONE=${RECLONE}
EOF

# One disable_service line per entry in the space-separated list.
for service_name in ${DISABLE_OS_SERVICES}
do
cat >> ${local_conf_file_name} << EOF
disable_service ${service_name}
EOF
done
for service_name in ${ENABLE_OS_SERVICES}
do
cat >> ${local_conf_file_name} << EOF
enable_service ${service_name}
EOF
done

# Core devstack settings; \$VAR escapes are expanded by devstack, not here.
cat >> ${local_conf_file_name} << EOF
HOST_IP=$OPENSTACK_CONTROL_NODE_IP
SERVICE_HOST=\$HOST_IP

NEUTRON_CREATE_INITIAL_NETWORKS=False
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}

ENABLE_TENANT_TUNNELS=True

MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

MYSQL_PASSWORD=mysql
RABBIT_PASSWORD=rabbit
SERVICE_TOKEN=service
SERVICE_PASSWORD=admin
ADMIN_PASSWORD=admin

enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
EOF

if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
    echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
fi

# Clustered ODL: build the comma-separated OVS manager list and pick the
# manager IP (the HAProxy node when HAProxy fronts neutron, else ODL node 1).
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
    odl_list=${ODL_SYSTEM_1_IP}
    for i in `seq 2 ${NUM_ODL_SYSTEM}`
    do
        odlip=ODL_SYSTEM_${i}_IP
        odl_list=${odl_list},${!odlip}
    done
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        # The HAProxy node is the last "compute" system in the job topology.
        HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
        odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
        odl_mgr_ip=${!odlmgrip}
    else
        odl_mgr_ip=${ODL_SYSTEM_1_IP}
    fi
    cat >> ${local_conf_file_name} << EOF
ODL_OVS_MANAGERS=${odl_list}
ODL_MGR_IP=${odl_mgr_ip}
EOF
else
    cat >> ${local_conf_file_name} << EOF
ODL_MGR_IP=${ODL_SYSTEM_1_IP}
EOF
fi

# if we are using the old netvirt impl, as determined by the feature name
# odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
# want ODL_L3 to be True. New impl wants it False
if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
    ODL_L3=True
else
    ODL_L3=False
fi

# if we are using the new netvirt impl, as determined by the feature name
# odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
# want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
# external networks in legacy netvirt
if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
    ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
else
    ODL_PROVIDER_MAPPINGS=
fi

if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
cat >> ${local_conf_file_name} << EOF
PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
ODL_L3=${ODL_L3}
PUBLIC_INTERFACE=br100
EOF

# mitaka has no built-in ODL L3 service plugin wiring, so inject it explicitly.
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
cat >> ${local_conf_file_name} << EOF
[[post-config|\$NEUTRON_CONF]]
[DEFAULT]
service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
EOF
fi #check for ODL_ML2_BRANCH

fi #ODL_ENABLE_L3_FWD check

cat >> ${local_conf_file_name} << EOF
[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
[agent]
minimize_polling=True

[[post-config|/etc/neutron/dhcp_agent.ini]]
[DEFAULT]
force_metadata = True
enable_isolated_metadata = True

[[post-config|/etc/nova/nova.conf]]
[DEFAULT]
force_config_drive = False
EOF

echo "local.conf Created...."
cat ${local_conf_file_name}
}
# Generate the devstack local.conf for one compute node and write it to
# ${WORKSPACE}/local.conf_compute_<HOSTIP>.
# Arguments: $1 - IP address of the compute node being configured.
# Globals read: WORKSPACE, ODL_ML2_BRANCH, OPENSTACK_CONTROL_NODE_IP,
#   ODL_ML2_DRIVER_REPO, NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, NUM_OPENSTACK_SYSTEM,
#   ENABLE_HAPROXY_FOR_NEUTRON, CONTROLLERFEATURES, PUBLIC_BRIDGE, ODL_ENABLE_L3_FWD
function create_compute_node_local_conf {
HOSTIP=$1

# NOTE(review): the mitaka-branch special case body was lost in extraction;
# reconstructed as the RECLONE toggle to match the control-node function — confirm.
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
   RECLONE=no
else
   RECLONE=yes
fi

local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
# NOTE(review): [[local|localrc]] header and HOST_IP line reconstructed — confirm.
cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
SCREEN_LOGDIR=/opt/stack/data/log
LOG_COLOR=False
RECLONE=${RECLONE}

NOVA_VNC_ENABLED=True

ENABLED_SERVICES=n-cpu

HOST_IP=${HOSTIP}
SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}

ENABLE_TENANT_TUNNELS=True
Q_ML2_TENANT_NETWORK_TYPE=vxlan

Q_HOST=\$SERVICE_HOST
MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

MYSQL_PASSWORD=mysql
RABBIT_PASSWORD=rabbit
SERVICE_TOKEN=service
SERVICE_PASSWORD=admin
ADMIN_PASSWORD=admin

enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
EOF

# Clustered ODL: same manager-list / manager-IP selection as the control node.
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
    odl_list=${ODL_SYSTEM_1_IP}
    for i in `seq 2 ${NUM_ODL_SYSTEM}`
    do
        odlip=ODL_SYSTEM_${i}_IP
        odl_list=${odl_list},${!odlip}
    done
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
        odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
        odl_mgr_ip=${!odlmgrip}
    else
        odl_mgr_ip=${ODL_SYSTEM_1_IP}
    fi
    cat >> ${local_conf_file_name} << EOF
ODL_OVS_MANAGERS=${odl_list}
ODL_MGR_IP=${odl_mgr_ip}
EOF
else
    cat >> ${local_conf_file_name} << EOF
ODL_MGR_IP=${ODL_SYSTEM_1_IP}
EOF
fi

# if we are using the new netvirt impl, as determined by the feature name
# odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
# want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
# external networks in legacy netvirt
if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
    ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
else
    ODL_PROVIDER_MAPPINGS=
fi

if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
cat >> ${local_conf_file_name} << EOF
# Uncomment lines below if odl-compute is to be used for l3 forwarding
PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF
fi

echo "local.conf Created...."
cat ${local_conf_file_name}
}
# Install and configure HAProxy on the designated node (the last "compute"
# system in the job topology) to front the clustered ODL controllers on
# ports 8080 (neutron northbound) and 8181 (RESTCONF).
# Globals read: NUM_OPENSTACK_SYSTEM, OPENSTACK_COMPUTE_NODE_*_IP,
#   NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, WORKSPACE, SSH
function configure_haproxy_for_neutron_requests () {
HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
ha_proxy_ip=${!odlmgrip}

cat > ${WORKSPACE}/install_ha_proxy.sh<< EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

# NOTE(review): global/defaults sections reconstructed (only pidfile and the
# http-request timeout survived extraction) — confirm against the original.
cat > ${WORKSPACE}/haproxy.cfg << EOF
global
  daemon
  log /dev/log local0
  pidfile /tmp/haproxy.pid
  user haproxy
  group haproxy

defaults
  log global
  mode http
  retries 3
  timeout http-request 10s
  timeout connect 10s
  timeout client 1m
  timeout server 1m
  timeout check 10s

listen opendaylight
  bind ${ha_proxy_ip}:8080
  balance source
EOF

# One backend server line per ODL controller for the 8080 listener.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
odlip=ODL_SYSTEM_${i}_IP
cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-$i ${!odlip}:8080 check fall 5 inter 2000 rise 2
EOF
done

cat >> ${WORKSPACE}/haproxy.cfg << EOF

listen opendaylight_rest
  bind ${ha_proxy_ip}:8181
  balance source
EOF

# One backend server line per ODL controller for the 8181 listener.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
odlip=ODL_SYSTEM_${i}_IP
cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-rest-$i ${!odlip}:8181 check fall 5 inter 2000 rise 2
EOF
done

# Run haproxy from /tmp/haproxy.cfg (the unit file normally points at /etc);
# semanage marks the domain permissive so SELinux does not block the odd paths.
cat > ${WORKSPACE}/deploy_ha_proxy.sh<< EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sleep 3
sudo systemctl status haproxy
true
EOF

scp ${WORKSPACE}/install_ha_proxy.sh ${ha_proxy_ip}:/tmp
${SSH} ${ha_proxy_ip} "sudo bash /tmp/install_ha_proxy.sh"
scp ${WORKSPACE}/haproxy.cfg ${ha_proxy_ip}:/tmp
scp ${WORKSPACE}/deploy_ha_proxy.sh ${ha_proxy_ip}:/tmp
${SSH} ${ha_proxy_ip} "sudo bash /tmp/deploy_ha_proxy.sh"
}
# Best-effort log collection from all ODL controllers and OpenStack nodes into
# ${WORKSPACE}/archives. Kills karaf first so its logs are final. Runs with
# set +e throughout: a log-fetch failure must not fail the job.
# Globals read: NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, SSH, BUNDLEFOLDER, WORKSPACE,
#   OPENSTACK_CONTROL_NODE_IP, NUM_OPENSTACK_SYSTEM, OPENSTACK_COMPUTE_NODE_*_IP
function collect_logs_and_exit (){
set +e  # We do not want to create red dot just because something went wrong while fetching logs.

for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "killing karaf process..."
    # Quoting dance: the awk program must reach the remote shell single-quoted.
    ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

# Small diagnostic script run on each OpenStack node (conntrack/OVS state).
cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
grep ct_ /var/log/openvswitch/ovs-vswitchd.log
EOF

# FIXME: Do not create .tar and gzip before copying.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
    ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
    scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
    tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
    rm ${WORKSPACE}/odl${i}_karaf.log.tar
done

# Since this log collection work is happening before the archive build macro which also
# creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
# archives build step will essentially be a noop.
mkdir -p ${WORKSPACE}/archives

# Control node logs.
OS_CTRL_FOLDER="control"
mkdir -p ${OS_CTRL_FOLDER}
scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
scp ${OPENSTACK_CONTROL_NODE_IP}:/tmp/extra_debug.log ${OS_CTRL_FOLDER}/extra_debug.log
mv local.conf_control ${OS_CTRL_FOLDER}/local.conf
mv ${OS_CTRL_FOLDER} ${WORKSPACE}/archives/

# Compute node logs (last "compute" slot may be the HAProxy node, hence -1).
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    OS_COMPUTE_FOLDER="compute_${i}"
    mkdir -p ${OS_COMPUTE_FOLDER}
    scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
    scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
    rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
    scp extra_debug.sh ${!OSIP}:/tmp
    ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
    scp ${!OSIP}:/tmp/extra_debug.log ${OS_COMPUTE_FOLDER}/extra_debug.log
    mv local.conf_compute_${!OSIP} ${OS_COMPUTE_FOLDER}/local.conf
    mv ${OS_COMPUTE_FOLDER} ${WORKSPACE}/archives/
done

# Rename any leftover local.conf files so the archiver picks them up as logs.
ls local.conf* | xargs -I % mv % %.log
}
# ---------------------------------------------------------------------------
# Main job flow: prepare helper scripts, stack devstack on the control node
# and every compute node in parallel, poll until stacking finishes, wire up
# external-network vxlan tunnels, then run the Robot test suites.
# ---------------------------------------------------------------------------

cat > ${WORKSPACE}/disable_firewall.sh << EOF
sudo systemctl stop firewalld
sudo systemctl stop iptables
true
EOF

cat > ${WORKSPACE}/get_devstack.sh << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils -y
sudo systemctl stop NetworkManager
#Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
#Workaround for mysql failure
echo "127.0.0.1    localhost \${HOSTNAME}" > /tmp/hosts
echo "::1   localhost  \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo /usr/sbin/brctl addbr br100
#sudo ifconfig eth0 mtu 2000
sudo mkdir /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
git clone https://git.openstack.org/openstack-dev/devstack
cd devstack
git checkout $OPENSTACK_BRANCH
EOF

echo "Create HAProxy if needed"
if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
 echo "Need to configure HAProxy"
 configure_haproxy_for_neutron_requests
fi

os_node_list=()

echo "Stack the Control Node"
scp ${WORKSPACE}/get_devstack.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/get_devstack.sh"
create_control_node_local_conf
scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf

# mitaka workaround: pin requirements and install openstacksdk from source,
# since the published constraints break the stack there.
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
    ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt"
    ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/openstack/python-openstacksdk; cd python-openstacksdk; sudo python setup.py install"
fi

# stack.sh runs for a long time: launch it detached and poll later.
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ps -ef | grep stack.sh"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ls -lrt /opt/stack/devstack/nohup.out"
os_node_list+=(${OPENSTACK_CONTROL_NODE_IP})

# Last "compute" slot may be reserved for HAProxy, hence NUM-1.
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh"
    create_compute_node_local_conf ${!COMPUTEIP}
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
        ssh ${!COMPUTEIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /libvirt-python/d upper-constraints.txt"
    fi
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=(${!COMPUTEIP})
done

# Probe script: no stack.sh process + success banner in nohup.out => complete;
# no process + no banner => failed; process still running => still stacking.
cat > ${WORKSPACE}/check_stacking.sh << EOF
> /tmp/stack_progress
ps -ef | grep "stack.sh" | grep -v grep
ret=\$?
if [ \${ret} -eq 1 ]; then
  grep "This is your host IP address:" /opt/stack/devstack/nohup.out
  if [ \$? -eq 0 ]; then
     echo "Stacking Complete" > /tmp/stack_progress
  else
     echo "Stacking Failed" > /tmp/stack_progress
  fi
elif [ \${ret} -eq 0 ]; then
  echo "Still Stacking" > /tmp/stack_progress
fi
EOF

#the checking is repeated for an hour
iteration=0
in_progress=1
while [ ${in_progress} -eq 1 ]; do
    iteration=$(($iteration + 1))
    for index in ${!os_node_list[@]}
    do
        echo "Check the status of stacking in ${os_node_list[index]}"
        scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
        ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
        scp ${os_node_list[index]}:/tmp/stack_progress .
        cat stack_progress
        stacking_status=`cat stack_progress`
        if [ "$stacking_status" == "Still Stacking" ]; then
            continue
        elif [ "$stacking_status" == "Stacking Failed" ]; then
            collect_logs_and_exit
            exit 1
        elif [ "$stacking_status" == "Stacking Complete" ]; then
            unset os_node_list[index]
            if [ ${#os_node_list[@]} -eq 0 ]; then
                in_progress=0
            fi
        fi
    done
    echo "sleep for a minute before the next check"
    sleep 60
    if [ ${iteration} -eq 60 ]; then
        collect_logs_and_exit
        exit 1
    fi
done

#Need to disable firewalld and iptables in control node
echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
scp ${WORKSPACE}/disable_firewall.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo bash /tmp/disable_firewall.sh"
echo "sleep for a minute and print hypervisor-list"
sleep 60
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"

#Need to disable firewalld and iptables in compute nodes as well
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    scp ${WORKSPACE}/disable_firewall.sh "${!OSIP}:/tmp"
    ${SSH} "${!OSIP}" "sudo bash /tmp/disable_firewall.sh"
done

# upgrading pip, urllib3 and httplib2 so that tempest tests can be run on ${OPENSTACK_CONTROL_NODE_IP}
# this needs to happen after devstack runs because it seems devstack is pulling in specific versions
# of these libs that are not working for tempest.
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install --upgrade pip"
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install urllib3 --upgrade"
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install httplib2 --upgrade"

for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
    COMPUTE_IPS[$((i-1))]=${!IP_VAR}
done

echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
devstack_index=1
for ip in ${OPENSTACK_CONTROL_NODE_IP} ${COMPUTE_IPS[*]}
do
    # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
    ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
    ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
done

# Control Node - PUBLIC_BRIDGE will act as the external router
GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
compute_index=1
for compute_ip in ${COMPUTE_IPS[*]}
do
    # Tunnel from controller to compute
    PORT_NAME=compute$((compute_index++))_vxlan
    ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="${OPENSTACK_CONTROL_NODE_IP}" options:remote_ip="$compute_ip" options:dst_port=9876 options:key=flow"

    # Tunnel from compute to controller
    PORT_NAME=control_vxlan
    ${SSH} ${compute_ip} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="$compute_ip" options:remote_ip="${OPENSTACK_CONTROL_NODE_IP}" options:dst_port=9876 options:key=flow"
done

# Robot talks to the cluster through HAProxy when clustered, node 1 otherwise.
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
    HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
    odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
    HA_PROXY_IP=${!odlmgrip}
else
    HA_PROXY_IP=${ODL_SYSTEM_IP}
fi

echo "Locating test plan to use..."
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
cat testplan.txt

# Strip comments and blank lines; remaining entries are the suite paths.
SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`

echo "Starting Robot test suites ${SUITES} ..."
# || true: the job must reach log collection even when suites fail.
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
-v BUNDLE_URL:${ACTUALBUNDLEURL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} -v JDKVERSION:${JDKVERSION} -v ODL_STREAM:${DISTROSTREAM} \
-v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
-v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} -v CONTROLLER_USER:${USER} -v OS_USER:${USER} \
-v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_IP} \
-v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
-v HA_PROXY_IP:${HA_PROXY_IP} \
-v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack -v USER_HOME:${HOME} ${TESTOPTIONS} ${SUITES} || true

echo "Tests Executed"
DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
if $(ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/.testrepository/0 ]'"); then # if Tempest results exist
    ssh ${OPENSTACK_CONTROL_NODE_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/.testrepository/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/.testrepository/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
    ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
    scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${WORKSPACE}/
fi

collect_logs_and_exit

true # perhaps Jenkins is testing last exit code
# vim: ts=4 sw=4 sts=4 et ft=sh :