# NOTE(review): this is a JJB include snippet (no shebang by design, hence the
# IgnoreInspection marker). It runs inside a Jenkins shell step where
# WORKSPACE, SILO, BRANCH, ACTUALBUNDLEURL, ROBOT_VENV etc. are pre-set.
# The listing is elided (embedded source line numbers jump), so some lines
# between the visible ones are not shown here.
1 #@IgnoreInspection BashAddShebang
2 # Activate robotframework virtualenv
3 # ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
5 source ${ROBOT_VENV}/bin/activate
# Informational step: report which recent changes landed in the distribution
# under test. The trailing "|| true" makes a changes.py failure non-fatal,
# since this output is diagnostic only.
7 # TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
8 echo "showing recent changes that made it in to the distribution used by this job"
9 pip install --upgrade urllib3
10 python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
11 -u ${ACTUALBUNDLEURL} -b ${BRANCH} \
12 -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true
14 echo "#################################################"
15 echo "## Deploy Openstack 3-node ##"
16 echo "#################################################"
#######################################
# Generate the DevStack local.conf for the OpenStack CONTROL node and write it
# to ${WORKSPACE}/local.conf_control (also dumped to the console at the end).
# Globals (read): WORKSPACE, ODL_ML2_BRANCH, DISABLE_OS_SERVICES,
#   ENABLE_OS_SERVICES, OPENSTACK_CONTROL_NODE_IP, TENANT_NETWORK_TYPE,
#   ODL_ML2_DRIVER_REPO, ENABLE_NETWORKING_L2GW, NETWORKING_L2GW_DRIVER,
#   ODL_ML2_DRIVER_VERSION, NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP,
#   ENABLE_HAPROXY_FOR_NEUTRON, NUM_OPENSTACK_SYSTEM, CONTROLLERFEATURES,
#   ODL_ENABLE_L3_FWD, DISABLE_ODL_L3_PLUGIN, PUBLIC_BRIDGE.
# NOTE(review): the listing is elided -- heredoc EOF terminators, do/done and
# fi lines are not all visible, so comments below are only placed at points
# that are unambiguously shell context. Inside the heredocs, "\$VAR" defers
# expansion to stack time while "${VAR}" is expanded now, on the Jenkins side.
#######################################
21 function create_control_node_local_conf {
22 local_conf_file_name=${WORKSPACE}/local.conf_control
24 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
29 cat > ${local_conf_file_name} << EOF
32 SCREEN_LOGDIR=/opt/stack/data/log
38 for service_name in ${DISABLE_OS_SERVICES}
40 cat >> ${local_conf_file_name} << EOF
41 disable_service ${service_name}
44 for service_name in ${ENABLE_OS_SERVICES}
46 cat >> ${local_conf_file_name} << EOF
47 enable_service ${service_name}
52 cat >> ${local_conf_file_name} << EOF
53 HOST_IP=$OPENSTACK_CONTROL_NODE_IP
54 SERVICE_HOST=\$HOST_IP
56 NEUTRON_CREATE_INITIAL_NETWORKS=False
58 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
61 ENABLE_TENANT_TUNNELS=True
63 MYSQL_HOST=\$SERVICE_HOST
64 RABBIT_HOST=\$SERVICE_HOST
65 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
66 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
67 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
70 RABBIT_PASSWORD=rabbit
72 SERVICE_PASSWORD=admin
75 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
83 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
84 cat >> ${local_conf_file_name} << EOF
86 enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
87 NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
88 ENABLED_SERVICES+=,neutron,q-svc,nova,q-meta
93 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
94 echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
# Multi-controller (clustered ODL) case: build a comma-separated list of all
# ODL IPs for ODL_OVS_MANAGERS; ${!odlip} is bash indirect expansion of the
# dynamically-named ODL_SYSTEM_<i>_IP variables.
97 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
98 odl_list=${ODL_SYSTEM_1_IP}
99 for i in `seq 2 ${NUM_ODL_SYSTEM}`
101 odlip=ODL_SYSTEM_${i}_IP
102 odl_list=${odl_list},${!odlip}
# When HAProxy fronts Neutron->ODL traffic, the last "compute" VM slot is
# repurposed as the HAProxy host and becomes the manager IP.
104 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
105 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
106 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
107 odl_mgr_ip=${!odlmgrip}
109 odl_mgr_ip=${ODL_SYSTEM_1_IP}
111 cat >> ${local_conf_file_name} << EOF
112 ODL_OVS_MANAGERS=${odl_list}
113 ODL_MGR_IP=${odl_mgr_ip}
116 cat >> ${local_conf_file_name} << EOF
117 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
121 # if we are using the old netvirt impl, as determined by the feature name
122 # odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
123 # want ODL_L3 to be True. New impl wants it False
# Substring match against the Karaf feature list to pick old vs. new netvirt.
124 if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
130 # if we are using the new netvirt impl, as determined by the feature name
131 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
132 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
133 # external networks in legacy netvirt
134 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
135 ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
137 ODL_PROVIDER_MAPPINGS=
# Optional L3 forwarding setup: adds public network parameters to local.conf.
140 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
141 cat >> ${local_conf_file_name} << EOF
142 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
143 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
144 ML2_VLAN_RANGES=physnet1
145 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
148 PUBLIC_INTERFACE=br100
# ODL L3 plugin is enabled unless explicitly disabled; Mitaka needs the
# service_plugins entry injected via a [[post-config]] section.
151 if [ -z ${DISABLE_ODL_L3_PLUGIN} ] || [ "${DISABLE_ODL_L3_PLUGIN}" == "no" ]; then
152 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
153 cat >> ${local_conf_file_name} << EOF
156 [[post-config|\$NEUTRON_CONF]]
158 service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
161 fi #check for ODL_ML2_BRANCH
162 fi #check for DISABLE_ODL_L3_PLUGIN
164 fi #ODL_ENABLE_L3_FWD check
# Unconditional post-config tuning for ml2, dhcp agent and nova.
166 cat >> ${local_conf_file_name} << EOF
167 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
169 minimize_polling=True
172 # Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
173 # MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phynset1(1500)
174 physical_network_mtus = physnet1:1440
176 [[post-config|/etc/neutron/dhcp_agent.ini]]
178 force_metadata = True
179 enable_isolated_metadata = True
181 [[post-config|/etc/nova/nova.conf]]
183 force_config_drive = False
187 echo "local.conf Created...."
188 cat ${local_conf_file_name}
#######################################
# Generate the DevStack local.conf for one COMPUTE node and write it to
# ${WORKSPACE}/local.conf_compute_${HOSTIP}.
# Arguments: $1 - compute node IP (callers pass ${!COMPUTEIP}).
#   NOTE(review): ${HOSTIP} is used below but its assignment from $1 sits on
#   an elided line (original lines 192-193) -- confirm against full source.
# Globals (read): WORKSPACE, ODL_ML2_BRANCH, OPENSTACK_CONTROL_NODE_IP,
#   ODL_ML2_DRIVER_REPO, NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP,
#   ENABLE_HAPROXY_FOR_NEUTRON, NUM_OPENSTACK_SYSTEM, CONTROLLERFEATURES,
#   ODL_ENABLE_L3_FWD, PUBLIC_BRIDGE.
# Compute nodes run only n-cpu and point all service endpoints at the
# control node (SERVICE_HOST). This listing is elided; heredoc terminators
# and fi/done lines are not all visible.
#######################################
191 function create_compute_node_local_conf {
194 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
199 local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
200 cat > ${local_conf_file_name} << EOF
204 SCREEN_LOGDIR=/opt/stack/data/log
207 NOVA_VNC_ENABLED=True
209 ENABLED_SERVICES=n-cpu
212 SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}
215 ENABLE_TENANT_TUNNELS=True
216 Q_ML2_TENANT_NETWORK_TYPE=vxlan
218 Q_HOST=\$SERVICE_HOST
219 MYSQL_HOST=\$SERVICE_HOST
220 RABBIT_HOST=\$SERVICE_HOST
221 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
222 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
223 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
226 RABBIT_PASSWORD=rabbit
227 SERVICE_TOKEN=service
228 SERVICE_PASSWORD=admin
231 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
# Same clustered-ODL manager-list logic as the control-node function:
# ${!odlip} / ${!odlmgrip} are bash indirect expansions of the dynamically
# named ODL_SYSTEM_<i>_IP / OPENSTACK_COMPUTE_NODE_<n>_IP variables.
236 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
237 odl_list=${ODL_SYSTEM_1_IP}
238 for i in `seq 2 ${NUM_ODL_SYSTEM}`
240 odlip=ODL_SYSTEM_${i}_IP
241 odl_list=${odl_list},${!odlip}
# HAProxy host (last compute slot) becomes the ODL manager IP when enabled.
243 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
244 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
245 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
246 odl_mgr_ip=${!odlmgrip}
248 odl_mgr_ip=${ODL_SYSTEM_1_IP}
250 cat >> ${local_conf_file_name} << EOF
251 ODL_OVS_MANAGERS=${odl_list}
252 ODL_MGR_IP=${odl_mgr_ip}
255 cat >> ${local_conf_file_name} << EOF
256 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
260 # if we are using the new netvirt impl, as determined by the feature name
261 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
262 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
263 # external networks in legacy netvirt
264 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
265 ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
267 ODL_PROVIDER_MAPPINGS=
# Optional L3 forwarding parameters, mirroring the control-node function.
270 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
271 cat >> ${local_conf_file_name} << EOF
272 # Uncomment lines below if odl-compute is to be used for l3 forwarding
275 PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
276 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
277 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
278 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
281 echo "local.conf Created...."
282 cat ${local_conf_file_name}
#######################################
# Install and configure HAProxy on the node occupying the LAST
# OPENSTACK_COMPUTE_NODE slot, load-balancing Neutron's northbound requests
# across all ODL controllers on ports 8080 and 8181.
# Globals (read): NUM_OPENSTACK_SYSTEM, OPENSTACK_COMPUTE_NODE_*_IP,
#   NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, WORKSPACE, SSH.
# Side effects: generates install_ha_proxy.sh, haproxy.cfg and
#   deploy_ha_proxy.sh in ${WORKSPACE}, scps them to the HAProxy host and
#   executes them there via ${SSH}.
# NOTE(review): heredoc EOF lines and do/done are elided from this listing.
#######################################
285 function configure_haproxy_for_neutron_requests () {
286 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
287 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
288 ha_proxy_ip=${!odlmgrip}
290 cat > ${WORKSPACE}/install_ha_proxy.sh<< EOF
291 sudo systemctl stop firewalld
292 sudo yum -y install policycoreutils-python haproxy
295 cat > ${WORKSPACE}/haproxy.cfg << EOF
301 pidfile /tmp/haproxy.pid
309 timeout http-request 10s
317 bind ${ha_proxy_ip}:8080
# Emit one backend "server" line per ODL controller for the 8080 listener;
# ${!odlip} indirectly expands ODL_SYSTEM_<i>_IP.
321 for i in `seq 1 ${NUM_ODL_SYSTEM}`
323 odlip=ODL_SYSTEM_${i}_IP
324 cat >> ${WORKSPACE}/haproxy.cfg << EOF
325 server controller-$i ${!odlip}:8080 check fall 5 inter 2000 rise 2
329 cat >> ${WORKSPACE}/haproxy.cfg << EOF
330 listen opendaylight_rest
331 bind ${ha_proxy_ip}:8181
# Same per-controller backend list for the RESTCONF (8181) listener.
335 for i in `seq 1 ${NUM_ODL_SYSTEM}`
337 odlip=ODL_SYSTEM_${i}_IP
338 cat >> ${WORKSPACE}/haproxy.cfg << EOF
339 server controller-rest-$i ${!odlip}:8181 check fall 5 inter 2000 rise 2
# Deploy script: point the systemd unit at /tmp/haproxy.cfg, mark the
# haproxy_t SELinux domain permissive, then restart the service. The escaped
# sed pattern rewrites /etc/haproxy/haproxy.cfg -> /tmp/haproxy.cfg.
343 cat > ${WORKSPACE}/deploy_ha_proxy.sh<< EOF
344 sudo chown haproxy:haproxy /tmp/haproxy.cfg
345 sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
346 sudo /usr/sbin/semanage permissive -a haproxy_t
347 sudo systemctl restart haproxy
350 sudo systemctl status haproxy
# Ship and run the generated scripts on the HAProxy host.
353 scp ${WORKSPACE}/install_ha_proxy.sh ${ha_proxy_ip}:/tmp
354 ${SSH} ${ha_proxy_ip} "sudo bash /tmp/install_ha_proxy.sh"
355 scp ${WORKSPACE}/haproxy.cfg ${ha_proxy_ip}:/tmp
356 scp ${WORKSPACE}/deploy_ha_proxy.sh ${ha_proxy_ip}:/tmp
357 ${SSH} ${ha_proxy_ip} "sudo bash /tmp/deploy_ha_proxy.sh"
#######################################
# Harvest logs from every ODL controller and every OpenStack node into
# ${WORKSPACE}/archives, then (per its name) terminate the job.
#   NOTE(review): the actual exit statement is on an elided line -- confirm.
# Globals (read): NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, SSH, BUNDLEFOLDER,
#   WORKSPACE, OPENSTACK_CONTROL_NODE_IP, NUM_OPENSTACK_SYSTEM,
#   OPENSTACK_COMPUTE_NODE_*_IP.
# set +e on entry: log collection failures must not fail the build.
#######################################
360 function collect_logs_and_exit (){
361 set +e # We do not want to create red dot just because something went wrong while fetching logs.
362 for i in `seq 1 ${NUM_ODL_SYSTEM}`
364 CONTROLLERIP=ODL_SYSTEM_${i}_IP
365 echo "killing karaf process..."
366 ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
# Small remote diagnostic script: dump the openvswitch kernel module state
# and any conntrack (ct_) lines from the ovs-vswitchd log.
369 cat > extra_debug.sh << EOF
370 echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
371 /usr/sbin/lsmod | /usr/bin/grep openvswitch
372 echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
373 grep ct_ /var/log/openvswitch/ovs-vswitchd.log
377 # FIXME: Do not create .tar and gzip before copying.
378 for i in `seq 1 ${NUM_ODL_SYSTEM}`
380 CONTROLLERIP=ODL_SYSTEM_${i}_IP
381 ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
382 ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
383 scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
384 tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
385 rm ${WORKSPACE}/odl${i}_karaf.log.tar
# Ensure the archives dir exists before the Jenkins archive macro runs.
388 # Since this log collection work is happening before the archive build macro which also
389 # creates the ${WORKSPACE}/archives dir, we have to do it here first. The mkdir in the
390 # archives build step will essentially be a noop.
391 mkdir -p ${WORKSPACE}/archives
# Control-node logs: devstack nohup output, OVS log, /opt/stack/logs tree
# (rsync dereferences nothing; avoids copying symlinks), plus extra_debug.
394 OS_CTRL_FOLDER="control"
395 mkdir -p ${OS_CTRL_FOLDER}
396 scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
397 scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
398 rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
399 scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
400 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
401 scp ${OPENSTACK_CONTROL_NODE_IP}:/tmp/extra_debug.log ${OS_CTRL_FOLDER}/extra_debug.log
402 mv local.conf_control ${OS_CTRL_FOLDER}/local.conf
403 mv ${OS_CTRL_FOLDER} ${WORKSPACE}/archives/
# Same collection per compute node; the last slot is excluded (it may be the
# HAProxy host -- see configure_haproxy_for_neutron_requests).
406 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
408 OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
409 OS_COMPUTE_FOLDER="compute_${i}"
410 mkdir -p ${OS_COMPUTE_FOLDER}
411 scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
412 scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
413 rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
414 scp extra_debug.sh ${!OSIP}:/tmp
415 ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
416 scp ${!OSIP}:/tmp/extra_debug.log ${OS_COMPUTE_FOLDER}/extra_debug.log
417 mv local.conf_compute_${!OSIP} ${OS_COMPUTE_FOLDER}/local.conf
418 mv ${OS_COMPUTE_FOLDER} ${WORKSPACE}/archives/
# Rename any remaining local.conf* files with a .log suffix so Jenkins
# archives/displays them as text.
421 ls local.conf* | xargs -I % mv % %.log
# --- Top-level orchestration starts here ---------------------------------
# Generate two helper scripts that will be scp'd to each node later:
#   disable_firewall.sh -- stop firewalld/iptables so nodes can reach each
#     other's service ports.
#   get_devstack.sh -- node bootstrap: stop NetworkManager, fix /etc/hosts
#     (mysql workaround), create br100 and /opt/stack, clone devstack and
#     check out ${OPENSTACK_BRANCH}.
# NOTE(review): heredoc EOF terminators are on elided lines; \${HOSTNAME} is
# escaped so it expands on the remote node, not here.
424 cat > ${WORKSPACE}/disable_firewall.sh << EOF
425 sudo systemctl stop firewalld
426 sudo systemctl stop iptables
430 cat > ${WORKSPACE}/get_devstack.sh << EOF
431 sudo systemctl stop firewalld
432 sudo yum install bridge-utils -y
433 sudo systemctl stop NetworkManager
434 #Disable NetworkManager and kill dhclient and dnsmasq
435 sudo systemctl stop NetworkManager
436 sudo killall dhclient
438 #Workaround for mysql failure
439 echo "127.0.0.1 localhost \${HOSTNAME}" > /tmp/hosts
440 echo "::1 localhost \${HOSTNAME}" >> /tmp/hosts
441 sudo mv /tmp/hosts /etc/hosts
442 sudo /usr/sbin/brctl addbr br100
443 #sudo ifconfig eth0 mtu 2000
444 sudo mkdir /opt/stack
445 sudo chmod 777 /opt/stack
447 git clone https://git.openstack.org/openstack-dev/devstack
449 git checkout $OPENSTACK_BRANCH
452 echo "Create HAProxy if needed"
# Stand up HAProxy in front of the ODL cluster before stacking begins.
453 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
454 echo "Need to configure HAProxy"
455 configure_haproxy_for_neutron_requests
# Bootstrap and stack the control node: ship get_devstack.sh, generate and
# install its local.conf, then launch stack.sh in the background (progress is
# polled later via check_stacking.sh / os_node_list).
459 echo "Stack the Control Node"
460 scp ${WORKSPACE}/get_devstack.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
461 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/get_devstack.sh"
462 create_control_node_local_conf
463 scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf
# Branch-specific pins: strip problem packages from upper-constraints and
# install known-good versions before stack.sh can pull in broken ones.
466 # Workaround for successful stacking with Mitaka
467 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
469 # Workaround for problems with latest versions/specified versions in requirements of openstack
470 # Openstacksdk,libvirt-python -> the current version does not work with Mitaka due to some requirements
471 # conflict and breaks when trying to stack
472 # paramiko -> Problems with tempest tests due to paramiko incompatibility with pycrypto.
473 # the problem has been solved with version 1.17. If the latest version of paramiko is used, it causes
474 # other timeout problems
475 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt; sed -i /paramiko/d upper-constraints.txt"
476 ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install deprecation"
477 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/openstack/python-openstacksdk; cd python-openstacksdk; sudo python setup.py install"
478 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/paramiko/paramiko; cd paramiko; git checkout 1.17; sudo python setup.py install"
481 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
482 ssh ${OPENSTACK_CONTROL_NODE_IP} "ps -ef | grep stack.sh"
483 ssh ${OPENSTACK_CONTROL_NODE_IP} "ls -lrt /opt/stack/devstack/nohup.out"
484 os_node_list+=(${OPENSTACK_CONTROL_NODE_IP})
486 #Workaround for stable/newton jobs
487 if [ "${ODL_ML2_BRANCH}" == "stable/newton" ]; then
488 ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/newton; sed -i /appdirs/d upper-constraints.txt"
# Repeat bootstrap + stack for each compute node (last slot excluded -- it
# may host HAProxy). Each node is appended to os_node_list for the poll loop.
492 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
494 COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
495 scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
496 ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh"
497 create_compute_node_local_conf ${!COMPUTEIP}
498 scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
499 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
500 ssh ${!COMPUTEIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /libvirt-python/d upper-constraints.txt"
502 ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
503 ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
504 os_node_list+=(${!COMPUTEIP})
# Remote probe script: writes one of "Still Stacking" / "Stacking Complete" /
# "Stacking Failed" into /tmp/stack_progress. Logic: if stack.sh is no
# longer running (\${ret}==1 from the ps|grep pipeline), success is decided by
# whether nohup.out contains the final "This is your host IP address:" line.
# Escaped \$ defers expansion to the remote node.
# NOTE(review): heredoc EOF, else/fi and the ret=\$? capture are on elided
# lines.
507 cat > ${WORKSPACE}/check_stacking.sh << EOF
508 > /tmp/stack_progress
509 ps -ef | grep "stack.sh" | grep -v grep
511 if [ \${ret} -eq 1 ]; then
512 grep "This is your host IP address:" /opt/stack/devstack/nohup.out
513 if [ \$? -eq 0 ]; then
514 echo "Stacking Complete" > /tmp/stack_progress
516 echo "Stacking Failed" > /tmp/stack_progress
518 elif [ \${ret} -eq 0 ]; then
519 echo "Still Stacking" > /tmp/stack_progress
523 #the checking is repeated for an hour
# Poll loop: once per minute, run the probe on every node still in
# os_node_list; nodes that finish are removed, a failure aborts via
# collect_logs_and_exit, and iteration 60 is the one-hour timeout.
526 while [ ${in_progress} -eq 1 ]; do
527 iteration=$(($iteration + 1))
528 for index in ${!os_node_list[@]}
530 echo "Check the status of stacking in ${os_node_list[index]}"
531 scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
532 ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
533 scp ${os_node_list[index]}:/tmp/stack_progress .
536 stacking_status=`cat stack_progress`
537 if [ "$stacking_status" == "Still Stacking" ]; then
539 elif [ "$stacking_status" == "Stacking Failed" ]; then
540 collect_logs_and_exit
542 elif [ "$stacking_status" == "Stacking Complete" ]; then
543 unset os_node_list[index]
544 if [ ${#os_node_list[@]} -eq 0 ]; then
549 echo "sleep for a minute before the next check"
551 if [ ${iteration} -eq 60 ]; then
552 collect_logs_and_exit
# Post-stack fixups and sanity checks on the freshly stacked cloud.
557 #Need to disable firewalld and iptables in control node
558 echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
559 scp ${WORKSPACE}/disable_firewall.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
560 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo bash /tmp/disable_firewall.sh"
561 echo "sleep for a minute and print hypervisor-list"
562 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
563 # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors
564 # will be the same as the number of openstack systems. However, if we are doing multinode openstack then the
565 # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
566 # is one less than the total number of openstack systems
# Verify every compute registered as a hypervisor; tail -1/tr -d strip ssh
# banner noise and the trailing carriage return from the remote count.
568 if [ "${NUM_OPENSTACK_SYSTEM}" -eq 1 ]; then
569 expected_num_hypervisors=1
571 expected_num_hypervisors=$((NUM_OPENSTACK_SYSTEM - 1))
573 num_hypervisors=$(${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
574 if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
575 echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
576 collect_logs_and_exit
# Firewall off on computes too (same reachability reasons as the control
# node above).
580 #Need to disable firewalld and iptables in compute nodes as well
581 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
583 OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
584 scp ${WORKSPACE}/disable_firewall.sh "${!OSIP}:/tmp"
585 ${SSH} "${!OSIP}" "sudo bash /tmp/disable_firewall.sh"
588 # upgrading pip, urllib3 and httplib2 so that tempest tests can be run on ${OPENSTACK_CONTROL_NODE_IP}
589 # this needs to happen after devstack runs because it seems devstack is pulling in specific versions
590 # of these libs that are not working for tempest.
591 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install --upgrade pip"
592 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install urllib3 --upgrade"
593 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install httplib2 --upgrade"
# Collect compute IPs into the COMPUTE_IPS array for the network setup below.
595 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
597 IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
598 COMPUTE_IPS[$((i-1))]=${!IP_VAR}
# Build an out-of-band "external network" between all nodes: a dedicated OVS
# bridge ($PUBLIC_BRIDGE) on each node, fully meshed to the control node with
# VXLAN tunnels (dst_port 9876, key=flow). The fixed hwaddr suffix keeps MACs
# unique per node via the incrementing devstack_index.
602 echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
604 for ip in ${OPENSTACK_CONTROL_NODE_IP} ${COMPUTE_IPS[*]}
606 # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
607 ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
608 ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
611 # Control Node - PUBLIC_BRIDGE will act as the external router
612 GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
613 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
# One VXLAN port pair per compute: controller->compute and compute->controller.
615 for compute_ip in ${COMPUTE_IPS[*]}
617 # Tunnel from controller to compute
618 PORT_NAME=compute$((compute_index++))_vxlan
619 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="${OPENSTACK_CONTROL_NODE_IP}" options:remote_ip="$compute_ip" options:dst_port=9876 options:key=flow"
621 # Tunnel from compute to controller
622 PORT_NAME=control_vxlan
623 ${SSH} ${compute_ip} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="$compute_ip" options:remote_ip="${OPENSTACK_CONTROL_NODE_IP}" options:dst_port=9876 options:key=flow"
# HA_PROXY_IP handed to Robot: the HAProxy host in clustered runs, otherwise
# the single controller itself.
626 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
627 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
628 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
629 HA_PROXY_IP=${!odlmgrip}
631 HA_PROXY_IP=${ODL_SYSTEM_IP}
# Resolve the Robot test plan (stream-specific file wins, generic TESTPLAN is
# the fallback), rewrite its suite paths to this workspace, and extract the
# suite list (comments and blank lines dropped, newlines joined into one
# space-separated string).
633 echo "Locating test plan to use..."
634 testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
635 if [ ! -f "${testplan_filepath}" ]; then
636 testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
639 echo "Changing the testplan path..."
640 cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
643 SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
# Run the suites. "|| true" keeps the job green here; pass/fail is judged
# later from the Robot output by the job's publishers. Do not insert lines
# inside this backslash-continued command.
645 echo "Starting Robot test suites ${SUITES} ..."
646 # please add pybot -v arguments on a single line and alphabetized
647 pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude \
648 -v BUNDLEFOLDER:${BUNDLEFOLDER} \
649 -v BUNDLE_URL:${ACTUALBUNDLEURL} \
650 -v CONTROLLER_USER:${USER} \
651 -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
652 -v HA_PROXY_IP:${HA_PROXY_IP} \
653 -v JDKVERSION:${JDKVERSION} \
654 -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
655 -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
656 -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
657 -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
658 -v ODL_STREAM:${DISTROSTREAM} \
659 -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
660 -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
661 -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
662 -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
663 -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_IP} \
664 -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
665 -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
666 -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
668 -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
669 -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
670 -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
671 -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
672 -v USER_HOME:${HOME} \
674 ${TESTOPTIONS} ${SUITES} || true
# Post-run diagnostics: karaf log sizes on the controller.
676 echo "Examining the files in data/log and checking filesize"
677 ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
678 ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"
# If Tempest ran (testrepository stream 0 exists), convert its subunit
# streams to HTML on the control node and fetch the report.
680 echo "Tests Executed"
681 DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
682 if $(ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/.testrepository/0 ]'"); then # if Tempest results exist
683 ssh ${OPENSTACK_CONTROL_NODE_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/.testrepository/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/.testrepository/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
684 ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
685 scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${WORKSPACE}/
687 collect_logs_and_exit
# Force a zero exit status for the Jenkins shell step.
689 true # perhaps Jenkins is testing last exit code
690 # vim: ts=4 sw=4 sts=4 et ft=sh :