2 # Activate robotframework virtualenv
3 # ${ROBOT_VENV} is set by integration-install-robotframework.sh
5 # shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
6 source ${ROBOT_VENV}/bin/activate
7 source /tmp/common-functions.sh ${BUNDLEFOLDER}
8 # Ensure we fail the job if any steps fail.
13 PYTHON="${ROBOT_VENV}/bin/python"
15 ADMIN_PASSWORD="admin"
16 OPENSTACK_MASTER_CLIENTS_VERSION="queens"
18 # TODO: remove this step that runs changes.py if/when it is moved higher up to be visible at the Robot level
19 printf "\nshowing recent changes that made it into the distribution used by this job:\n"
20 $PYTHON -m pip install --upgrade urllib3
21 python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
22 -u ${ACTUAL_BUNDLE_URL} -b ${DISTROBRANCH} \
23 -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true
25 printf "\nshowing recent changes that made it into integration/test used by this job:\n"
27 printf "Hash Author Date Commit Date Author Subject\n"
28 printf "%s\n" "------- ------------------------------ ------------------------------ -------------------- -----------------------------"
29 git --no-pager log --pretty=format:'%h %<(30)%ad %<(30)%cd %<(20,trunc)%an%d %s' -n20
34 #################################################
35 ## Deploy Openstack 3-node ##
36 #################################################
39 # Catch command errors and collect logs.
40 # This ensures logs are collected when script commands fail rather than simply exiting.
41 function trap_handler() {
45 echo "trap_handler: ${prog}: line ${lastline}: exit status of last command: ${lasterr}"
46 echo "trap_handler: command: ${BASH_COMMAND}"
50 trap 'trap_handler ${LINENO} $?' ERR
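# Build a hosts file with hostname entries for the peer OpenStack nodes of the given node;
# it is copied to the node as /tmp/hosts and installed as /etc/hosts by get_devstack.sh.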
54 function create_etc_hosts() {
57 : > ${WORKSPACE}/hosts_file
58 for iter in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
59 COMPUTE_IP=OPENSTACK_COMPUTE_NODE_${iter}_IP
60 if [ "${!COMPUTE_IP}" == "${NODE_IP}" ]; then
61 CONTROL_HNAME=$(${SSH} ${CTRL_IP} "hostname")
62 echo "${CTRL_IP} ${CONTROL_HNAME}" >> ${WORKSPACE}/hosts_file
64 COMPUTE_HNAME=$(${SSH} ${!COMPUTE_IP} "hostname")
65 echo "${!COMPUTE_IP} ${COMPUTE_HNAME}" >> ${WORKSPACE}/hosts_file
69 echo "Created the hosts file for ${NODE_IP}:"
70 cat ${WORKSPACE}/hosts_file
71 } # create_etc_hosts()
73 # Function to install OpenStack clients for testing.
74 # This will pull the latest versions compatible with the
76 function install_openstack_clients_in_robot_vm() {
77 packages=("python-novaclient" "python-neutronclient" "python-openstackclient")
79 os_plugins=$(csv2ssv "${ENABLE_OS_PLUGINS}")
80 for plugin_name in $os_plugins; do
81 if [ "$plugin_name" == "networking-sfc" ]; then
82 packages+=("networking-sfc")
85 openstack_version=$(echo ${OPENSTACK_BRANCH} | cut -d/ -f2)
86 # If the job tests "master", use the clients from the previous stable release to avoid failures
87 if [ "${openstack_version}" == "master" ]; then
88 openstack_version=${OPENSTACK_MASTER_CLIENTS_VERSION}
90 for package in ${packages[*]}; do
91 echo "Get the current support version of the package ${package}"
92 wget https://raw.githubusercontent.com/openstack/requirements/stable/${openstack_version}/upper-constraints.txt -O /tmp/constraints.txt 2>/dev/null
93 echo "$PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt"
94 $PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt
95 echo "$PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt"
96 $PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt
99 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
100 #networking-l2gw is not officially available in any release yet. Getting the latest stable version.
101 $PYTHON -m pip install networking-l2gw==11.0.0
105 # Function to install the RDO release repository.
106 # This helps avoid installing the wrong versions of packages, which cause
107 # functionality failures.
108 function install_rdo_release() {
110 case ${OPENSTACK_BRANCH} in
112 ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm"
116 ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm"
120 ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm"
125 # Involves just setting up the shared directory
126 function setup_live_migration_control() {
128 printf "${control_ip}:Setup directory Share with NFS"
129 cat > ${WORKSPACE}/setup_live_migration_control.sh << EOF
130 sudo mkdir --mode=777 /vm_instances
131 sudo chown -R jenkins:jenkins /vm_instances
132 sudo yum install -y nfs-utils
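# Export the instances directory over NFS so the compute nodes can mount it for live migration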
133 printf "/vm_instances *(rw,no_root_squash)" | sudo tee -a /etc/exports
134 sudo systemctl start rpcbind nfs-server
137 scp ${WORKSPACE}/setup_live_migration_control.sh ${control_ip}:/tmp/setup_live_migration_control.sh
138 ssh ${control_ip} "bash /tmp/setup_live_migration_control.sh"
141 # Involves mounting the share and configuring the libvirtd
142 function setup_live_migration_compute() {
145 printf "${compute_ip}:Mount Shared directory from ${control_ip}"
146 printf "${compute_ip}:Configure libvirt in listen mode"
147 cat > ${WORKSPACE}/setup_live_migration_compute.sh << EOF
148 sudo yum install -y libvirt libvirt-devel nfs-utils
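# Let libvirtd listen on unauthenticated TCP so nova can use qemu+tcp live migration between hosts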
149 sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tls 0
150 sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tcp 1
151 sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' auth_tcp '"none"'
152 sudo crudini --verbose --set --inplace /etc/sysconfig/libvirtd '' LIBVIRTD_ARGS '"--listen"'
153 sudo mkdir --mode=777 -p /var/instances
154 sudo chown -R jenkins:jenkins /var/instances
155 sudo chmod o+x /var/instances
156 sudo systemctl start rpcbind
157 sudo mount -t nfs ${control_ip}:/vm_instances /var/instances
160 scp ${WORKSPACE}/setup_live_migration_compute.sh ${compute_ip}:/tmp/setup_live_migration_compute.sh
161 ssh ${compute_ip} "bash /tmp/setup_live_migration_compute.sh"
164 # Add enable_services and disable_services to the local.conf
165 function add_os_services() {
166 local core_services=$1
167 local enable_services=$2
168 local disable_services=$3
169 local local_conf_file_name=$4
170 local enable_network_services=$5
172 cat >> ${local_conf_file_name} << EOF
173 enable_service $(csv2ssv "${core_services}")
175 if [ -n "${enable_services}" ]; then
176 cat >> ${local_conf_file_name} << EOF
177 enable_service $(csv2ssv "${enable_services}")
180 if [ -n "${disable_services}" ]; then
181 cat >> ${local_conf_file_name} << EOF
182 disable_service $(csv2ssv "${disable_services}")
185 if [ -n "${enable_network_services}" ]; then
186 cat >> ${local_conf_file_name} << EOF
187 enable_service $(csv2ssv "${enable_network_services}")
192 function create_control_node_local_conf() {
195 ODL_OVS_MANAGERS="$3"
197 local_conf_file_name=${WORKSPACE}/local.conf_control_${HOSTIP}
198 cat > ${local_conf_file_name} << EOF
204 # Increase the wait used by stack to poll for services
210 add_os_services "${CORE_OS_CONTROL_SERVICES}" "${ENABLE_OS_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}" "${ENABLE_OS_NETWORK_SERVICES}"
212 cat >> ${local_conf_file_name} << EOF
215 SERVICE_HOST=\$HOST_IP
216 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
217 NEUTRON_CREATE_INITIAL_NETWORKS=${CREATE_INITIAL_NETWORKS}
222 ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
223 ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}
225 MYSQL_HOST=\$SERVICE_HOST
226 RABBIT_HOST=\$SERVICE_HOST
227 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
228 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
229 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
231 ADMIN_PASSWORD=${ADMIN_PASSWORD}
232 DATABASE_PASSWORD=${ADMIN_PASSWORD}
233 RABBIT_PASSWORD=${ADMIN_PASSWORD}
234 SERVICE_TOKEN=${ADMIN_PASSWORD}
235 SERVICE_PASSWORD=${ADMIN_PASSWORD}
237 NEUTRON_LBAAS_SERVICE_PROVIDERV2=${LBAAS_SERVICE_PROVIDER} # Only relevant if neutron-lbaas plugin is enabled
238 NEUTRON_SFC_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
239 NEUTRON_FLOWCLASSIFIER_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
241 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
242 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
243 ML2_VLAN_RANGES=${PUBLIC_PHYSICAL_NETWORK}
244 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
246 if [ "${TENANT_NETWORK_TYPE}" == "local" ]; then
247 cat >> ${local_conf_file_name} << EOF
248 ENABLE_TENANT_TUNNELS=false
252 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
253 echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
256 for plugin_name in ${ENABLE_OS_PLUGINS}; do
257 if [ "$plugin_name" == "networking-odl" ]; then
258 ENABLE_PLUGIN_ARGS="${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}"
259 elif [ "$plugin_name" == "kuryr-kubernetes" ]; then
260 ENABLE_PLUGIN_ARGS="${DEVSTACK_KUBERNETES_PLUGIN_REPO} master" # note: kuryr-kubernetes only exists in master at the moment
261 elif [ "$plugin_name" == "neutron-lbaas" ]; then
262 ENABLE_PLUGIN_ARGS="${DEVSTACK_LBAAS_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
263 IS_LBAAS_PLUGIN_ENABLED="yes"
264 elif [ "$plugin_name" == "networking-sfc" ]; then
265 ENABLE_PLUGIN_ARGS="${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
266 IS_SFC_PLUGIN_ENABLED="yes"
268 echo "Error: Invalid plugin $plugin_name, unsupported"
271 cat >> ${local_conf_file_name} << EOF
273 enable_plugin ${plugin_name} ${ENABLE_PLUGIN_ARGS}
278 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
279 cat >> ${local_conf_file_name} << EOF
281 enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
282 NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default
286 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
287 SERVICE_PLUGINS="odl-router_v2"
289 SERVICE_PLUGINS="odl-router"
291 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
292 SERVICE_PLUGINS+=", networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
294 if [ "${IS_LBAAS_PLUGIN_ENABLED}" == "yes" ]; then
295 SERVICE_PLUGINS+=", lbaasv2"
297 if [ "${IS_SFC_PLUGIN_ENABLED}" == "yes" ]; then
298 SERVICE_PLUGINS+=", networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,networking_sfc.services.sfc.plugin.SfcPlugin"
301 cat >> ${local_conf_file_name} << EOF
303 [[post-config|\$NEUTRON_CONF]]
305 service_plugins = ${SERVICE_PLUGINS}
306 log_dir = /opt/stack/logs
308 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
310 minimize_polling=True
313 # Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
314 # MTU(1400) + VXLAN(50) + VLAN(4) = 1454 < MTU eth0/br-physnet1(1458)
315 physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1400
319 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
320 cat >> ${local_conf_file_name} << EOF
323 enable_dhcp_service = True
327 cat >> ${local_conf_file_name} << EOF
330 # Trigger n-odl full sync every 30 secs.
331 maintenance_interval = 30
333 [[post-config|/etc/neutron/dhcp_agent.ini]]
335 force_metadata = True
336 enable_isolated_metadata = True
337 log_dir = /opt/stack/logs
339 [[post-config|/etc/nova/nova.conf]]
341 discover_hosts_in_cells_interval = 30
344 force_config_drive = False
345 force_raw_images = False
346 log_dir = /opt/stack/logs
350 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
351 cat >> ${local_conf_file_name} << EOF
353 force_raw_images = False
354 log_dir = /opt/stack/logs
356 live_migration_uri = qemu+tcp://%s/system
361 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
362 echo "Combo local.conf created:"
364 echo "Control local.conf created:"
366 cat ${local_conf_file_name}
367 } # create_control_node_local_conf()
369 function create_compute_node_local_conf() {
373 ODL_OVS_MANAGERS="$4"
375 local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
376 cat > ${local_conf_file_name} << EOF
382 # Increase the wait used by stack to poll for the nova service on the control node
383 NOVA_READY_TIMEOUT=1800
388 add_os_services "${CORE_OS_COMPUTE_SERVICES}" "${ENABLE_OS_COMPUTE_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}"
390 cat >> ${local_conf_file_name} << EOF
392 SERVICE_HOST=${SERVICEHOST}
393 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
398 ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
399 ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}
401 Q_HOST=\$SERVICE_HOST
402 MYSQL_HOST=\$SERVICE_HOST
403 RABBIT_HOST=\$SERVICE_HOST
404 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
405 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
406 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
408 ADMIN_PASSWORD=${ADMIN_PASSWORD}
409 DATABASE_PASSWORD=${ADMIN_PASSWORD}
410 RABBIT_PASSWORD=${ADMIN_PASSWORD}
411 SERVICE_TOKEN=${ADMIN_PASSWORD}
412 SERVICE_PASSWORD=${ADMIN_PASSWORD}
414 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
415 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
416 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
419 if [[ "${ENABLE_OS_PLUGINS}" =~ networking-odl ]]; then
420 cat >> ${local_conf_file_name} << EOF
422 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
426 cat >> ${local_conf_file_name} << EOF
428 [[post-config|/etc/nova/nova.conf]]
430 auth_strategy = keystone
433 force_raw_images = False
434 log_dir = /opt/stack/logs
436 live_migration_uri = qemu+tcp://%s/system
440 echo "Compute local.conf created:"
441 cat ${local_conf_file_name}
442 } # create_compute_node_local_conf()
444 function configure_haproxy_for_neutron_requests() {
446 # shellcheck disable=SC2206
449 cat > ${WORKSPACE}/install_ha_proxy.sh<< EOF
450 sudo systemctl stop firewalld
451 sudo yum -y install policycoreutils-python haproxy
454 cat > ${WORKSPACE}/haproxy.cfg << EOF
460 pidfile /tmp/haproxy.pid
468 timeout http-request 10s
479 listen opendaylight_rest
483 listen opendaylight_websocket
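# Append a server line for each ODL controller under the matching "listen" section defined above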
490 for odlip in ${ODL_IPS[*]}; do
491 sed -i "/listen opendaylight$/a server controller-${odlindex} ${odlip}:8080 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
492 sed -i "/listen opendaylight_rest$/a server controller-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
493 sed -i "/listen opendaylight_websocket$/a server controller-websocket-${odlindex} ${odlip}:8185 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
494 odlindex=$((odlindex+1))
498 echo "Dump haproxy.cfg"
499 cat ${WORKSPACE}/haproxy.cfg
501 cat > ${WORKSPACE}/deploy_ha_proxy.sh<< EOF
502 sudo chown haproxy:haproxy /tmp/haproxy.cfg
503 sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
504 sudo /usr/sbin/semanage permissive -a haproxy_t
505 sudo systemctl restart haproxy
508 sudo systemctl status haproxy
512 scp ${WORKSPACE}/install_ha_proxy.sh ${MGRIP}:/tmp
513 ${SSH} ${MGRIP} "sudo bash /tmp/install_ha_proxy.sh"
514 scp ${WORKSPACE}/haproxy.cfg ${MGRIP}:/tmp
515 scp ${WORKSPACE}/deploy_ha_proxy.sh ${MGRIP}:/tmp
516 ${SSH} ${MGRIP} "sudo bash /tmp/deploy_ha_proxy.sh"
517 } # configure_haproxy_for_neutron_requests()
519 # The following three functions are debugging helpers for devstack changes.
520 # They are kept here so we can simply call them when needed.
524 function get_hostnames () {
526 local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
527 local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
528 local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}
529 ctrlhn=$(${SSH} ${ctrlip} "hostname")
530 comp1hn=$(${SSH} ${comp1ip} "hostname")
531 comp2hn=$(${SSH} ${comp2ip} "hostname")
532 echo "hostnames: ${ctrlhn}, ${comp1hn}, ${comp2hn}"
536 function check_firewall() {
539 local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
540 local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
541 local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}
543 echo "check_firewall on control"
545 sudo systemctl status firewalld
546 sudo systemctl -l status iptables
547 sudo iptables --line-numbers -nvL
549 echo "check_firewall on compute 1"
551 sudo systemctl status firewalld
552 sudo systemctl -l status iptables
553 sudo iptables --line-numbers -nvL
555 echo "check_firewall on compute 2"
557 sudo systemctl status firewalld
558 sudo systemctl -l status iptables
559 sudo iptables --line-numbers -nvL
563 function get_service () {
567 local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
568 local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
570 #if [ ${idx} -eq 1 ]; then
571 if [ ${iter} -eq 1 ] || [ ${iter} -gt 16 ]; then
572 curl http://${ctrlip}:5000
573 curl http://${ctrlip}:35357
574 curl http://${ctrlip}/identity
576 source /opt/stack/devstack/openrc admin admin;
578 openstack configuration show --unmask;
579 openstack service list
580 openstack --os-cloud devstack-admin --os-region RegionOne compute service list
581 openstack hypervisor list;
589 # Check if rabbitmq is ready by looking for the nova_cell1 vhost in its list of vhosts.
590 # The function returns the status of the grep command, which callers can check.
591 function is_rabbitmq_ready() {
593 local grepfor="nova_cell1"
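# The control node creates the nova_cell1 vhost during stacking, so its presence
# means rabbitmq is up and cell1 exists before the computes need it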
595 ${SSH} ${ip} "sudo rabbitmqctl list_vhosts" > rabbit.txt
596 grep ${grepfor} rabbit.txt
599 # retry the given command ($3) until success for a number of iterations ($1)
600 # sleeping ($2) between tries.
602 local -r -i max_tries=${1}
603 local -r -i sleep_time=${2}
608 echo "retry ${cmd}: attempt: ${retries}"
611 if ((${rc} == 0)); then
614 if ((${retries} == ${max_tries})); then
625 function install_ovs() {
627 local -r rpm_path=${2}
629 if [ "${OVS_INSTALL:0:1}" = "v" ]; then
630 # An OVS version was given, so we build it ourselves from the OVS git repo.
631 # Only on the first node though; subsequent nodes will use the RPMs
632 # built for the first one.
633 [ ! -d "${rpm_path}" ] && mkdir -p "${rpm_path}" && build_ovs ${node} ${OVS_INSTALL} "${rpm_path}"
634 # Install OVS from path
635 install_ovs_from_path ${node} "${rpm_path}"
636 elif [ "${OVS_INSTALL:0:4}" = "http" ]; then
637 # Otherwise, install from rpm repo directly.
638 install_ovs_from_repo ${node} ${OVS_INSTALL}
640 echo "Expected either an OVS version git tag or a repo http url"
645 ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
649 # Always compare the lists below against the devstack upstream ENABLED_SERVICES in
650 # https://github.com/openstack-dev/devstack/blob/master/stackrc#L52
651 # ODL CSIT does not use vnc, cinder, q-agt, q-l3 or horizon so they are not included below.
652 # collect performance stats
653 CORE_OS_CONTROL_SERVICES="dstat"
655 CORE_OS_CONTROL_SERVICES+=",g-api,g-reg"
657 CORE_OS_CONTROL_SERVICES+=",key"
658 # Nova - services to support libvirt
659 CORE_OS_CONTROL_SERVICES+=",n-api,n-api-meta,n-cauth,n-cond,n-crt,n-obj,n-sch"
660 # ODL - services to connect to ODL
661 CORE_OS_CONTROL_SERVICES+=",odl-compute,odl-neutron"
662 # Additional services
663 CORE_OS_CONTROL_SERVICES+=",mysql,rabbit"
665 # collect performance stats
666 CORE_OS_COMPUTE_SERVICES="dstat"
667 # computes only need nova and odl
668 CORE_OS_COMPUTE_SERVICES+=",n-cpu,odl-compute"
670 cat > ${WORKSPACE}/disable_firewall.sh << EOF
671 sudo systemctl stop firewalld
672 # Open these ports to match the tutorial vms
673 # http/https (80/443), samba (445), netbios (137,138,139)
674 sudo iptables -I INPUT -p tcp -m multiport --dports 80,443,139,445 -j ACCEPT
675 sudo iptables -I INPUT -p udp -m multiport --dports 137,138 -j ACCEPT
676 # OpenStack services as well as vxlan tunnel ports 4789 and 9876
677 # identity public/admin (5000/35357), amqp (5672), vnc (6080), nova (8774), glance (9292), neutron (9696)
678 sudo iptables -I INPUT -p tcp -m multiport --dports 5000,5672,6080,8774,9292,9696,35357 -j ACCEPT
679 sudo iptables -I INPUT -p udp -m multiport --dports 4789,9876 -j ACCEPT
680 sudo iptables-save > /etc/sysconfig/iptables
681 sudo systemctl restart iptables
682 sudo iptables --line-numbers -nvL
686 cat > ${WORKSPACE}/get_devstack.sh << EOF
687 sudo systemctl stop firewalld
688 sudo yum install bridge-utils python-pip -y
689 #sudo systemctl stop NetworkManager
690 #Disable NetworkManager and kill dhclient and dnsmasq
691 sudo systemctl stop NetworkManager
692 sudo killall dhclient
694 #Workaround for mysql failure
695 echo "127.0.0.1 localhost \${HOSTNAME}" >> /tmp/hosts
696 echo "::1 localhost \${HOSTNAME}" >> /tmp/hosts
697 sudo mv /tmp/hosts /etc/hosts
698 sudo mkdir /opt/stack
699 echo "Create RAM disk for /opt/stack"
700 sudo mount -t tmpfs -o size=2G tmpfs /opt/stack
701 sudo chmod 777 /opt/stack
703 echo "git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}"
704 git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}
706 if [ -n "${DEVSTACK_HASH}" ]; then
707 echo "git checkout ${DEVSTACK_HASH}"
708 git checkout ${DEVSTACK_HASH}
710 git --no-pager log --pretty=format:'%h %<(13)%ar%<(13)%cr %<(20,trunc)%an%d %s%b' -n20
713 echo "workaround: do not upgrade openvswitch"
714 sudo yum install -y yum-plugin-versionlock
715 sudo yum versionlock add openvswitch
717 #Install qemu-img command in Control Node for Pike
718 echo "Install qemu-img application"
719 sudo yum install -y qemu-img
722 cat > "${WORKSPACE}/setup_host_cell_mapping.sh" << EOF
723 sudo nova-manage cell_v2 map_cell0
724 sudo nova-manage cell_v2 simple_cell_setup
725 sudo nova-manage db sync
726 sudo nova-manage cell_v2 discover_hosts
731 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
732 echo "Configure HAProxy"
733 ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_1_IP
734 ODL_IP_PARAM1=ODL_SYSTEM_1_IP
735 ODL_IP_PARAM2=ODL_SYSTEM_2_IP
736 ODL_IP_PARAM3=ODL_SYSTEM_3_IP
737 ODLMGRIP=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses HAProxy VIP
738 ODL_OVS_MGRS="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs
739 configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS}"
741 ODL_IP_PARAM=ODL_SYSTEM_1_IP
742 ODLMGRIP=${!ODL_IP_PARAM} # OVSDB connects to ODL IP
743 ODL_OVS_MGRS="${!ODL_IP_PARAM}" # ODL Northbound uses ODL IP
747 for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
748 cip=OPENSTACK_CONTROL_NODE_${i}_IP
750 os_ip_list+=("${ip}")
753 for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
754 cip=OPENSTACK_COMPUTE_NODE_${i}_IP
756 os_ip_list+=("${ip}")
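# Capture OpenFlow (port 6653) traffic on each OpenStack node for later debugging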
759 for i in "${!os_ip_list[@]}"; do
761 tcpdump_start "${i}" "${ip}" "port 6653"
765 # Begin stacking the nodes, starting with the controller(s) and then the compute(s)
767 for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
768 CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
769 echo "Configure the stack of the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${!CONTROLIP}"
770 scp ${WORKSPACE}/disable_firewall.sh ${!CONTROLIP}:/tmp
771 ${SSH} ${!CONTROLIP} "sudo bash /tmp/disable_firewall.sh"
772 create_etc_hosts ${!CONTROLIP}
773 scp ${WORKSPACE}/hosts_file ${!CONTROLIP}:/tmp/hosts
774 scp ${WORKSPACE}/get_devstack.sh ${!CONTROLIP}:/tmp
775 # devstack master has not yet fully migrated to lib/neutron; there are some ugly hacks
776 # that affect the stacking.
777 # Workaround for Queens: make the physical network physnet1 in lib/neutron.
778 # In Queens the new neutron libs are used and do not have the following options from Pike and earlier:
779 # Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS could be used for the flat_networks
780 # and Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS could be used for the ml2_type_vlan
781 ${SSH} ${!CONTROLIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
782 if [ "${ODL_ML2_BRANCH}" == "stable/queens" ]; then
783 ssh ${!CONTROLIP} "sed -i 's/flat_networks public/flat_networks public,physnet1/' /opt/stack/devstack/lib/neutron"
784 ssh ${!CONTROLIP} "sed -i '186i iniset \$NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges public:1:4094,physnet1:1:4094' /opt/stack/devstack/lib/neutron"
786 create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}"
787 scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf
788 echo "Install rdo release to avoid incompatible Package versions"
789 install_rdo_release ${!CONTROLIP}
790 setup_live_migration_control ${!CONTROLIP}
791 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
792 setup_live_migration_compute ${!CONTROLIP} ${!CONTROLIP}
794 [ -n "${OVS_INSTALL}" ] && install_ovs ${!CONTROLIP} /tmp/ovs_rpms
795 echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${CONTROLIP}"
796 ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
797 ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
798 ssh ${!CONTROLIP} "ls -lrt /opt/stack/devstack/nohup.out"
799 os_node_list+=("${!CONTROLIP}")
802 # This is a backup to the CELLSV2_SETUP=singleconductor workaround. Keeping it here as an easy lookup
804 # Let the control node get started to avoid a race condition where the computes start and try to access
805 # the nova_cell1 on the control node before it is created. If that happens, the nova-compute service on the
806 # compute exits and does not attempt to restart.
807 # 180s is chosen because in test runs the control node usually finished in 17-20 minutes and the computes finished
808 # in 17 minutes, so take the max difference of 3 minutes and the jobs should still finish around the same time.
809 # one of the following errors is seen in the compute n-cpu.log:
810 # Unhandled error: NotAllowed: Connection.open: (530) NOT_ALLOWED - access to vhost 'nova_cell1' refused for user 'stackrabbit'
811 # AccessRefused: (0, 0): (403) ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.
812 # Compare that timestamp to this log in the control stack.log: sudo rabbitmqctl set_permissions -p nova_cell1 stackrabbit
813 # If the n-cpu.log is earlier than the control stack.log timestamp then the failure condition is likely hit.
814 if [ ${NUM_OPENSTACK_COMPUTE_NODES} -gt 0 ]; then
815 WAIT_FOR_RABBITMQ_MINUTES=60
816 echo "Wait a maximum of ${WAIT_FOR_RABBITMQ_MINUTES}m until rabbitmq is ready and nova_cell1 created to allow the controller to create nova_cell1 before the computes need it"
818 retry ${WAIT_FOR_RABBITMQ_MINUTES} 60 "is_rabbitmq_ready ${OPENSTACK_CONTROL_NODE_1_IP}"
821 if ((${rc} == 0)); then
822 echo "rabbitmq is ready, starting ${NUM_OPENSTACK_COMPUTE_NODES} compute(s)"
824 echo "rabbitmq was not ready in ${WAIT_FOR_RABBITMQ_MINUTES}m"
829 for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
830 COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
831 CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP
832 echo "Configure the stack of the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
833 scp ${WORKSPACE}/disable_firewall.sh "${!COMPUTEIP}:/tmp"
834 ${SSH} "${!COMPUTEIP}" "sudo bash /tmp/disable_firewall.sh"
835 create_etc_hosts ${!COMPUTEIP} ${!CONTROLIP}
836 scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts
837 scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
838 ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
839 create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}"
840 scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
841 echo "Install rdo release to avoid incompatible Package versions"
842 install_rdo_release ${!COMPUTEIP}
843 setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP}
844 [ -n "${OVS_INSTALL}" ] && install_ovs ${!COMPUTEIP} /tmp/ovs_rpms
845 echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
846 ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
847 ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
848 os_node_list+=("${!COMPUTEIP}")
851 echo "nodelist: ${os_node_list[*]}"
853 # This script runs on the openstack nodes. It greps for a string that devstack writes when stacking is complete.
854 # The script then writes a status depending on the grep output that is later scraped by the robot vm to control
855 # the status polling.
856 cat > ${WORKSPACE}/check_stacking.sh << EOF
857 > /tmp/stack_progress
858 ps -ef | grep "stack.sh" | grep -v grep
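# If stack.sh is no longer in the process list, decide pass/fail below by grepping nohup.out;
# if it is still running, report that stacking is still in progress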
860 if [ \${ret} -eq 1 ]; then
861 grep "This is your host IP address:" /opt/stack/devstack/nohup.out
862 if [ \$? -eq 0 ]; then
863 echo "Stacking Complete" > /tmp/stack_progress
865 echo "Stacking Failed" > /tmp/stack_progress
867 elif [ \${ret} -eq 0 ]; then
868 echo "Still Stacking" > /tmp/stack_progress
875 # Check if the stacking is finished. Poll all nodes every 60s for one hour.
878 while [ ${in_progress} -eq 1 ]; do
879 iteration=$(($iteration + 1))
880 for index in "${!os_node_list[@]}"; do
881 echo "node $index ${os_node_list[index]}: checking stacking status attempt ${iteration} of 60"
882 scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
883 ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
884 scp ${os_node_list[index]}:/tmp/stack_progress .
886 stacking_status=`cat stack_progress`
888 # get_service "${iteration}" "${index}"
889 if [ "$stacking_status" == "Still Stacking" ]; then
891 elif [ "$stacking_status" == "Stacking Failed" ]; then
892 echo "node $index ${os_node_list[index]}: stacking has failed"
894 elif [ "$stacking_status" == "Stacking Complete" ]; then
895 echo "node $index ${os_node_list[index]}: stacking complete"
896 unset 'os_node_list[index]'
897 if [ ${#os_node_list[@]} -eq 0 ]; then
902 echo "sleep for a minute before the next check"
904 if [ ${iteration} -eq 60 ]; then
905 echo "stacking has failed - took longer than 60m"
910 # Further configuration now that stacking is complete.
911 echo "Configure the Control Node"
912 CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP
913 # Gather Compute IPs for the site
914 for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
915 IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
916 COMPUTE_IPS[$((i-1))]=${!IP_VAR}
919 echo "sleep for 60s and print hypervisor-list"
921 ${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
922 # In the all-in-one case (control + compute on a single node) the number of hypervisors
923 # equals the number of OpenStack systems. In the multinode case we assume a single control
924 # node and the rest are compute nodes, so the expected number of hypervisors is one less than
925 # the total number of OpenStack systems (plus one if n-cpu is also enabled on the control node).
926 if [ ${NUM_OPENSTACK_SYSTEM} -eq 1 ]; then
927 expected_num_hypervisors=1
929 expected_num_hypervisors=${NUM_OPENSTACK_COMPUTE_NODES}
930 if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
931 expected_num_hypervisors=$((expected_num_hypervisors + 1))
934 num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
935 if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
936 echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
941 echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
942 # FIXME Should there be a unique gateway IP and devstack index for each site?
944 for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
945 # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
946 ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
947 ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
951 if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then
952 # shellcheck disable=SC2206
953 ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]})
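# For every pair of nodes, install ESP transport-mode SAs in both directions and xfrm
# policies that protect all UDP traffic between the two hosts (i.e. the VXLAN tunnels),
# using randomly generated keys and SPI/reqid values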
954 for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do
955 for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do
956 KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64)
957 KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64)
958 ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8)
959 ip1=${ALL_NODES[$inx_ip1]}
960 ip2=${ALL_NODES[$inx_ip2]}
961 ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
962 ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
963 ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
964 ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"
966 ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
967 ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
968 ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"
969 ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
973 for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
974 echo "ip xfrm configuration for node $ip:"
975 ${SSH} $ip "sudo ip xfrm policy list"
976 ${SSH} $ip "sudo ip xfrm state list"
980 # Control Node - PUBLIC_BRIDGE will act as the external router
981 # Parameter values below are used in integration/test - changing them requires updates in integration/test as well
982 EXTNET_GATEWAY_IP="10.10.10.250"
983 EXTNET_INTERNET_IP="10.9.9.9"
984 EXTNET_PNF_IP="10.10.10.253"
985 ${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24"
987 # Control Node - external net PNF simulation
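# A veth pair simulates the PNF: pnf_veth0 stays on the host and is plugged into PUBLIC_BRIDGE,
# while pnf_veth1 moves into the pnf_ns namespace and carries the PNF IP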
988 ${SSH} ${!CONTROLIP} "
989 sudo ip netns add pnf_ns;
990 sudo ip link add pnf_veth0 type veth peer name pnf_veth1;
991 sudo ip link set pnf_veth1 netns pnf_ns;
992 sudo ip link set pnf_veth0 up;
993 sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24;
994 sudo ovs-vsctl add-port ${PUBLIC_BRIDGE} pnf_veth0;
997 # Control Node - external net internet address simulation
998 ${SSH} ${!CONTROLIP} "
999 sudo ip tuntap add dev internet_tap mode tap;
1000 sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24;
1005 for compute_ip in ${COMPUTE_IPS[*]}; do
1006 # Tunnel from controller to compute
1007 COMPUTEPORT=compute$(( compute_index++ ))_vxlan
1008 ${SSH} ${!CONTROLIP} "
1009 sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow
1011 # Tunnel from compute to controller
1012 CONTROLPORT="control_vxlan"
1013 ${SSH} $compute_ip "
1014 sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow
1018 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
1019 odlmgrip=OPENSTACK_HAPROXY_1_IP
1020 HA_PROXY_IP=${!odlmgrip}
1021 HA_PROXY_1_IP=${!odlmgrip}
1022 odlmgrip2=OPENSTACK_HAPROXY_2_IP
1023 HA_PROXY_2_IP=${!odlmgrip2}
1024 odlmgrip3=OPENSTACK_HAPROXY_1_IP
1025 HA_PROXY_3_IP=${!odlmgrip3}
1027 HA_PROXY_IP=${ODL_SYSTEM_IP}
1028 HA_PROXY_1_IP=${ODL_SYSTEM_1_IP}
1029 HA_PROXY_2_IP=${ODL_SYSTEM_2_IP}
1030 HA_PROXY_3_IP=${ODL_SYSTEM_3_IP}
1033 echo "Locating test plan to use..."
1034 testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
1035 if [ ! -f "${testplan_filepath}" ]; then
1036 testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
1039 echo "Changing the testplan path..."
1040 cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
1043 # Use the testplan if specific SUITES are not defined.
1044 if [ -z "${SUITES}" ]; then
1045 SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
1048 workpath="${WORKSPACE}/test/csit/suites"
1049 for suite in ${SUITES}; do
1050 fullsuite="${workpath}/${suite}"
1051 if [ -z "${newsuites}" ]; then
1052 newsuites+=${fullsuite}
1054 newsuites+=" "${fullsuite}
1060 # Install all client versions required for this job's testing
1061 install_openstack_clients_in_robot_vm
1063 # TODO: run openrc on control node and then scrape the vars from it
1064 # Environment variables needed to execute the OpenStack client for NetVirt jobs
1065 cat > /tmp/os_netvirt_client_rc << EOF
1066 export OS_USERNAME=admin
1067 export OS_PASSWORD=admin
1068 export OS_PROJECT_NAME=admin
1069 export OS_USER_DOMAIN_NAME=default
1070 export OS_PROJECT_DOMAIN_NAME=default
1071 export OS_AUTH_URL="http://${!CONTROLIP}/identity"
1072 export OS_IDENTITY_API_VERSION=3
1073 export OS_IMAGE_API_VERSION=2
1074 export OS_TENANT_NAME=admin
1078 source /tmp/os_netvirt_client_rc
1080 echo "Get all versions before executing pybot"
1081 echo "openstack --version"
1084 echo "nova --version"
1087 echo "neutron --version"
1091 stacktime=$(timer $totaltmr)
1092 printf "Stacking elapsed time: %s\n" "${stacktime}"
1094 echo "Starting Robot test suites ${SUITES} ..."
1095 # Please add each pybot -v argument on its own line and keep them alphabetized
1097 for suite in ${SUITES}; do
1098 # Prepend an incremental counter to the suite name so that the full robot log (combining all the suites,
1099 # as is done in the rebot step below) lists the suites in chronological order, since rebot seems to alphabetize them.
1100 let "suite_num = suite_num + 1"
1101 suite_index="$(printf %02d ${suite_num})"
1102 suite_name="$(basename ${suite} | cut -d. -f1)"
1103 log_name="${suite_index}_${suite_name}"
1104 pybot -N ${log_name} \
1105 -c critical -e exclude -e skip_if_${DISTROSTREAM} \
1106 --log log_${log_name}.html --report report_${log_name}.html --output output_${log_name}.xml \
1107 --removekeywords wuks \
1108 --removekeywords name:SetupUtils.Setup_Utils_For_Setup_And_Teardown \
1109 --removekeywords name:SetupUtils.Setup_Test_With_Logging_And_Without_Fast_Failing \
1110 --removekeywords name:OpenStackOperations.Add_OVS_Logging_On_All_OpenStack_Nodes \
1111 -v BUNDLEFOLDER:${BUNDLEFOLDER} \
1112 -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} \
1113 -v CMP_INSTANCES_SHARED_PATH:/var/instances \
1114 -v CONTROLLERFEATURES:"${CONTROLLERFEATURES}" \
1115 -v CONTROLLER_USER:${USER} \
1116 -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
1117 -v ENABLE_ITM_DIRECT_TUNNELS:${ENABLE_ITM_DIRECT_TUNNELS} \
1118 -v HA_PROXY_IP:${HA_PROXY_IP} \
1119 -v HA_PROXY_1_IP:${HA_PROXY_1_IP} \
1120 -v HA_PROXY_2_IP:${HA_PROXY_2_IP} \
1121 -v HA_PROXY_3_IP:${HA_PROXY_3_IP} \
1122 -v JDKVERSION:${JDKVERSION} \
1123 -v JENKINS_WORKSPACE:${WORKSPACE} \
1124 -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
1125 -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
1126 -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
1127 -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
1128 -v ODL_SNAT_MODE:${ODL_SNAT_MODE} \
1129 -v ODL_STREAM:${DISTROSTREAM} \
1130 -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
1131 -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
1132 -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
1133 -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
1134 -v ODL_SYSTEM_4_IP:${ODL_SYSTEM_4_IP} \
1135 -v ODL_SYSTEM_5_IP:${ODL_SYSTEM_5_IP} \
1136 -v ODL_SYSTEM_6_IP:${ODL_SYSTEM_6_IP} \
1137 -v ODL_SYSTEM_7_IP:${ODL_SYSTEM_7_IP} \
1138 -v ODL_SYSTEM_8_IP:${ODL_SYSTEM_8_IP} \
1139 -v ODL_SYSTEM_9_IP:${ODL_SYSTEM_9_IP} \
1140 -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
1141 -v OS_CONTROL_NODE_1_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
1142 -v OS_CONTROL_NODE_2_IP:${OPENSTACK_CONTROL_NODE_2_IP} \
1143 -v OS_CONTROL_NODE_3_IP:${OPENSTACK_CONTROL_NODE_3_IP} \
1144 -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
1145 -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
1146 -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
1147 -v OS_COMPUTE_3_IP:${OPENSTACK_COMPUTE_NODE_3_IP} \
1148 -v OS_COMPUTE_4_IP:${OPENSTACK_COMPUTE_NODE_4_IP} \
1149 -v OS_COMPUTE_5_IP:${OPENSTACK_COMPUTE_NODE_5_IP} \
1150 -v OS_COMPUTE_6_IP:${OPENSTACK_COMPUTE_NODE_6_IP} \
1151 -v OPENSTACK_TOPO:${OPENSTACK_TOPO} \
1152 -v OS_USER:${USER} \
1153 -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
1154 -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
1155 -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
1156 -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
1157 -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
1158 -v USER_HOME:${HOME} \
1160 ${TESTOPTIONS} ${suite} || true
1162 # rebot exit codes seem to be different
1163 rebot --output ${WORKSPACE}/output.xml --log log_full.html --report report.html -N openstack output_*.xml || true
1165 echo "Examining the files in data/log and checking file size"
1166 ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
1167 ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"
1169 echo "Tests Executed"
1170 printf "Total elapsed time: %s, stacking time: %s\n" "$(timer $totaltmr)" "${stacktime}"
1171 true # perhaps Jenkins is testing last exit code
1172 # vim: ts=4 sw=4 sts=4 et ft=sh :