# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source ${ROBOT_VENV}/bin/activate
source /tmp/common-functions.sh ${BUNDLEFOLDER}
# Ensure we fail the job if any steps fail.
set -ex -o pipefail
totaltmr=$(timer) # timer() comes from common-functions.sh

PYTHON="${ROBOT_VENV}/bin/python"
SSH="ssh -t -t"
ADMIN_PASSWORD="admin"
OPENSTACK_MASTER_CLIENTS_VERSION="queens"
# TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
printf "\nshowing recent changes that made it into the distribution used by this job:\n"
$PYTHON -m pip install --upgrade urllib3
python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
    -u ${ACTUAL_BUNDLE_URL} -b ${DISTROBRANCH} \
    -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true

printf "\nshowing recent changes that made it into integration/test used by this job:\n"
cd ${WORKSPACE}/test
printf "%s\n" "Hash    Author Date                    Commit Date                    Author               Subject"
printf "%s\n" "------- ------------------------------ ------------------------------ -------------------- -----------------------------"
git --no-pager log --pretty=format:'%h %<(30)%ad %<(30)%cd %<(20,trunc)%an%d %s' -n20
cd -

#################################################
##         Deploy Openstack 3-node             ##
#################################################

# Catch command errors and collect logs.
# This ensures logs are collected when script commands fail rather than simply exiting.
function trap_handler() {
    local prog="$0"
    local lastline="$1"
    local lasterr="$2"
    echo "trap_handler: ${prog}: line ${lastline}: exit status of last command: ${lasterr}"
    echo "trap_handler: command: ${BASH_COMMAND}"
} # trap_handler()

trap 'trap_handler ${LINENO} $?' ERR
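
# Illustrative output when a command fails (the script name, line number, status,
# and command shown here are hypothetical):
#   trap_handler: ./deploy-openstack-run-test.sh: line 123: exit status of last command: 1
#   trap_handler: command: scp ${WORKSPACE}/hosts_file 10.30.170.11:/tmp/hosts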

# Write a hosts file for the given node, mapping the IPs of all peers to their hostnames.
function create_etc_hosts() {
    NODE_IP=$1
    CTRL_IP=$2
    : > ${WORKSPACE}/hosts_file
    for iter in $(seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}); do
        COMPUTE_IP=OPENSTACK_COMPUTE_NODE_${iter}_IP
        if [ "${!COMPUTE_IP}" == "${NODE_IP}" ]; then
            CONTROL_HNAME=$(${SSH} ${CTRL_IP} "hostname")
            echo "${CTRL_IP} ${CONTROL_HNAME}" >> ${WORKSPACE}/hosts_file
        else
            COMPUTE_HNAME=$(${SSH} ${!COMPUTE_IP} "hostname")
            echo "${!COMPUTE_IP} ${COMPUTE_HNAME}" >> ${WORKSPACE}/hosts_file
        fi
    done

    echo "Created the hosts file for ${NODE_IP}:"
    cat ${WORKSPACE}/hosts_file
} # create_etc_hosts()
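
# Usage sketch (hypothetical IPs): build the hosts file as seen from compute 1,
# with the control node at 10.30.170.10. Note the function relies on bash
# indirect expansion: COMPUTE_IP holds a variable *name* and ${!COMPUTE_IP}
# yields that variable's value.
#   create_etc_hosts "${OPENSTACK_COMPUTE_NODE_1_IP}" "10.30.170.10"
#   scp ${WORKSPACE}/hosts_file ${OPENSTACK_COMPUTE_NODE_1_IP}:/tmp/hosts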

# Function to install OpenStack clients for testing.
# This will pull the latest versions compatible with the
# OpenStack release being tested.
function install_openstack_clients_in_robot_vm() {
    packages=("python-novaclient" "python-neutronclient" "python-openstackclient")
    os_plugins=$(csv2ssv "${ENABLE_OS_PLUGINS}")
    for plugin_name in $os_plugins; do
        if [ "$plugin_name" == "networking-sfc" ]; then
            packages+=("networking-sfc")
        fi
    done
    openstack_version=$(echo ${OPENSTACK_BRANCH} | cut -d/ -f2)
    # If the job tests "master", use the clients from the previous stable release to avoid failures.
    if [ "${openstack_version}" == "master" ]; then
        openstack_version=${OPENSTACK_MASTER_CLIENTS_VERSION}
    fi
    for package in ${packages[*]}; do
        echo "Get the currently supported version of the package ${package}"
        wget "https://raw.githubusercontent.com/openstack/requirements/stable/${openstack_version}/upper-constraints.txt" -O /tmp/constraints.txt 2>/dev/null
        echo "$PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt"
        $PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt
        echo "$PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt"
        $PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt
    done

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        # networking-l2gw is not officially available in any release yet. Pin the latest stable version.
        $PYTHON -m pip install networking-l2gw==11.0.0
    fi
}
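
# For reference, upper-constraints.txt pins every package to the version used by
# that OpenStack release, so the installs above resolve deterministically.
# Illustrative contents only (not real pins):
#   python-openstackclient===3.14.3
#   python-novaclient===10.1.0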

# Function to install the rdo release.
# This helps avoid installing wrong versions of packages, which causes
# functionality failures.
function install_rdo_release() {
    local ip=$1
    case ${OPENSTACK_BRANCH} in
        *pike*)
            ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm"
            ;;
        *queens*)
            ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm"
            ;;
        *)
            # Default to the queens repo for unrecognized branches.
            ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm"
            ;;
    esac
}

# Involves just setting up the shared directory
function setup_live_migration_control() {
    local control_ip=$1
    printf "%s:Setup directory share with NFS\n" "${control_ip}"
    cat > ${WORKSPACE}/setup_live_migration_control.sh << EOF
sudo mkdir --mode=777 /vm_instances
sudo chown -R jenkins:jenkins /vm_instances
sudo yum install -y nfs-utils
printf "/vm_instances *(rw,no_root_squash)" | sudo tee -a /etc/exports
sudo systemctl start rpcbind nfs-server
EOF
    scp ${WORKSPACE}/setup_live_migration_control.sh ${control_ip}:/tmp/setup_live_migration_control.sh
    ssh ${control_ip} "bash /tmp/setup_live_migration_control.sh"
}

# Involves mounting the share and configuring the libvirtd
function setup_live_migration_compute() {
    local compute_ip=$1
    local control_ip=$2
    printf "%s:Mount shared directory from %s\n" "${compute_ip}" "${control_ip}"
    printf "%s:Configure libvirt in listen mode\n" "${compute_ip}"
    cat > ${WORKSPACE}/setup_live_migration_compute.sh << EOF
sudo yum install -y libvirt libvirt-devel nfs-utils
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tls 0
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tcp 1
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' auth_tcp '"none"'
sudo crudini --verbose --set --inplace /etc/sysconfig/libvirtd '' LIBVIRTD_ARGS '"--listen"'
sudo mkdir --mode=777 -p /var/instances
sudo chown -R jenkins:jenkins /var/instances
sudo chmod o+x /var/instances
sudo systemctl start rpcbind
sudo mount -t nfs ${control_ip}:/vm_instances /var/instances
# restart so the listen settings above take effect
sudo systemctl restart libvirtd
EOF
    scp ${WORKSPACE}/setup_live_migration_compute.sh ${compute_ip}:/tmp/setup_live_migration_compute.sh
    ssh ${compute_ip} "bash /tmp/setup_live_migration_compute.sh"
}

# Add enable_services and disable_services to the local.conf
function add_os_services() {
    local core_services=$1
    local enable_services=$2
    local disable_services=$3
    local local_conf_file_name=$4
    local enable_network_services=$5

    cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${core_services}")
EOF
    if [ -n "${enable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${enable_services}")
EOF
    fi
    if [ -n "${disable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
disable_service $(csv2ssv "${disable_services}")
EOF
    fi
    if [ -n "${enable_network_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${enable_network_services}")
EOF
    fi
}
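
# csv2ssv is provided by /tmp/common-functions.sh. A minimal sketch of the
# behavior assumed above (the real helper may differ), converting a
# comma-separated list into the space-separated form enable_service expects:
#   csv2ssv() { echo "${1//,/ }"; }
# e.g. for "q-svc,q-dhcp" add_os_services writes: enable_service q-svc q-dhcp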

function create_control_node_local_conf() {
    HOSTIP=$1
    MGRIP=$2
    ODL_OVS_MANAGERS="$3"

    local_conf_file_name=${WORKSPACE}/local.conf_control_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
# Increase the wait used by stack to poll for services
SERVICE_TIMEOUT=1200
disable_all_services
EOF

    add_os_services "${CORE_OS_CONTROL_SERVICES}" "${ENABLE_OS_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}" "${ENABLE_OS_NETWORK_SERVICES}"

    cat >> ${local_conf_file_name} << EOF

HOST_IP=${HOSTIP}
SERVICE_HOST=\$HOST_IP
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
NEUTRON_CREATE_INITIAL_NETWORKS=${CREATE_INITIAL_NETWORKS}

ODL_MGR_IP=${MGRIP}
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}

NEUTRON_LBAAS_SERVICE_PROVIDERV2=${LBAAS_SERVICE_PROVIDER} # Only relevant if neutron-lbaas plugin is enabled
NEUTRON_SFC_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
NEUTRON_FLOWCLASSIFIER_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled

PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ML2_VLAN_RANGES=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF

    if [ "${TENANT_NETWORK_TYPE}" == "local" ]; then
        cat >> ${local_conf_file_name} << EOF
ENABLE_TENANT_TUNNELS=false
EOF
    fi

    if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
        echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
    fi

    for plugin_name in ${ENABLE_OS_PLUGINS}; do
        if [ "$plugin_name" == "networking-odl" ]; then
            ENABLE_PLUGIN_ARGS="${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}"
        elif [ "$plugin_name" == "kuryr-kubernetes" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_KUBERNETES_PLUGIN_REPO} master" # note: kuryr-kubernetes only exists in master at the moment
        elif [ "$plugin_name" == "neutron-lbaas" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_LBAAS_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
            IS_LBAAS_PLUGIN_ENABLED="yes"
        elif [ "$plugin_name" == "networking-sfc" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO} master"
            IS_SFC_PLUGIN_ENABLED="yes"
        else
            echo "Error: Invalid plugin $plugin_name, unsupported"
            continue
        fi

        cat >> ${local_conf_file_name} << EOF

enable_plugin ${plugin_name} ${ENABLE_PLUGIN_ARGS}
EOF
    done

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default
EOF
    fi

    if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
        SERVICE_PLUGINS="odl-router_v2"
    else
        SERVICE_PLUGINS="odl-router"
    fi
    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        SERVICE_PLUGINS+=", networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
    fi
    if [ "${IS_LBAAS_PLUGIN_ENABLED}" == "yes" ]; then
        SERVICE_PLUGINS+=", lbaasv2"
    fi
    if [ "${IS_SFC_PLUGIN_ENABLED}" == "yes" ]; then
        SERVICE_PLUGINS+=", networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,networking_sfc.services.sfc.plugin.SfcPlugin"
    fi

    cat >> ${local_conf_file_name} << EOF

[[post-config|\$NEUTRON_CONF]]
[DEFAULT]
service_plugins = ${SERVICE_PLUGINS}
log_dir = /opt/stack/logs

[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
[agent]
minimize_polling=True
[ml2]
# Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
# MTU(1400) + VXLAN(50) + VLAN(4) = 1454 < MTU eth0/br-physnet1(1458)
physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1400
path_mtu = 1458
EOF

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

[ml2_odl]
enable_dhcp_service = True
EOF
    fi

    cat >> ${local_conf_file_name} << EOF

[ml2_odl]
# Trigger n-odl full sync every 30 secs.
maintenance_interval = 30

[[post-config|/etc/neutron/dhcp_agent.ini]]
[DEFAULT]
force_metadata = True
enable_isolated_metadata = True
log_dir = /opt/stack/logs

[[post-config|/etc/nova/nova.conf]]
[scheduler]
discover_hosts_in_cells_interval = 30

[DEFAULT]
force_config_drive = False
force_raw_images = False
log_dir = /opt/stack/logs
EOF

    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        cat >> ${local_conf_file_name} << EOF
force_raw_images = False
log_dir = /opt/stack/logs
[libvirt]
live_migration_uri = qemu+tcp://%s/system
EOF
    fi

    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        echo "Combo local.conf created:"
    else
        echo "Control local.conf created:"
    fi
    cat ${local_conf_file_name}
} # create_control_node_local_conf()
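
# For orientation, a trimmed sketch of the localrc section the function above
# produces (hypothetical IPs; the real file also carries the service lists and
# plugin stanzas):
#   [[local|localrc]]
#   HOST_IP=10.30.170.11
#   SERVICE_HOST=$HOST_IP
#   Q_ML2_TENANT_NETWORK_TYPE=vxlan
#   ODL_MGR_IP=10.30.170.20
#   ODL_OVS_MANAGERS=10.30.170.5,10.30.170.6,10.30.170.7
#   ADMIN_PASSWORD=admin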

function create_compute_node_local_conf() {
    HOSTIP=$1
    SERVICEHOST=$2
    MGRIP=$3
    ODL_OVS_MANAGERS="$4"

    local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
# Increase the wait used by stack to poll for the nova service on the control node
NOVA_READY_TIMEOUT=1800
disable_all_services
EOF

    add_os_services "${CORE_OS_COMPUTE_SERVICES}" "${ENABLE_OS_COMPUTE_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}"

    cat >> ${local_conf_file_name} << EOF

HOST_IP=${HOSTIP}
SERVICE_HOST=${SERVICEHOST}
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}

ODL_MGR_IP=${MGRIP}
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

Q_HOST=\$SERVICE_HOST
MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}

PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF

    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-odl ]]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
EOF
    fi

    cat >> ${local_conf_file_name} << EOF

[[post-config|/etc/nova/nova.conf]]
[api]
auth_strategy = keystone
[DEFAULT]
force_raw_images = False
log_dir = /opt/stack/logs
[libvirt]
live_migration_uri = qemu+tcp://%s/system
EOF

    echo "Compute local.conf created:"
    cat ${local_conf_file_name}
} # create_compute_node_local_conf()

function configure_haproxy_for_neutron_requests() {
    MGRIP=$1
    # shellcheck disable=SC2206
    ODL_IPS=(${2//,/ })

    cat > ${WORKSPACE}/install_ha_proxy.sh << EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

    cat > ${WORKSPACE}/haproxy.cfg << EOF
global
  pidfile /tmp/haproxy.pid

defaults
  mode tcp
  timeout http-request 10s

listen opendaylight
  bind ${MGRIP}:8080
  balance source

listen opendaylight_rest
  bind ${MGRIP}:8181
  balance source

listen opendaylight_websocket
  bind ${MGRIP}:8185
  balance source
EOF

    odlindex=1
    for odlip in ${ODL_IPS[*]}; do
        sed -i "/listen opendaylight$/a server controller-${odlindex} ${odlip}:8080 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
        sed -i "/listen opendaylight_rest$/a server controller-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
        sed -i "/listen opendaylight_websocket$/a server controller-websocket-${odlindex} ${odlip}:8185 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg
        odlindex=$((odlindex+1))
    done

    echo "Dump haproxy.cfg"
    cat ${WORKSPACE}/haproxy.cfg

    cat > ${WORKSPACE}/deploy_ha_proxy.sh << EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sudo systemctl status haproxy
EOF

    scp ${WORKSPACE}/install_ha_proxy.sh ${MGRIP}:/tmp
    ${SSH} ${MGRIP} "sudo bash /tmp/install_ha_proxy.sh"
    scp ${WORKSPACE}/haproxy.cfg ${MGRIP}:/tmp
    scp ${WORKSPACE}/deploy_ha_proxy.sh ${MGRIP}:/tmp
    ${SSH} ${MGRIP} "sudo bash /tmp/deploy_ha_proxy.sh"
} # configure_haproxy_for_neutron_requests()
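
# After the sed inserts above, each listen block carries one server line per ODL
# instance, e.g. for a hypothetical 3-node cluster:
#   listen opendaylight_rest
#     bind 10.30.170.20:8181
#     balance source
#     server controller-rest-3 10.30.170.7:8181 check fall 5 inter 2000 rise 2
#     server controller-rest-2 10.30.170.6:8181 check fall 5 inter 2000 rise 2
#     server controller-rest-1 10.30.170.5:8181 check fall 5 inter 2000 rise 2
# (each "a" append lands directly under the listen line, so later inserts sort first)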

# The following three functions are debugging helpers for devstack changes.
# They are kept here so they can simply be called when needed.

function get_hostnames() {
    set +e
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
    local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}
    ctrlhn=$(${SSH} ${ctrlip} "hostname")
    comp1hn=$(${SSH} ${comp1ip} "hostname")
    comp2hn=$(${SSH} ${comp2ip} "hostname")
    echo "hostnames: ${ctrlhn}, ${comp1hn}, ${comp2hn}"
    set -e
} # get_hostnames()

function check_firewall() {
    set +e
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
    local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}

    echo "check_firewall on control"
    ${SSH} ${ctrlip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    "
    echo "check_firewall on compute 1"
    ${SSH} ${comp1ip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    "
    echo "check_firewall on compute 2"
    ${SSH} ${comp2ip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    "
    set -e
} # check_firewall()

function get_service() {
    set +e
    local iter=$1
    #local idx=$2
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}

    #if [ ${idx} -eq 1 ]; then
    if [ ${iter} -eq 1 ] || [ ${iter} -gt 16 ]; then
        curl http://${ctrlip}:5000
        curl http://${ctrlip}:35357
        curl http://${ctrlip}/identity
        ${SSH} ${comp1ip} "
            source /opt/stack/devstack/openrc admin admin;
            openstack configuration show --unmask;
            openstack service list
            openstack --os-cloud devstack-admin --os-region RegionOne compute service list
            openstack hypervisor list;
        "
    fi
    set -e
} # get_service()

# Check if rabbitmq is ready by looking for the nova_cell1 vhost in its vhost list.
# The function returns the status of the grep command, which callers can check.
function is_rabbitmq_ready() {
    local -r ip=${1}
    local grepfor="nova_cell1"
    ${SSH} ${ip} "sudo rabbitmqctl list_vhosts" > rabbit.txt
    grep ${grepfor} rabbit.txt
} # is_rabbitmq_ready()

# Retry the given command ($3) until success for a number of iterations ($1),
# sleeping ($2) seconds between tries.
function retry() {
    local -r -i max_tries=${1}
    local -r -i sleep_time=${2}
    local -r cmd=${3}
    local -i retries=1
    local -i rc=1
    while true; do
        echo "retry ${cmd}: attempt: ${retries}"
        ${cmd}
        rc=$?
        if ((${rc} == 0)); then
            break
        fi
        if ((${retries} == ${max_tries})); then
            break
        fi
        ((retries++))
        sleep ${sleep_time}
    done
    return ${rc}
} # retry()
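
# Usage sketch for retry: poll a hypothetical condition every 10s, giving up
# after 6 tries, then branch on the result:
#   retry 6 10 "test -f /tmp/stack_progress"
#   rc=$?
#   if ((rc != 0)); then echo "gave up after 6 tries"; fi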

ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"

# Always compare the lists below against the devstack upstream ENABLED_SERVICES in
# https://github.com/openstack-dev/devstack/blob/master/stackrc#L52
# ODL CSIT does not use vnc, cinder, q-agt, q-l3 or horizon so they are not included below.

# collect performance stats
CORE_OS_CONTROL_SERVICES="dstat"
# Glance - image service
CORE_OS_CONTROL_SERVICES+=",g-api,g-reg"
# Keystone - identity service
CORE_OS_CONTROL_SERVICES+=",key"
# Nova - services to support libvirt
CORE_OS_CONTROL_SERVICES+=",n-api,n-api-meta,n-cauth,n-cond,n-crt,n-obj,n-sch"
# ODL - services to connect to ODL
CORE_OS_CONTROL_SERVICES+=",odl-compute,odl-neutron"
# Additional services
CORE_OS_CONTROL_SERVICES+=",mysql,rabbit"

# collect performance stats
CORE_OS_COMPUTE_SERVICES="dstat"
# computes only need nova and odl
CORE_OS_COMPUTE_SERVICES+=",n-cpu,odl-compute"

cat > ${WORKSPACE}/disable_firewall.sh << EOF
sudo systemctl stop firewalld
# Open these ports to match the tutorial vms
# http/https (80/443), samba (445), netbios (137,138,139)
sudo iptables -I INPUT -p tcp -m multiport --dports 80,443,139,445 -j ACCEPT
sudo iptables -I INPUT -p udp -m multiport --dports 137,138 -j ACCEPT
# OpenStack services as well as vxlan tunnel ports 4789 and 9876
# identity public/admin (5000/35357), amqp (5672), vnc (6080), nova (8774), glance (9292), neutron (9696)
sudo iptables -I INPUT -p tcp -m multiport --dports 5000,5672,6080,8774,9292,9696,35357 -j ACCEPT
sudo iptables -I INPUT -p udp -m multiport --dports 4789,9876 -j ACCEPT
sudo iptables-save > /etc/sysconfig/iptables
sudo systemctl restart iptables
sudo iptables --line-numbers -nvL
EOF

cat > ${WORKSPACE}/get_devstack.sh << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils python-pip -y
#sudo systemctl stop NetworkManager
# Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
# Workaround for mysql failure
echo "127.0.0.1 localhost \${HOSTNAME}" >> /tmp/hosts
echo "::1 localhost \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo mkdir /opt/stack
echo "Create RAM disk for /opt/stack"
sudo mount -t tmpfs -o size=2G tmpfs /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
echo "git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}"
git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}
cd devstack
if [ -n "${DEVSTACK_HASH}" ]; then
    echo "git checkout ${DEVSTACK_HASH}"
    git checkout ${DEVSTACK_HASH}
fi
git --no-pager log --pretty=format:'%h %<(13)%ar%<(13)%cr %<(20,trunc)%an%d %s%b' -n20

echo "workaround: do not upgrade openvswitch"
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add openvswitch

# Install the qemu-img command on the control node for Pike
echo "Install qemu-img application"
sudo yum install -y qemu-img
EOF

cat > "${WORKSPACE}/setup_host_cell_mapping.sh" << EOF
sudo nova-manage cell_v2 map_cell0
sudo nova-manage cell_v2 simple_cell_setup
sudo nova-manage db sync
sudo nova-manage cell_v2 discover_hosts
EOF
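
# The resulting cell mapping can be checked on the control node with the
# standard nova-manage listing (illustrative invocation):
#   ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo nova-manage cell_v2 list_cells"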

NUM_OPENSTACK_SITES=${NUM_OPENSTACK_SITES:-1}

os_interval=$(( ${NUM_OPENSTACK_SYSTEM} / ${NUM_OPENSTACK_SITES} ))
ha_proxy_index=${os_interval}

for i in $(seq 1 ${NUM_OPENSTACK_SITES}); do
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        echo "Configure HAProxy"
        ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_${i}_IP
        ha_proxy_index=$(( $ha_proxy_index + $os_interval ))
        odl_index=$(((i - 1) * 3 + 1))
        ODL_IP_PARAM1=ODL_SYSTEM_$((odl_index++))_IP
        ODL_IP_PARAM2=ODL_SYSTEM_$((odl_index++))_IP
        ODL_IP_PARAM3=ODL_SYSTEM_$((odl_index++))_IP
        ODLMGRIP[$i]=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses HAProxy VIP
        ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs
        configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS[$i]}"
    else
        ODL_IP_PARAM=ODL_SYSTEM_${i}_IP
        ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM}" # OVSDB connects to the ODL IP
        ODLMGRIP[$i]=${!ODL_IP_PARAM} # ODL Northbound uses the ODL IP
    fi
done
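
# Example of the resulting mapping for one site fronted by HAProxy with a 3-node
# ODL cluster (hypothetical IPs): Neutron northbound traffic goes to the VIP,
# while the OVSDB manager list names every ODL instance directly:
#   ODLMGRIP[1]=10.30.170.20                               # HAProxy VIP
#   ODL_OVS_MGRS[1]="10.30.170.5,10.30.170.6,10.30.170.7"  # all ODL IPs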

os_ip_list=()
for i in $(seq 1 ${NUM_OPENSTACK_CONTROL_NODES}); do
    cip=OPENSTACK_CONTROL_NODE_${i}_IP
    ip=${!cip}
    os_ip_list+=("${ip}")
done

for i in $(seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}); do
    cip=OPENSTACK_COMPUTE_NODE_${i}_IP
    ip=${!cip}
    os_ip_list+=("${ip}")
done

for i in "${!os_ip_list[@]}"; do
    ip=${os_ip_list[i]}
    tcpdump_start "${i}" "${ip}" "port 6653"
done

# Begin stacking the nodes, starting with the controller(s) and then the compute(s)

os_node_list=()
for i in $(seq 1 ${NUM_OPENSTACK_CONTROL_NODES}); do
    CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
    echo "Configure the stack of the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${!CONTROLIP}"
    scp ${WORKSPACE}/disable_firewall.sh ${!CONTROLIP}:/tmp
    ${SSH} ${!CONTROLIP} "sudo bash /tmp/disable_firewall.sh"
    create_etc_hosts ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!CONTROLIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!CONTROLIP}:/tmp
    # devstack master is yet to migrate fully to lib/neutron, and some ugly hacks are
    # affecting the stacking.
    # Workaround for Queens: make the physical network physnet1 in lib/neutron.
    # In Queens the new neutron libs are used and do not have the following options from Pike and earlier:
    # Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS could be used for the flat_networks
    # and Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS could be used for the ml2_type_vlan
    ${SSH} ${!CONTROLIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    if [ "${ODL_ML2_BRANCH}" == "stable/queens" ]; then
        ssh ${!CONTROLIP} "sed -i 's/flat_networks public/flat_networks public,physnet1/' /opt/stack/devstack/lib/neutron"
        ssh ${!CONTROLIP} "sed -i '186i iniset \$NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges public:1:4094,physnet1:1:4094' /opt/stack/devstack/lib/neutron"
    fi
    create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP[$i]} "${ODL_OVS_MGRS[$i]}"
    scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf
    echo "Install rdo release to avoid incompatible package versions"
    install_rdo_release ${!CONTROLIP}
    setup_live_migration_control ${!CONTROLIP}
    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        setup_live_migration_compute ${!CONTROLIP} ${!CONTROLIP}
    fi
    echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${!CONTROLIP}"
    ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
    ssh ${!CONTROLIP} "ls -lrt /opt/stack/devstack/nohup.out"
    os_node_list+=("${!CONTROLIP}")
done

# This is a backup to the CELLSV2_SETUP=singleconductor workaround. Keeping it here as an easy
# reference in case it is needed again.

# Let the control node get started to avoid a race condition where the computes start and try to access
# the nova_cell1 on the control node before it is created. If that happens, the nova-compute service on the
# compute exits and does not attempt to restart.
# 180s is chosen because in test runs the control node usually finished in 17-20 minutes and the computes finished
# in 17 minutes, so take the max difference of 3 minutes and the jobs should still finish around the same time.
# One of the following errors is seen in the compute n-cpu.log:
# Unhandled error: NotAllowed: Connection.open: (530) NOT_ALLOWED - access to vhost 'nova_cell1' refused for user 'stackrabbit'
# AccessRefused: (0, 0): (403) ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.
# Compare that timestamp to this log in the control stack.log: sudo rabbitmqctl set_permissions -p nova_cell1 stackrabbit
# If the n-cpu.log is earlier than the control stack.log timestamp then the failure condition is likely hit.
if [ ${NUM_OPENSTACK_COMPUTE_NODES} -gt 0 ]; then
    WAIT_FOR_RABBITMQ_MINUTES=60
    echo "Wait a maximum of ${WAIT_FOR_RABBITMQ_MINUTES}m until rabbitmq is ready and nova_cell1 is created to allow the controller to create nova_cell1 before the computes need it"
    set +e
    retry ${WAIT_FOR_RABBITMQ_MINUTES} 60 "is_rabbitmq_ready ${OPENSTACK_CONTROL_NODE_1_IP}"
    rc=$?
    set -e
    if ((${rc} == 0)); then
        echo "rabbitmq is ready, starting ${NUM_OPENSTACK_COMPUTE_NODES} compute(s)"
    else
        echo "rabbitmq was not ready in ${WAIT_FOR_RABBITMQ_MINUTES}m"
        exit 1
    fi
fi

for i in $(seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}); do
    NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES))
    SITE_INDEX=$((((i - 1) / NUM_COMPUTES_PER_SITE) + 1)) # We need the site index to infer the control node IP for this compute
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    CONTROLIP=OPENSTACK_CONTROL_NODE_${SITE_INDEX}_IP
    echo "Configure the stack of the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
    scp ${WORKSPACE}/disable_firewall.sh "${!COMPUTEIP}:/tmp"
    ${SSH} "${!COMPUTEIP}" "sudo bash /tmp/disable_firewall.sh"
    create_etc_hosts ${!COMPUTEIP} ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP[$SITE_INDEX]} "${ODL_OVS_MGRS[$SITE_INDEX]}"
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    echo "Install rdo release to avoid incompatible package versions"
    install_rdo_release ${!COMPUTEIP}
    setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP}
    echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=("${!COMPUTEIP}")
done

echo "nodelist: ${os_node_list[*]}"

# This script runs on the openstack nodes. It greps for a string that devstack writes when stacking is complete.
# The script then writes a status depending on the grep output that is later scraped by the robot vm to control
# the status polling.
cat > ${WORKSPACE}/check_stacking.sh << EOF
> /tmp/stack_progress
ps -ef | grep "stack.sh" | grep -v grep
ret=\$?
if [ \${ret} -eq 1 ]; then
    grep "This is your host IP address:" /opt/stack/devstack/nohup.out
    if [ \$? -eq 0 ]; then
        echo "Stacking Complete" > /tmp/stack_progress
    else
        echo "Stacking Failed" > /tmp/stack_progress
    fi
elif [ \${ret} -eq 0 ]; then
    echo "Still Stacking" > /tmp/stack_progress
fi
EOF
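
# Note the escaping in the heredoc above: \$? and \${ret} are escaped so they
# are evaluated on the openstack node when check_stacking.sh runs, not while
# this script generates it; an unescaped reference such as ${WORKSPACE} would
# instead be expanded here at generation time.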

# Check if the stacking is finished. Poll all nodes every 60s for one hour.
iteration=0
in_progress=1
while [ ${in_progress} -eq 1 ]; do
    iteration=$(($iteration + 1))
    for index in "${!os_node_list[@]}"; do
        echo "node $index ${os_node_list[index]}: checking stacking status attempt ${iteration} of 60"
        scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
        ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
        scp ${os_node_list[index]}:/tmp/stack_progress .
        stacking_status=$(cat stack_progress)
        # To inspect services when a node finishes or fails, uncomment the line below:
        # get_service "${iteration}" "${index}"
        if [ "$stacking_status" == "Still Stacking" ]; then
            continue
        elif [ "$stacking_status" == "Stacking Failed" ]; then
            echo "node $index ${os_node_list[index]}: stacking has failed"
            exit 1
        elif [ "$stacking_status" == "Stacking Complete" ]; then
            echo "node $index ${os_node_list[index]}: stacking complete"
            unset 'os_node_list[index]'
            if [ ${#os_node_list[@]} -eq 0 ]; then
                in_progress=0
            fi
        fi
    done
    echo "sleep for a minute before the next check"
    sleep 60
    if [ ${iteration} -eq 60 ]; then
        echo "stacking has failed - took longer than 60m"
        exit 1
    fi
done

# Further configuration now that stacking is complete.
NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES))
for i in $(seq 1 ${NUM_OPENSTACK_SITES}); do
    echo "Configure the Control Node"
    CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
    # Gather Compute IPs for the site
    for j in $(seq 1 ${NUM_COMPUTES_PER_SITE}); do
        COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j))
        IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP
        COMPUTE_IPS[$((j-1))]=${!IP_VAR}
    done

    echo "sleep for 60s and print hypervisor-list"
    sleep 60
    ${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
    # In the case that we are doing openstack (control + compute) all in one node, the number of hypervisors
    # will be the same as the number of openstack systems. However, if we are doing multinode openstack, the
    # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
    # is one less than the total number of openstack systems.
    if [ $((NUM_OPENSTACK_SYSTEM / NUM_OPENSTACK_SITES)) -eq 1 ]; then
        expected_num_hypervisors=1
    else
        expected_num_hypervisors=${NUM_COMPUTES_PER_SITE}
        if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
            expected_num_hypervisors=$((expected_num_hypervisors + 1))
        fi
    fi
    num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
    if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
        echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
        exit 1
    fi

    # Gather Compute IPs for the site (the values are unchanged from above)
    for j in $(seq 1 ${NUM_COMPUTES_PER_SITE}); do
        COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j))
        IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP
        COMPUTE_IPS[$((j-1))]=${!IP_VAR}
    done

    echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
    # FIXME Should there be a unique gateway IP and devstack index for each site?
    devstack_index=1
    for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
        # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
        ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
        ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
    done

    if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then
        # shellcheck disable=SC2206
        ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]})
        for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do
            for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do
                KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64)
                KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64)
                ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8)
                ip1=${ALL_NODES[$inx_ip1]}
                ip2=${ALL_NODES[$inx_ip2]}
                ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
                ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"

                ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"
                ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
            done
        done

        for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
            echo "ip xfrm configuration for node $ip:"
            ${SSH} $ip "sudo ip xfrm policy list"
            ${SSH} $ip "sudo ip xfrm state list"
        done
    fi

    # Control Node - PUBLIC_BRIDGE will act as the external router
    # Parameter values below are used in integration/test - changing them requires updates in integration/test as well
    EXTNET_GATEWAY_IP="10.10.10.250"
    EXTNET_INTERNET_IP="10.9.9.9"
    EXTNET_PNF_IP="10.10.10.253"
    ${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24"

    # Control Node - external net PNF simulation
    ${SSH} ${!CONTROLIP} "
        sudo ip netns add pnf_ns;
        sudo ip link add pnf_veth0 type veth peer name pnf_veth1;
        sudo ip link set pnf_veth1 netns pnf_ns;
        sudo ip link set pnf_veth0 up;
        sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24;
        sudo ovs-vsctl add-port ${PUBLIC_BRIDGE} pnf_veth0;
    "

    # Control Node - external net internet address simulation
    ${SSH} ${!CONTROLIP} "
        sudo ip tuntap add dev internet_tap mode tap;
        sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24;
    "

    compute_index=1
    for compute_ip in ${COMPUTE_IPS[*]}; do
        # Tunnel from controller to compute
        COMPUTEPORT=compute$(( compute_index++ ))_vxlan
        ${SSH} ${!CONTROLIP} "
            sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow
        "
        # Tunnel from compute to controller
        CONTROLPORT="control_vxlan"
        ${SSH} $compute_ip "
            sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow
        "
    done
done

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    odlmgrip=OPENSTACK_HAPROXY_1_IP
    HA_PROXY_IP=${!odlmgrip}
    HA_PROXY_1_IP=${!odlmgrip}
    odlmgrip2=OPENSTACK_HAPROXY_2_IP
    HA_PROXY_2_IP=${!odlmgrip2}
    odlmgrip3=OPENSTACK_HAPROXY_3_IP
    HA_PROXY_3_IP=${!odlmgrip3}
else
    HA_PROXY_IP=${ODL_SYSTEM_IP}
    HA_PROXY_1_IP=${ODL_SYSTEM_1_IP}
    HA_PROXY_2_IP=${ODL_SYSTEM_2_IP}
    HA_PROXY_3_IP=${ODL_SYSTEM_3_IP}
fi

echo "Locating test plan to use..."
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
cat testplan.txt

# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
    SUITES=$(egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
fi

workpath="${WORKSPACE}/test/csit/suites"
newsuites=""
for suite in ${SUITES}; do
    fullsuite="${workpath}/${suite}"
    if [ -z "${newsuites}" ]; then
        newsuites+=${fullsuite}
    else
        newsuites+=" "${fullsuite}
    fi
done

# Install all client versions required for this job's testing
install_openstack_clients_in_robot_vm

# TODO: run openrc on the control node and then scrape the vars from it
# Environment variables needed to execute the OpenStack client for NetVirt jobs
cat > /tmp/os_netvirt_client_rc << EOF
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL="http://${!CONTROLIP}/identity"
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export OS_TENANT_NAME=admin
EOF

source /tmp/os_netvirt_client_rc
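
# Sanity-check sketch: with the rc file sourced, the clients installed in
# ${ROBOT_VENV} should reach keystone on the control node, e.g.:
#   openstack token issue
#   openstack endpoint list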

echo "Get all versions before executing pybot"
echo "openstack --version"
openstack --version
echo "nova --version"
nova --version
echo "neutron --version"
neutron --version

stacktime=$(timer $totaltmr)
printf "Stacking elapsed time: %s\n" "${stacktime}"

echo "Starting Robot test suites ${SUITES} ..."
# please add pybot -v arguments on a single line and alphabetized
suite_num=0
for suite in ${SUITES}; do
    # Prepend an incremental counter to the suite name so that the full robot log combining all the suites, as is done
    # in the rebot step below, will list all the suites in chronological order; rebot seems to alphabetize them.
    let "suite_num = suite_num + 1"
    suite_index="$(printf %02d ${suite_num})"
    suite_name="$(basename ${suite} | cut -d. -f1)"
    log_name="${suite_index}_${suite_name}"
    pybot -N ${log_name} \
        -c critical -e exclude -e skip_if_${DISTROSTREAM} \
        --log log_${log_name}.html --report report_${log_name}.html --output output_${log_name}.xml \
        --removekeywords wuks \
        --removekeywords name:SetupUtils.Setup_Utils_For_Setup_And_Teardown \
        --removekeywords name:SetupUtils.Setup_Test_With_Logging_And_Without_Fast_Failing \
        --removekeywords name:OpenStackOperations.Add_OVS_Logging_On_All_OpenStack_Nodes \
        -v BUNDLEFOLDER:${BUNDLEFOLDER} \
        -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} \
        -v CMP_INSTANCES_SHARED_PATH:/var/instances \
        -v CONTROLLERFEATURES:"${CONTROLLERFEATURES}" \
        -v CONTROLLER_USER:${USER} \
        -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
        -v ENABLE_ITM_DIRECT_TUNNELS:${ENABLE_ITM_DIRECT_TUNNELS} \
        -v HA_PROXY_IP:${HA_PROXY_IP} \
        -v HA_PROXY_1_IP:${HA_PROXY_1_IP} \
        -v HA_PROXY_2_IP:${HA_PROXY_2_IP} \
        -v HA_PROXY_3_IP:${HA_PROXY_3_IP} \
        -v JDKVERSION:${JDKVERSION} \
        -v JENKINS_WORKSPACE:${WORKSPACE} \
        -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
        -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
        -v NUM_OPENSTACK_SITES:${NUM_OPENSTACK_SITES} \
        -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
        -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
        -v ODL_SNAT_MODE:${ODL_SNAT_MODE} \
        -v ODL_STREAM:${DISTROSTREAM} \
        -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
        -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
        -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
        -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
        -v ODL_SYSTEM_4_IP:${ODL_SYSTEM_4_IP} \
        -v ODL_SYSTEM_5_IP:${ODL_SYSTEM_5_IP} \
        -v ODL_SYSTEM_6_IP:${ODL_SYSTEM_6_IP} \
        -v ODL_SYSTEM_7_IP:${ODL_SYSTEM_7_IP} \
        -v ODL_SYSTEM_8_IP:${ODL_SYSTEM_8_IP} \
        -v ODL_SYSTEM_9_IP:${ODL_SYSTEM_9_IP} \
        -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
        -v OS_CONTROL_NODE_1_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
        -v OS_CONTROL_NODE_2_IP:${OPENSTACK_CONTROL_NODE_2_IP} \
        -v OS_CONTROL_NODE_3_IP:${OPENSTACK_CONTROL_NODE_3_IP} \
        -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
        -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
        -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
        -v OS_COMPUTE_3_IP:${OPENSTACK_COMPUTE_NODE_3_IP} \
        -v OS_COMPUTE_4_IP:${OPENSTACK_COMPUTE_NODE_4_IP} \
        -v OS_COMPUTE_5_IP:${OPENSTACK_COMPUTE_NODE_5_IP} \
        -v OS_COMPUTE_6_IP:${OPENSTACK_COMPUTE_NODE_6_IP} \
        -v OPENSTACK_TOPO:${OPENSTACK_TOPO} \
        -v OS_USER:${USER} \
        -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
        -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
        -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
        -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
        -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
        -v USER_HOME:${HOME} \
        ${TESTOPTIONS} ${suite} || true
done

# rebot exit codes seem to be different
rebot --output ${WORKSPACE}/output.xml --log log_full.html --report report.html -N openstack output_*.xml || true

echo "Examining the files in data/log and checking file size"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

echo "Tests Executed"
printf "Total elapsed time: %s, stacking time: %s\n" "$(timer $totaltmr)" "${stacktime}"
true # perhaps Jenkins is testing last exit code
# vim: ts=4 sw=4 sts=4 et ft=sh :