#!/bin/bash
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh
# script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source ${ROBOT_VENV}/bin/activate
source /tmp/common-functions.sh ${BUNDLEFOLDER}
# Ensure we fail the job if any steps fail.
set -ex -o pipefail
totaltmr=$(timer)
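# Note: timer and the other helpers used below (get_os_deploy, print_job_parameters,
# csv2ssv, get_test_suites, tcpdump_start, is_openstack_feature_enabled) are not defined
# in this file; they are assumed to be provided by the sourced common-functions.sh.
# As used here, timer with no argument takes a start timestamp, and calling it again
# with that value ("$(timer $totaltmr)") yields the elapsed time.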
get_os_deploy

PYTHON="${ROBOT_VENV}/bin/python"
SSH="ssh -t -t"
ADMIN_PASSWORD="admin"
OPENSTACK_MASTER_CLIENTS_VERSION="queens"
# Size of the partition mounted at /opt/stack on control and compute nodes
TMPFS_SIZE=2G
if [ "${ODL_ML2_BRANCH}" == "stable/rocky" ]; then
    TMPFS_SIZE=12G
fi
# TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
printf "\nshowing recent changes that made it into the distribution used by this job:\n"
$PYTHON -m pip install --upgrade urllib3
python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
    -u ${ACTUAL_BUNDLE_URL} -b ${DISTROBRANCH} \
    -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true

printf "\nshowing recent changes that made it into integration/test used by this job:\n"
cd ${WORKSPACE}/test
printf "Hash    Author Date                    Commit Date                    Author               Subject\n"
printf "%s\n" "------- ------------------------------ ------------------------------ -------------------- -----------------------------"
git --no-pager log --pretty=format:'%h %<(30)%ad %<(30)%cd %<(20,trunc)%an%d %s' -n20
printf "\n"
cd -

cat << EOF
#################################################
##         Deploy OpenStack 3-node             ##
#################################################
EOF

# Catch command errors and collect logs.
# This ensures logs are collected when script commands fail rather than simply exiting.
function trap_handler() {
    local prog="$0"
    local lastline="$1"
    local lasterr="$2"
    echo "trap_handler: ${prog}: line ${lastline}: exit status of last command: ${lasterr}"
    echo "trap_handler: command: ${BASH_COMMAND}"
    exit 1
} # trap_handler()

trap 'trap_handler ${LINENO} $?' ERR
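# Illustration of the handler above: if the command on (say) line 123 exits with
# status 1, bash invokes trap_handler with LINENO=123 and $?=1, so the job log
# would show lines in the form:
#   trap_handler: <script>: line 123: exit status of last command: 1
#   trap_handler: command: <the failing ${BASH_COMMAND}>
# before the script exits 1 and the job moves on to log collection.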

print_job_parameters

function create_etc_hosts() {
    NODE_IP=$1
    CTRL_IP=$2
    : > ${WORKSPACE}/hosts_file
    for iter in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
        COMPUTE_IP=OPENSTACK_COMPUTE_NODE_${iter}_IP
        if [ "${!COMPUTE_IP}" == "${NODE_IP}" ]; then
            CONTROL_HNAME=$(${SSH} ${CTRL_IP} "hostname")
            echo "${CTRL_IP}   ${CONTROL_HNAME}" >> ${WORKSPACE}/hosts_file
        else
            COMPUTE_HNAME=$(${SSH} ${!COMPUTE_IP} "hostname")
            echo "${!COMPUTE_IP}   ${COMPUTE_HNAME}" >> ${WORKSPACE}/hosts_file
        fi
    done

    echo "Created the hosts file for ${NODE_IP}:"
    cat ${WORKSPACE}/hosts_file
} # create_etc_hosts()
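# Note on the ${!VAR} pattern used above and throughout this script: bash indirect
# expansion first builds a variable *name* (e.g. COMPUTE_IP=OPENSTACK_COMPUTE_NODE_1_IP)
# and ${!COMPUTE_IP} then yields that variable's value, i.e. the node IP injected by
# the Jenkins job parameters. Example (hypothetical value):
#   OPENSTACK_COMPUTE_NODE_1_IP=10.30.170.12
#   COMPUTE_IP=OPENSTACK_COMPUTE_NODE_1_IP
#   echo "${!COMPUTE_IP}"   # prints 10.30.170.12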

# Function to install the OpenStack clients for testing.
# This will pull the latest versions compatible with the
# OpenStack release.
function install_openstack_clients_in_robot_vm() {
    packages=("python-novaclient" "python-neutronclient" "python-openstackclient")
    local os_plugins
    os_plugins=$(csv2ssv "${ENABLE_OS_PLUGINS}")
    for plugin_name in $os_plugins; do
        if [ "$plugin_name" == "networking-sfc" ]; then
            packages+=("networking-sfc")
        fi
    done
    openstack_version=$(echo ${OPENSTACK_BRANCH} | cut -d/ -f2)
    # If the job tests "master", use the clients from the previous stable release to avoid failures.
    if [ "${openstack_version}" == "master" ]; then
        openstack_version=${OPENSTACK_MASTER_CLIENTS_VERSION}
    fi
    for package in ${packages[*]}; do
        echo "Get the currently supported version of the package ${package}"
        wget https://raw.githubusercontent.com/openstack/requirements/stable/${openstack_version}/upper-constraints.txt -O /tmp/constraints.txt 2>/dev/null
        # The python-openstackclient version pinned in rocky conflicts with the version in global-jjb
        # and stops the openstackclient installation in rocky. Remove this once global-jjb changes its version.
        sed -i s/python-openstackclient===3.16.2/python-openstackclient===3.14.0/ /tmp/constraints.txt
        echo "$PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt"
        $PYTHON -m pip install --upgrade --no-deps ${package} --no-cache-dir -c /tmp/constraints.txt
        echo "$PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt"
        $PYTHON -m pip install ${package} --no-cache-dir -c /tmp/constraints.txt
    done

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        # networking-l2gw is not officially available in any release yet; pin the latest stable version.
        $PYTHON -m pip install networking-l2gw==11.0.0
    fi
}
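# For reference: the -c (constraints) flag does not install anything by itself; it
# only pins versions when a package *is* installed. E.g. if upper-constraints.txt
# contained the (hypothetical) line
#   python-novaclient===10.1.0
# then "pip install python-novaclient -c /tmp/constraints.txt" would install exactly
# 10.1.0, keeping the clients consistent with the chosen OpenStack release.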

# Function to install the RDO release repo.
# This helps avoid installing the wrong versions of packages, which causes
# functionality failures.
function install_rdo_release() {
    local ip=$1
    case ${OPENSTACK_BRANCH} in
       *rocky*)
          ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-rocky/rdo-release-rocky-1.noarch.rpm"
          ;;

       *queens*)
          ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm"
          ;;

       master)
          ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-rocky/rdo-release-rocky-1.noarch.rpm"
          ;;
    esac
}

# Involves just setting up the shared directory
function setup_live_migration_control() {
    local control_ip=$1
    printf "%s: Setup directory share with NFS\n" "${control_ip}"
    cat > ${WORKSPACE}/setup_live_migration_control.sh << EOF
sudo mkdir --mode=777 /vm_instances
sudo chown -R jenkins:jenkins /vm_instances
sudo yum install -y nfs-utils
printf "/vm_instances *(rw,no_root_squash)" | sudo tee -a /etc/exports
sudo systemctl start rpcbind nfs-server
sudo exportfs
EOF
    scp ${WORKSPACE}/setup_live_migration_control.sh ${control_ip}:/tmp/setup_live_migration_control.sh
    ssh ${control_ip} "bash /tmp/setup_live_migration_control.sh"
}

# Fix a problem caused by the new libvirt version in the CentOS repo:
# libvirt-python 3.10 does not support all of the newly exposed APIs.
# This fix forces devstack to use the latest libvirt-python
# from pypi.org (latest version as of 06-Dec-2018).
function fix_libvirt_python_build() {
    local ip=$1
    ${SSH} ${ip} "
        cd /opt/stack;
        git clone https://git.openstack.org/openstack/requirements;
        cd requirements;
        git checkout ${ODL_ML2_BRANCH};
        sed -i s/libvirt-python===3.10.0/libvirt-python===4.10.0/ upper-constraints.txt
        "
}

# Involves mounting the share and configuring the libvirtd
function setup_live_migration_compute() {
    local compute_ip=$1
    local control_ip=$2
    printf "%s: Mount shared directory from %s\n" "${compute_ip}" "${control_ip}"
    printf "%s: Configure libvirt in listen mode\n" "${compute_ip}"
    cat > ${WORKSPACE}/setup_live_migration_compute.sh << EOF
sudo yum install -y libvirt libvirt-devel nfs-utils
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tls 0
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' listen_tcp 1
sudo crudini --verbose --set --inplace /etc/libvirt/libvirtd.conf '' auth_tcp '"none"'
sudo crudini --verbose --set --inplace /etc/sysconfig/libvirtd '' LIBVIRTD_ARGS '"--listen"'
sudo mkdir --mode=777 -p /var/instances
sudo chown -R jenkins:jenkins /var/instances
sudo chmod o+x /var/instances
sudo systemctl start rpcbind
sudo mount -t nfs ${control_ip}:/vm_instances /var/instances
sudo mount
EOF
    scp ${WORKSPACE}/setup_live_migration_compute.sh ${compute_ip}:/tmp/setup_live_migration_compute.sh
    ssh ${compute_ip} "bash /tmp/setup_live_migration_compute.sh"
}

# Add enable_services and disable_services to the local.conf
function add_os_services() {
    local core_services=$1
    local enable_services=$2
    local disable_services=$3
    local local_conf_file_name=$4
    local enable_network_services=$5

    cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${core_services}")
EOF
    if [ -n "${enable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${enable_services}")
EOF
    fi
    if [ -n "${disable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
disable_service $(csv2ssv "${disable_services}")
EOF
    fi
    if [ -n "${enable_network_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${enable_network_services}")
EOF
    fi
}
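# csv2ssv (from common-functions.sh) converts, as used here, a comma-separated list
# into a space-separated one. Illustrative call with hypothetical values:
#   add_os_services "dstat,g-api,key" "" "horizon" "${WORKSPACE}/local.conf_test" ""
# would append to local.conf_test:
#   enable_service dstat g-api key
#   disable_service horizon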

function create_control_node_local_conf() {
    HOSTIP=$1
    MGRIP=$2
    ODL_OVS_MANAGERS="$3"

    local_conf_file_name=${WORKSPACE}/local.conf_control_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
LOG_COLOR=False
USE_SYSTEMD=True
RECLONE=${RECLONE}
# Increase the wait used by stack to poll for services
SERVICE_TIMEOUT=120

disable_all_services
EOF

    add_os_services "${CORE_OS_CONTROL_SERVICES}" "${ENABLE_OS_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}" "${ENABLE_OS_NETWORK_SERVICES}"

    cat >> ${local_conf_file_name} << EOF

HOST_IP=${HOSTIP}
SERVICE_HOST=\$HOST_IP
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
NEUTRON_CREATE_INITIAL_NETWORKS=${CREATE_INITIAL_NETWORKS}

ODL_MODE=manual
ODL_MGR_IP=${MGRIP}
ODL_PORT=${ODL_PORT}
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}

NEUTRON_LBAAS_SERVICE_PROVIDERV2=${LBAAS_SERVICE_PROVIDER} # Only relevant if neutron-lbaas plugin is enabled
NEUTRON_SFC_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
NEUTRON_FLOWCLASSIFIER_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
ETCD_PORT=2379
PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ML2_VLAN_RANGES=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF
    if [ "${TENANT_NETWORK_TYPE}" == "local" ]; then
        cat >> ${local_conf_file_name} << EOF
ENABLE_TENANT_TUNNELS=false
EOF
    fi

    if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
        echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
    fi
    IFS=,
    for plugin_name in ${ENABLE_OS_PLUGINS}; do
        if [ "$plugin_name" == "networking-odl" ]; then
            ENABLE_PLUGIN_ARGS="${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}"
        elif [ "$plugin_name" == "kuryr-kubernetes" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_KUBERNETES_PLUGIN_REPO} master" # note: kuryr-kubernetes only exists in master at the moment
        elif [ "$plugin_name" == "neutron-lbaas" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_LBAAS_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
            IS_LBAAS_PLUGIN_ENABLED="yes"
        elif [ "$plugin_name" == "networking-sfc" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
            IS_SFC_PLUGIN_ENABLED="yes"
        else
            echo "Error: plugin ${plugin_name} is invalid or unsupported"
            continue
        fi
        cat >> ${local_conf_file_name} << EOF

enable_plugin ${plugin_name} ${ENABLE_PLUGIN_ARGS}
EOF
    done
    unset IFS

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default
EOF
    fi

    if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
        SERVICE_PLUGINS="odl-router_v2"
    else
        SERVICE_PLUGINS="odl-router"
    fi
    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        SERVICE_PLUGINS+=", networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
    fi
    if [ "${IS_LBAAS_PLUGIN_ENABLED}" == "yes" ]; then
        SERVICE_PLUGINS+=", lbaasv2"
    fi
    if [ "${IS_SFC_PLUGIN_ENABLED}" == "yes" ]; then
        SERVICE_PLUGINS+=", networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,networking_sfc.services.sfc.plugin.SfcPlugin"
    fi

    cat >> ${local_conf_file_name} << EOF

[[post-config|\$NEUTRON_CONF]]
[DEFAULT]
service_plugins = ${SERVICE_PLUGINS}
log_dir = /opt/stack/logs

[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
[agent]
minimize_polling=True

[ml2]
# Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
# MTU(1400) + VXLAN(50) + VLAN(4) = 1454 < MTU eth0/br-physnet1(1458)
physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1400
path_mtu = 1458
EOF
    if [ "${ENABLE_GRE_TYPE_DRIVERS}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF
type_drivers = local,flat,vlan,gre,vxlan
[ml2_type_gre]
tunnel_id_ranges = 1:1000
EOF
    fi
    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

[ml2_odl]
enable_dhcp_service = True
EOF
    fi

    cat >> ${local_conf_file_name} << EOF

[ml2_odl]
# Trigger n-odl full sync every 30 secs.
maintenance_interval = 30

[[post-config|/etc/neutron/dhcp_agent.ini]]
[DEFAULT]
force_metadata = True
enable_isolated_metadata = True
log_dir = /opt/stack/logs

[[post-config|/etc/nova/nova.conf]]
[scheduler]
discover_hosts_in_cells_interval = 30

[DEFAULT]
force_config_drive = False
force_raw_images = False
log_dir = /opt/stack/logs

EOF

    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        cat >> ${local_conf_file_name} << EOF
use_neutron = True
force_raw_images = False
log_dir = /opt/stack/logs
[libvirt]
live_migration_uri = qemu+tcp://%s/system
virt_type = qemu
EOF
    fi

    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        echo "Combo local.conf created:"
    else
        echo "Control local.conf created:"
    fi
    cat ${local_conf_file_name}
} # create_control_node_local_conf()

function create_compute_node_local_conf() {
    HOSTIP=$1
    SERVICEHOST=$2
    MGRIP=$3
    ODL_OVS_MANAGERS="$4"

    local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
LOG_COLOR=False
USE_SYSTEMD=True
RECLONE=${RECLONE}
# Increase the wait used by stack to poll for the nova service on the control node
NOVA_READY_TIMEOUT=1800

disable_all_services
EOF

    add_os_services "${CORE_OS_COMPUTE_SERVICES}" "${ENABLE_OS_COMPUTE_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}"

    cat >> ${local_conf_file_name} << EOF
HOST_IP=${HOSTIP}
SERVICE_HOST=${SERVICEHOST}
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}

ODL_MODE=manual
ODL_MGR_IP=${MGRIP}
ODL_PORT=${ODL_PORT}
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

Q_HOST=\$SERVICE_HOST
MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}

PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF

    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-odl ]]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
EOF
    fi

    cat >> ${local_conf_file_name} << EOF

[[post-config|/etc/nova/nova.conf]]
[api]
auth_strategy = keystone
[DEFAULT]
use_neutron = True
force_raw_images = False
log_dir = /opt/stack/logs
[libvirt]
live_migration_uri = qemu+tcp://%s/system
virt_type = qemu
EOF

    echo "Compute local.conf created:"
    cat ${local_conf_file_name}
} # create_compute_node_local_conf()

function configure_haproxy_for_neutron_requests() {
    local -r haproxy_ip=$1
    # shellcheck disable=SC2206
    local -r odl_ips=(${2//,/ })

    cat > ${WORKSPACE}/install_ha_proxy.sh << EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

    cat > ${WORKSPACE}/haproxy.cfg << EOF
global
  daemon
  group  haproxy
  log  /dev/log local0 debug
  maxconn  20480
  pidfile  /tmp/haproxy.pid
  ssl-default-bind-ciphers  !SSLv2:kEECDH:kRSA:kEDH:kPSK:+3DES:!aNULL:!eNULL:!MD5:!EXP:!RC4:!SEED:!IDEA:!DES
  ssl-default-bind-options  no-sslv3 no-tlsv10
  stats  socket /var/lib/haproxy/stats mode 600 level user
  stats  timeout 2m
  user  haproxy

defaults
  log  global
  option  log-health-checks
  maxconn  4096
  mode  tcp
  retries  3
  timeout  http-request 10s
  timeout  queue 2m
  timeout  connect 5s
  timeout  client 5s
  timeout  server 5s

listen opendaylight
  bind ${haproxy_ip}:8181 transparent
  mode http
  http-request set-header X-Forwarded-Proto https if { ssl_fc }
  http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
  option httpchk GET /diagstatus
  option httplog
EOF

    odlindex=1
    for odlip in ${odl_ips[*]}; do
        echo "  server opendaylight-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2" >> ${WORKSPACE}/haproxy.cfg
        odlindex=$((odlindex+1))
    done

    cat >> ${WORKSPACE}/haproxy.cfg << EOF

listen opendaylight_ws
  bind ${haproxy_ip}:8185 transparent
  mode http
  timeout tunnel 3600s
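  # Health check (below): send a WebSocket upgrade request and expect HTTP 101
  # (Switching Protocols), so only members that accept the upgrade stay in rotation.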
  option httpchk GET /data-change-event-subscription/neutron:neutron/neutron:ports/datastore=OPERATIONAL/scope=SUBTREE HTTP/1.1\r\nHost:\ ws.opendaylight.org\r\nConnection:\ Upgrade\r\nUpgrade:\ websocket\r\nSec-WebSocket-Key:\ haproxy\r\nSec-WebSocket-Version:\ 13\r\nSec-WebSocket-Protocol:\ echo-protocol
  http-check expect status 101
EOF

    odlindex=1
    for odlip in ${odl_ips[*]}; do
        echo "  server opendaylight-ws-${odlindex} ${odlip}:8185 check fall 3 inter 1000 rise 2" >> ${WORKSPACE}/haproxy.cfg
        odlindex=$((odlindex+1))
    done

    echo "Dump haproxy.cfg"
    cat ${WORKSPACE}/haproxy.cfg

    cat > ${WORKSPACE}/deploy_ha_proxy.sh << EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sleep 3
sudo netstat -tunpl
sudo systemctl status haproxy
true
EOF

    scp ${WORKSPACE}/install_ha_proxy.sh ${haproxy_ip}:/tmp
    ${SSH} ${haproxy_ip} "sudo bash /tmp/install_ha_proxy.sh"
    scp ${WORKSPACE}/haproxy.cfg ${haproxy_ip}:/tmp
    scp ${WORKSPACE}/deploy_ha_proxy.sh ${haproxy_ip}:/tmp
    ${SSH} ${haproxy_ip} "sudo bash /tmp/deploy_ha_proxy.sh"
} # configure_haproxy_for_neutron_requests()

# The following three functions are debugging helpers used when debugging devstack changes.
# Keeping them for now so we can simply call them when needed.
ctrlhn=""
comp1hn=""
comp2hn=""
function get_hostnames () {
    set +e
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
    local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}
    ctrlhn=$(${SSH} ${ctrlip} "hostname")
    comp1hn=$(${SSH} ${comp1ip} "hostname")
    comp2hn=$(${SSH} ${comp2ip} "hostname")
    echo "hostnames: ${ctrlhn}, ${comp1hn}, ${comp2hn}"
    set -e
}

function check_firewall() {
    set +e
    echo $-
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}
    local comp2ip=${OPENSTACK_COMPUTE_NODE_2_IP}

    echo "check_firewall on control"
    ${SSH} ${ctrlip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    " || true
    echo "check_firewall on compute 1"
    ${SSH} ${comp1ip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    " || true
    echo "check_firewall on compute 2"
    ${SSH} ${comp2ip} "
        sudo systemctl status firewalld
        sudo systemctl -l status iptables
        sudo iptables --line-numbers -nvL
    " || true
}

function get_service () {
    set +e
    local iter=$1
    #local idx=$2
    local ctrlip=${OPENSTACK_CONTROL_NODE_1_IP}
    local comp1ip=${OPENSTACK_COMPUTE_NODE_1_IP}

    #if [ ${idx} -eq 1 ]; then
        if [ ${iter} -eq 1 ] || [ ${iter} -gt 16 ]; then
            curl http://${ctrlip}:5000
            curl http://${ctrlip}:35357
            curl http://${ctrlip}/identity
            ${SSH} ${ctrlip} "
                source /opt/stack/devstack/openrc admin admin;
                env
                openstack configuration show --unmask;
                openstack service list
                openstack --os-cloud devstack-admin --os-region RegionOne compute service list
                openstack hypervisor list;
            " || true
            check_firewall
        fi
    #fi
    set -e
}

# Check if rabbitmq is ready by listing its vhosts and looking for nova_cell1.
# The function returns the status of the grep command, which callers can check.
function is_rabbitmq_ready() {
    local -r ip=${1}
    local grepfor="nova_cell1"
    rm -f rabbit.txt
    ${SSH} ${ip} "sudo rabbitmqctl list_vhosts" > rabbit.txt
    grep ${grepfor} rabbit.txt
}
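# For illustration, once the control node's stack.sh has created the cell,
# "rabbitmqctl list_vhosts" would list something like:
#   /
#   nova_cell1
# so the grep succeeds (rc 0) and the node is considered ready.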

# Retry the given command ($3) until success for a number of iterations ($1),
# sleeping ($2) seconds between tries.
function retry() {
    local -r -i max_tries=${1}
    local -r -i sleep_time=${2}
    local -r cmd=${3}
    local -i retries=1
    local -i rc=1
    while true; do
        echo "retry ${cmd}: attempt: ${retries}"
        ${cmd}
        rc=$?
        if ((${rc} == 0)); then
            break;
        else
            if ((${retries} == ${max_tries})); then
                break
            else
                ((retries++))
                sleep ${sleep_time}
            fi
        fi
    done
    return ${rc}
}
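# Example usage (hypothetical target): wait up to 5 minutes for an SSH port,
# checking once every 10 seconds:
#   retry 30 10 "nc -z 10.30.170.12 22"
# Note that ${cmd} is word-split rather than eval'd, so it must be a simple
# command with arguments (no pipes or shell operators).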

function install_ovs() {
    local -r node=${1}
    local -r rpm_path=${2}

    if [ "${OVS_INSTALL:0:1}" = "v" ]; then
       # An OVS version was given, so we build it ourselves from the OVS git repo.
       # Only on the first node though; consecutive nodes will use the RPMs
       # built for the first one.
       [ ! -d "${rpm_path}" ] && mkdir -p "${rpm_path}" && build_ovs ${node} ${OVS_INSTALL} "${rpm_path}"
       # Install OVS from path
       install_ovs_from_path ${node} "${rpm_path}"
    elif [ "${OVS_INSTALL:0:4}" = "http" ]; then
       # Otherwise, install from an rpm repo directly.
       install_ovs_from_repo ${node} ${OVS_INSTALL}
    else
       echo "Expected either an OVS version git tag or a repo http url"
       exit 1
    fi
}
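# build_ovs, install_ovs_from_path and install_ovs_from_repo are assumed to be
# provided by the sourced common-functions.sh. Illustrative (hypothetical) values
# for the two accepted OVS_INSTALL forms:
#   OVS_INSTALL=v2.9.2                        -> build RPMs from the v2.9.2 git tag
#   OVS_INSTALL=http://example.org/ovs-rpms/  -> install directly from that rpm repo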

ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
RECLONE=False
ODL_PORT=8181

# Always compare the lists below against the devstack upstream ENABLED_SERVICES in
# https://github.com/openstack-dev/devstack/blob/master/stackrc#L52
# ODL CSIT does not use vnc, cinder, q-agt, q-l3 or horizon so they are not included below.
# collect performance stats
CORE_OS_CONTROL_SERVICES="dstat"
# Glance
CORE_OS_CONTROL_SERVICES+=",g-api,g-reg"
# Keystone
CORE_OS_CONTROL_SERVICES+=",key"
# Nova - services to support libvirt
CORE_OS_CONTROL_SERVICES+=",n-api,n-api-meta,n-cauth,n-cond,n-crt,n-obj,n-sch"
# ODL - services to connect to ODL
CORE_OS_CONTROL_SERVICES+=",odl-compute,odl-neutron"
# Additional services
CORE_OS_CONTROL_SERVICES+=",mysql,rabbit"

# collect performance stats
CORE_OS_COMPUTE_SERVICES="dstat"
# computes only need nova and odl
CORE_OS_COMPUTE_SERVICES+=",n-cpu,odl-compute"

cat > ${WORKSPACE}/disable_firewall.sh << EOF
sudo systemctl stop firewalld
# Open these ports to match the tutorial vms
# http/https (80/443), samba (445), netbios (137,138,139)
sudo iptables -I INPUT -p tcp -m multiport --dports 80,443,139,445 -j ACCEPT
sudo iptables -I INPUT -p udp -m multiport --dports 137,138 -j ACCEPT
# OpenStack services as well as vxlan tunnel ports 4789 and 9876
# identity public/admin (5000/35357), amqp (5672), vnc (6080), nova (8774), glance (9292), neutron (9696)
sudo iptables -I INPUT -p tcp -m multiport --dports 5000,5672,6080,8774,9292,9696,35357 -j ACCEPT
sudo iptables -I INPUT -p udp -m multiport --dports 4789,9876 -j ACCEPT
sudo iptables-save > /etc/sysconfig/iptables
sudo systemctl restart iptables
sudo iptables --line-numbers -nvL
true
EOF

# For SFC tests a larger partition is required for creating instances with Ubuntu.
if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
   TMPFS_SIZE=12G
fi
cat > ${WORKSPACE}/get_devstack.sh << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils python-pip -y
#sudo systemctl stop NetworkManager
# Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
# Workaround for mysql failure
echo "127.0.0.1   localhost \${HOSTNAME}" >> /tmp/hosts
echo "::1         localhost \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo mkdir /opt/stack
echo "Create RAM disk for /opt/stack"
sudo mount -t tmpfs -o size=${TMPFS_SIZE} tmpfs /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
echo "git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}"
git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}
cd devstack
if [ -n "${DEVSTACK_HASH}" ]; then
    echo "git checkout ${DEVSTACK_HASH}"
    git checkout ${DEVSTACK_HASH}
fi
wget https://raw.githubusercontent.com/shague/odl_tools/master/fix-logging.patch.txt -O /tmp/fix-logging.patch.txt
patch --verbose -p1 -i /tmp/fix-logging.patch.txt
git --no-pager log --pretty=format:'%h %<(13)%ar%<(13)%cr %<(20,trunc)%an%d %s%b' -n20
echo

echo "workaround: do not upgrade openvswitch"
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add openvswitch
EOF

cat > "${WORKSPACE}/setup_host_cell_mapping.sh" << EOF
sudo nova-manage cell_v2 map_cell0
sudo nova-manage cell_v2 simple_cell_setup
sudo nova-manage db sync
sudo nova-manage cell_v2 discover_hosts
EOF

cat > "${WORKSPACE}/workaround_networking_sfc.sh" << EOF
cd /opt/stack
git clone https://git.openstack.org/openstack/networking-sfc
cd networking-sfc
git checkout ${OPENSTACK_BRANCH}
git checkout master -- devstack/plugin.sh
EOF

NUM_OPENSTACK_SITES=${NUM_OPENSTACK_SITES:-1}
compute_index=1
os_node_list=()

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    echo "Configure HAProxy"
    ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_1_IP
    ODL_IP_PARAM1=ODL_SYSTEM_1_IP
    ODL_IP_PARAM2=ODL_SYSTEM_2_IP
    ODL_IP_PARAM3=ODL_SYSTEM_3_IP
    ODLMGRIP=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses the HAProxy VIP
    ODL_OVS_MGRS="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs
    configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS}"
else
    ODL_IP_PARAM=ODL_SYSTEM_1_IP
    ODLMGRIP=${!ODL_IP_PARAM} # ODL Northbound uses the ODL IP
    ODL_OVS_MGRS="${!ODL_IP_PARAM}" # OVSDB connects to the ODL IP
fi

os_ip_list=()
for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
    cip=OPENSTACK_CONTROL_NODE_${i}_IP
    ip=${!cip}
    os_ip_list+=("${ip}")
done

for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
    cip=OPENSTACK_COMPUTE_NODE_${i}_IP
    ip=${!cip}
    os_ip_list+=("${ip}")
done

for i in "${!os_ip_list[@]}"; do
    ip=${os_ip_list[i]}
    tcpdump_start "${i}" "${ip}" "port 6653"
done


# Begin stacking the nodes, starting with the controller(s) and then the compute(s)

for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
    CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
    echo "Configure the stack of the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${!CONTROLIP}"
    scp ${WORKSPACE}/disable_firewall.sh ${!CONTROLIP}:/tmp
    ${SSH} ${!CONTROLIP} "sudo bash /tmp/disable_firewall.sh"
    create_etc_hosts ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!CONTROLIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!CONTROLIP}:/tmp
    # devstack master is yet to migrate fully to lib/neutron; there are some ugly hacks that are
    # affecting the stacking.
    # Workaround for Queens: make the physical network physnet1 in lib/neutron.
    # In Queens the new neutron libs are used and do not have the following options from Pike and earlier:
    # Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS could be used for the flat_networks
    # and Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS could be used for the ml2_type_vlan
    ${SSH} ${!CONTROLIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    ssh ${!CONTROLIP} "sed -i 's/flat_networks public/flat_networks public,physnet1/' /opt/stack/devstack/lib/neutron"
    ssh ${!CONTROLIP} "sed -i '186i iniset \$NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges public:1:4094,physnet1:1:4094' /opt/stack/devstack/lib/neutron"
    # Workaround for networking-sfc to configure the parameters in neutron.conf when the
    # services used are neutron-api, neutron-dhcp etc. instead of q-agt.
    # Can be removed if the patch https://review.openstack.org/#/c/596287/ gets merged.
    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
        scp ${WORKSPACE}/workaround_networking_sfc.sh ${!CONTROLIP}:/tmp/
        ssh ${!CONTROLIP} "bash -x /tmp/workaround_networking_sfc.sh"
    fi
    create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}"
    scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf
    echo "Install rdo release to avoid incompatible package versions"
    install_rdo_release ${!CONTROLIP}
    setup_live_migration_control ${!CONTROLIP}
    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        setup_live_migration_compute ${!CONTROLIP} ${!CONTROLIP}
    fi
    [ -n "${OVS_INSTALL}" ] && install_ovs ${!CONTROLIP} /tmp/ovs_rpms
    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
        # This should really be done by the networking-odl devstack plugin,
        # but in the meantime do it ourselves.
        ssh ${!CONTROLIP} "sudo ovs-vsctl set Open_vSwitch . external_ids:of-tunnel=true"
    fi
    fix_libvirt_python_build ${!CONTROLIP}
    echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${!CONTROLIP}"
    # Workaround: fixing boneheaded polkit issue, to be removed later
    ssh ${!CONTROLIP} "sudo bash -c 'echo deltarpm=0 >> /etc/yum.conf && yum -y update polkit'"
    ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
    ssh ${!CONTROLIP} "ls -lrt /opt/stack/devstack/nohup.out"
    os_node_list+=("${!CONTROLIP}")
done

# This is a backup to the CELLSV2_SETUP=singleconductor workaround. Keeping it here as an easy lookup
# if needed.
# Let the control node get started, to avoid a race condition where the computes start and try to access
# nova_cell1 on the control node before it is created. If that happens, the nova-compute service on the
# compute exits and does not attempt to restart.
# 180s is chosen because in test runs the control node usually finished in 17-20 minutes and the computes finished
# in 17 minutes, so take the max difference of 3 minutes and the jobs should still finish around the same time.
# One of the following errors is seen in the compute n-cpu.log:
# Unhandled error: NotAllowed: Connection.open: (530) NOT_ALLOWED - access to vhost 'nova_cell1' refused for user 'stackrabbit'
# AccessRefused: (0, 0): (403) ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.
# Compare that timestamp to this log in the control stack.log: sudo rabbitmqctl set_permissions -p nova_cell1 stackrabbit
# If the n-cpu.log is earlier than the control stack.log timestamp then the failure condition is likely hit.
if [ ${NUM_OPENSTACK_COMPUTE_NODES} -gt 0 ]; then
    WAIT_FOR_RABBITMQ_MINUTES=60
    echo "Wait a maximum of ${WAIT_FOR_RABBITMQ_MINUTES}m until rabbitmq is ready and nova_cell1 is created, so the controller can create nova_cell1 before the computes need it"
    set +e
    retry ${WAIT_FOR_RABBITMQ_MINUTES} 60 "is_rabbitmq_ready ${OPENSTACK_CONTROL_NODE_1_IP}"
    rc=$?
    set -e
    if ((${rc} == 0)); then
        echo "rabbitmq is ready, starting ${NUM_OPENSTACK_COMPUTE_NODES} compute(s)"
    else
        echo "rabbitmq was not ready in ${WAIT_FOR_RABBITMQ_MINUTES}m"
        exit 1
    fi
fi

for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP
    echo "Configure the stack of the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
    scp ${WORKSPACE}/disable_firewall.sh "${!COMPUTEIP}:/tmp"
    ${SSH} "${!COMPUTEIP}" "sudo bash /tmp/disable_firewall.sh"
    create_etc_hosts ${!COMPUTEIP} ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}"
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    echo "Install rdo release to avoid incompatible package versions"
    install_rdo_release ${!COMPUTEIP}
    setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP}
    [ -n "${OVS_INSTALL}" ] && install_ovs ${!COMPUTEIP} /tmp/ovs_rpms
    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
        # This should really be done by the networking-odl devstack plugin,
        # but in the meantime do it ourselves.
        ssh ${!COMPUTEIP} "sudo ovs-vsctl set Open_vSwitch . external_ids:of-tunnel=true"
    fi
    fix_libvirt_python_build ${!COMPUTEIP}
    echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=("${!COMPUTEIP}")
done

echo "nodelist: ${os_node_list[*]}"

# This script runs on the openstack nodes. It greps for a string that devstack writes when stacking is complete.
# The script then writes a status depending on the grep output that is later scraped by the robot vm to control
# the status polling.
cat > ${WORKSPACE}/check_stacking.sh << EOF
> /tmp/stack_progress
ps -ef | grep "stack.sh" | grep -v grep
ret=\$?
if [ \${ret} -eq 1 ]; then
    grep "This is your host IP address:" /opt/stack/devstack/nohup.out
    if [ \$? -eq 0 ]; then
        echo "Stacking Complete" > /tmp/stack_progress
    else
        echo "Stacking Failed" > /tmp/stack_progress
    fi
elif [ \${ret} -eq 0 ]; then
    echo "Still Stacking" > /tmp/stack_progress
fi
EOF

# devstack debugging
# get_hostnames

# Check if the stacking is finished. Poll all nodes every 60s for one hour.
iteration=0
in_progress=1
while [ ${in_progress} -eq 1 ]; do
    iteration=$(($iteration + 1))
    for index in "${!os_node_list[@]}"; do
        echo "node $index ${os_node_list[index]}: checking stacking status attempt ${iteration} of 60"
        scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
        ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
        scp ${os_node_list[index]}:/tmp/stack_progress .
        cat stack_progress
        stacking_status=`cat stack_progress`
        # devstack debugging
        # get_service "${iteration}" "${index}"
        if [ "$stacking_status" == "Still Stacking" ]; then
            continue
        elif [ "$stacking_status" == "Stacking Failed" ]; then
            echo "node $index ${os_node_list[index]}: stacking has failed"
            exit 1
        elif [ "$stacking_status" == "Stacking Complete" ]; then
            echo "node $index ${os_node_list[index]}: stacking complete"
            unset 'os_node_list[index]'
            if [ ${#os_node_list[@]} -eq 0 ]; then
                in_progress=0
            fi
        fi
    done
    echo "sleep for a minute before the next check"
    sleep 60
    if [ ${iteration} -eq 60 ]; then
        echo "stacking has failed - took longer than 60m"
        exit 1
    fi
done

# Further configuration now that stacking is complete.
echo "Configure the Control Node"
CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP
# Gather Compute IPs for the site
for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
    IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
    COMPUTE_IPS[$((i-1))]=${!IP_VAR}
done

echo "sleep for 60s and print hypervisor-list"
sleep 60
${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
# If we are running openstack (control + compute) all in one node, then the number of hypervisors
# will be the same as the number of openstack systems. However, if we are running multinode openstack,
# the assumption is that we have a single control node and the rest are compute nodes, so the number of
# expected hypervisors is one less than the total number of openstack systems.
if [ ${NUM_OPENSTACK_SYSTEM} -eq 1 ]; then
    expected_num_hypervisors=1
else
    expected_num_hypervisors=${NUM_OPENSTACK_COMPUTE_NODES}
    if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then
        expected_num_hypervisors=$((expected_num_hypervisors + 1))
    fi
fi
num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
    echo "Error: Only ${num_hypervisors} hypervisors detected, expected ${expected_num_hypervisors}"
    exit 1
fi

# External Network
echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
# FIXME Should there be a unique gateway IP and devstack index for each site?
devstack_index=1
for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
    # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
    ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
    ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
done

# ipsec support
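# For every pair of nodes, the block below installs transport-mode ESP state with a
# random key and SPI in both directions, plus xfrm policies matching UDP traffic
# (i.e. the VXLAN tunnels), so inter-node tunnel traffic is encrypted without
# changing any addressing.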
if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then
    # shellcheck disable=SC2206
    ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]})
    for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do
        for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do
            KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null | xxd -p -c 64)
            KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null | xxd -p -c 64)
            ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null | xxd -p -c 8)
            ip1=${ALL_NODES[$inx_ip1]}
            ip2=${ALL_NODES[$inx_ip2]}
            ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
            ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
            ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
            ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"

            ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
            ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
            ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"
            ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
        done
    done

    for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
        echo "ip xfrm configuration for node $ip:"
        ${SSH} $ip "sudo ip xfrm policy list"
        ${SSH} $ip "sudo ip xfrm state list"
    done
fi

# Control Node - PUBLIC_BRIDGE will act as the external router
# Parameter values below are used in integration/test - changing them requires updates in integration/test as well
EXTNET_GATEWAY_IP="10.10.10.250"
EXTNET_INTERNET_IP="10.9.9.9"
EXTNET_PNF_IP="10.10.10.253"
${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24"

# Control Node - external net PNF simulation
${SSH} ${!CONTROLIP} "
    sudo ip netns add pnf_ns;
    sudo ip link add pnf_veth0 type veth peer name pnf_veth1;
    sudo ip link set pnf_veth1 netns pnf_ns;
    sudo ip link set pnf_veth0 up;
    sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24;
    sudo ovs-vsctl add-port ${PUBLIC_BRIDGE} pnf_veth0;
"

# Control Node - external net internet address simulation
${SSH} ${!CONTROLIP} "
    sudo ip tuntap add dev internet_tap mode tap;
    sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24;
"

# Computes
compute_index=1
for compute_ip in ${COMPUTE_IPS[*]}; do
    # Tunnel from controller to compute
    COMPUTEPORT=compute$(( compute_index++ ))_vxlan
    ${SSH} ${!CONTROLIP} "
        sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow
    "
    # Tunnel from compute to controller
    CONTROLPORT="control_vxlan"
    ${SSH} $compute_ip "
        sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow
    "
done

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    odlmgrip=OPENSTACK_HAPROXY_1_IP
    HA_PROXY_IP=${!odlmgrip}
    HA_PROXY_1_IP=${!odlmgrip}
    odlmgrip2=OPENSTACK_HAPROXY_2_IP
    HA_PROXY_2_IP=${!odlmgrip2}
    odlmgrip3=OPENSTACK_HAPROXY_3_IP
    HA_PROXY_3_IP=${!odlmgrip3}
else
    HA_PROXY_IP=${ODL_SYSTEM_IP}
    HA_PROXY_1_IP=${ODL_SYSTEM_1_IP}
    HA_PROXY_2_IP=${ODL_SYSTEM_2_IP}
    HA_PROXY_3_IP=${ODL_SYSTEM_3_IP}
fi

get_test_suites SUITES

# Install all the client versions required for this job's testing
install_openstack_clients_in_robot_vm

# TODO: run openrc on the control node and then scrape the vars from it
# Environment variables needed to execute the OpenStack client for NetVirt jobs
cat > /tmp/os_netvirt_client_rc << EOF
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL="http://${!CONTROLIP}/identity"
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export OS_TENANT_NAME=admin
unset OS_CLOUD
EOF

source /tmp/os_netvirt_client_rc

echo "Get all versions before executing robot"
echo "openstack --version"
which openstack
openstack --version
echo "nova --version"
which nova
nova --version
echo "neutron --version"
which neutron
neutron --version

stacktime=$(timer $totaltmr)
printf "Stacking elapsed time: %s\n" "${stacktime}"

echo "Starting Robot test suites ${SUITES} ..."
# please add robot -v arguments one per line and alphabetized
suite_num=0
for suite in ${SUITES}; do
    # prepend an incremental counter to the suite name so that the full robot log combining all the suites, as is done
    # in the rebot step below, will list all the suites in chronological order, since rebot seems to alphabetize them
    let "suite_num = suite_num + 1"
    suite_index="$(printf %02d ${suite_num})"
    suite_name="$(basename ${suite} | cut -d. -f1)"
    log_name="${suite_index}_${suite_name}"
    robot -N ${log_name} \
        -c critical -e exclude -e skip_if_${DISTROSTREAM} \
        --log log_${log_name}.html --report report_${log_name}.html --output output_${log_name}.xml \
        --removekeywords wuks \
        --removekeywords name:SetupUtils.Setup_Utils_For_Setup_And_Teardown \
        --removekeywords name:SetupUtils.Setup_Test_With_Logging_And_Without_Fast_Failing \
        --removekeywords name:OpenStackOperations.Add_OVS_Logging_On_All_OpenStack_Nodes \
        -v BUNDLEFOLDER:${BUNDLEFOLDER} \
        -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} \
        -v CMP_INSTANCES_SHARED_PATH:/var/instances \
        -v CONTROLLERFEATURES:"${CONTROLLERFEATURES}" \
        -v CONTROLLER_USER:${USER} \
        -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
        -v ENABLE_ITM_DIRECT_TUNNELS:${ENABLE_ITM_DIRECT_TUNNELS} \
        -v HA_PROXY_IP:${HA_PROXY_IP} \
        -v HA_PROXY_1_IP:${HA_PROXY_1_IP} \
        -v HA_PROXY_2_IP:${HA_PROXY_2_IP} \
        -v HA_PROXY_3_IP:${HA_PROXY_3_IP} \
        -v JDKVERSION:${JDKVERSION} \
        -v JENKINS_WORKSPACE:${WORKSPACE} \
        -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
        -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
        -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
        -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
        -v ODL_SNAT_MODE:${ODL_SNAT_MODE} \
        -v ODL_STREAM:${DISTROSTREAM} \
        -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
        -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
        -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
        -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
        -v ODL_SYSTEM_4_IP:${ODL_SYSTEM_4_IP} \
        -v ODL_SYSTEM_5_IP:${ODL_SYSTEM_5_IP} \
        -v ODL_SYSTEM_6_IP:${ODL_SYSTEM_6_IP} \
        -v ODL_SYSTEM_7_IP:${ODL_SYSTEM_7_IP} \
        -v ODL_SYSTEM_8_IP:${ODL_SYSTEM_8_IP} \
        -v ODL_SYSTEM_9_IP:${ODL_SYSTEM_9_IP} \
        -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
        -v OS_CONTROL_NODE_1_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
        -v OS_CONTROL_NODE_2_IP:${OPENSTACK_CONTROL_NODE_2_IP} \
        -v OS_CONTROL_NODE_3_IP:${OPENSTACK_CONTROL_NODE_3_IP} \
        -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
        -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
        -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
        -v OS_COMPUTE_3_IP:${OPENSTACK_COMPUTE_NODE_3_IP} \
        -v OS_COMPUTE_4_IP:${OPENSTACK_COMPUTE_NODE_4_IP} \
        -v OS_COMPUTE_5_IP:${OPENSTACK_COMPUTE_NODE_5_IP} \
        -v OS_COMPUTE_6_IP:${OPENSTACK_COMPUTE_NODE_6_IP} \
        -v OPENSTACK_TOPO:${OPENSTACK_TOPO} \
        -v OS_USER:${USER} \
        -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
        -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
        -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
        -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
        -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
        -v TOOLS_SYSTEM_3_IP:${TOOLS_SYSTEM_3_IP} \
        -v USER_HOME:${HOME} \
        -v WORKSPACE:/tmp \
        ${TESTOPTIONS} ${suite} || true
done
# rebot exit codes seem to be different
rebot --output ${WORKSPACE}/output.xml --log log_full.html --report report.html -N openstack output_*.xml || true

echo "Examining the files in data/log and checking file size"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

echo "Tests Executed"
printf "Total elapsed time: %s, stacking time: %s\n" "$(timer $totaltmr)" "${stacktime}"
true  # perhaps Jenkins is testing last exit code
# vim: ts=4 sw=4 sts=4 et ft=sh :