#@IgnoreInspection BashAddShebang
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh
# script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source ${ROBOT_VENV}/bin/activate
PYTHON="${ROBOT_VENV}/bin/python"
SSH="ssh -t -t"
ADMIN_PASSWORD=admin

# TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
echo "showing recent changes that made it into the distribution used by this job"
$PYTHON -m pip install --upgrade urllib3
python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
                  -u ${ACTUAL_BUNDLE_URL} -b ${DISTROBRANCH} \
                  -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true

echo "#################################################"
echo "##         Deploy Openstack 3-node             ##"
echo "#################################################"

# Catch command errors and collect logs.
# This ensures logs are collected when script commands fail rather than simply exiting.
function trap_handler() {
    local prog="$0"
    local lastline="$1"
    local lasterr="$2"
    echo "${prog}: line ${lastline}: exit status of last command: ${lasterr}"
    echo "command: ${BASH_COMMAND}"
    collect_logs
    exit 1
} # trap_handler()

trap 'trap_handler ${LINENO} $?' ERR

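# Illustrative only: with the ERR trap above, a failing command such as "false"
# would print something like
#   <script>: line <n>: exit status of last command: 1
#   command: false
# and then run collect_logs before exiting.
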
function create_etc_hosts() {
    NODE_IP=$1
    CTRL_IP=$2
    : > ${WORKSPACE}/hosts_file
    for iter in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`
    do
        COMPUTE_IP=OPENSTACK_COMPUTE_NODE_${iter}_IP
        if [ "${!COMPUTE_IP}" == "${NODE_IP}" ]; then
            CONTROL_HNAME=$(${SSH} ${CTRL_IP} "hostname")
            echo "${CTRL_IP}   ${CONTROL_HNAME}" >> ${WORKSPACE}/hosts_file
        else
            COMPUTE_HNAME=$(${SSH} ${!COMPUTE_IP} "hostname")
            echo "${!COMPUTE_IP}   ${COMPUTE_HNAME}" >> ${WORKSPACE}/hosts_file
        fi
    done

    echo "Created the hosts file for ${NODE_IP}:"
    cat ${WORKSPACE}/hosts_file
} # create_etc_hosts()

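# Note on the ${!COMPUTE_IP}-style expansions used above and throughout this script:
# this is bash indirect expansion. Illustrative example only:
#   COMPUTE_IP=OPENSTACK_COMPUTE_NODE_1_IP   # holds the *name* of another variable
#   echo "${!COMPUTE_IP}"                    # prints the value of OPENSTACK_COMPUTE_NODE_1_IP
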
# convert commas in csv strings to spaces (ssv)
function csv2ssv() {
    local csv=$1
    if [ -n "${csv}" ]; then
        ssv=$(echo ${csv} | sed 's/,/ /g' | sed 's/\ \ */\ /g')
    fi

    echo "${ssv}"
} # csv2ssv
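# Example (illustrative): csv2ssv "key,rabbit,,mysql" prints "key rabbit mysql".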

# Add enable_services and disable_services to the local.conf
function add_os_services() {
    local core_services=$1
    local enable_services=$2
    local disable_services=$3
    local local_conf_file_name=$4

    cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${core_services}")
EOF
    if [ -n "${enable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
enable_service $(csv2ssv "${enable_services}")
EOF
    fi
    if [ -n "${disable_services}" ]; then
        cat >> ${local_conf_file_name} << EOF
disable_service $(csv2ssv "${disable_services}")
EOF
    fi
}

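# Illustrative only: add_os_services "key,rabbit" "tempest" "" local.conf would append
#   enable_service key rabbit
#   enable_service tempest
# and no disable_service line, since the third argument is empty.
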
function create_control_node_local_conf() {
    HOSTIP=$1
    MGRIP=$2
    ODL_OVS_MANAGERS="$3"

    local_conf_file_name=${WORKSPACE}/local.conf_control_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
USE_SCREEN=True
SCREEN_LOGDIR=/opt/stack/data/log
LOG_COLOR=False
RECLONE=${RECLONE}

disable_all_services
EOF

    add_os_services "${CORE_OS_CONTROL_SERVICES}" "${ENABLE_OS_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}"

    cat >> ${local_conf_file_name} << EOF

HOST_IP=${HOSTIP}
SERVICE_HOST=\$HOST_IP
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
NEUTRON_CREATE_INITIAL_NETWORKS=${CREATE_INITIAL_NETWORKS}

ODL_MODE=manual
ODL_MGR_IP=${MGRIP}
ODL_PORT=8080
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}

NEUTRON_LBAAS_SERVICE_PROVIDERV2=${LBAAS_SERVICE_PROVIDER} # Only relevant if neutron-lbaas plugin is enabled
NEUTRON_SFC_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
NEUTRON_FLOWCLASSIFIER_DRIVERS=${ODL_SFC_DRIVER} # Only relevant if networking-sfc plugin is enabled
ETCD_PORT=2379
EOF

    if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
        echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
    fi

    IFS=,
    for plugin_name in ${ENABLE_OS_PLUGINS}; do
        if [ "$plugin_name" == "networking-odl" ]; then
            ENABLE_PLUGIN_ARGS="${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}"
        elif [ "$plugin_name" == "kuryr-kubernetes" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_KUBERNETES_PLUGIN_REPO} master" # note: kuryr-kubernetes only exists in master at the moment
        elif [ "$plugin_name" == "neutron-lbaas" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_LBAAS_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
            IS_LBAAS_PLUGIN_ENABLED="yes"
        elif [ "$plugin_name" == "networking-sfc" ]; then
            ENABLE_PLUGIN_ARGS="${DEVSTACK_NETWORKING_SFC_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
        else
            echo "Error: unsupported plugin ${plugin_name}; skipping"
            continue
        fi
        cat >> ${local_conf_file_name} << EOF

enable_plugin ${plugin_name} ${ENABLE_PLUGIN_ARGS}
EOF
    done
    unset IFS

    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
EOF
    fi

    if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ML2_VLAN_RANGES=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF

        if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
            SERVICE_PLUGINS="odl-router_v2"
        else
            SERVICE_PLUGINS="odl-router"
        fi
        if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
            SERVICE_PLUGINS+=", networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
        fi
        if [ "${IS_LBAAS_PLUGIN_ENABLED}" == "yes" ]; then
            SERVICE_PLUGINS+=", lbaasv2"
        fi
    fi # check for ODL_ENABLE_L3_FWD

    cat >> ${local_conf_file_name} << EOF

[[post-config|\$NEUTRON_CONF]]
[DEFAULT]
service_plugins = ${SERVICE_PLUGINS}

[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
[agent]
minimize_polling=True

[ml2]
# Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
# MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-physnet1(1500)
physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1440
path_mtu = 1490

# workaround for port-status not working due to https://bugs.opendaylight.org/show_bug.cgi?id=9092
[ml2_odl]
odl_features=nothing

[[post-config|/etc/neutron/dhcp_agent.ini]]
[DEFAULT]
force_metadata = True
enable_isolated_metadata = True

[[post-config|/etc/nova/nova.conf]]
[DEFAULT]
force_config_drive = False

[scheduler]
discover_hosts_in_cells_interval = 30
EOF

    echo "Control local.conf created:"
    cat ${local_conf_file_name}
} # create_control_node_local_conf()
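# Illustrative invocation only (the real calls are in the stacking loops below),
# using hypothetical example IPs:
#   create_control_node_local_conf 10.0.0.5 10.0.0.20 "10.0.0.11,10.0.0.12,10.0.0.13"
# writes ${WORKSPACE}/local.conf_control_10.0.0.5 for the control node at 10.0.0.5,
# pointing ODL_MGR_IP at 10.0.0.20 and ODL_OVS_MANAGERS at the three manager IPs.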

function create_compute_node_local_conf() {
    HOSTIP=$1
    SERVICEHOST=$2
    MGRIP=$3
    ODL_OVS_MANAGERS="$4"

    local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
    cat > ${local_conf_file_name} << EOF
[[local|localrc]]
LOGFILE=stack.sh.log
LOG_COLOR=False
USE_SCREEN=True
SCREEN_LOGDIR=/opt/stack/data/log
RECLONE=${RECLONE}

disable_all_services
EOF

    add_os_services "${CORE_OS_COMPUTE_SERVICES}" "${ENABLE_OS_COMPUTE_SERVICES}" "${DISABLE_OS_SERVICES}" "${local_conf_file_name}"

    cat >> ${local_conf_file_name} << EOF

HOST_IP=${HOSTIP}
SERVICE_HOST=${SERVICEHOST}
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}

ODL_MODE=manual
ODL_MGR_IP=${MGRIP}
ODL_PORT=8080
ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS}

Q_HOST=\$SERVICE_HOST
MYSQL_HOST=\$SERVICE_HOST
RABBIT_HOST=\$SERVICE_HOST
GLANCE_HOSTPORT=\$SERVICE_HOST:9292
KEYSTONE_AUTH_HOST=\$SERVICE_HOST
KEYSTONE_SERVICE_HOST=\$SERVICE_HOST

ADMIN_PASSWORD=${ADMIN_PASSWORD}
DATABASE_PASSWORD=${ADMIN_PASSWORD}
RABBIT_PASSWORD=${ADMIN_PASSWORD}
SERVICE_TOKEN=${ADMIN_PASSWORD}
SERVICE_PASSWORD=${ADMIN_PASSWORD}
EOF

    if [[ "${ENABLE_OS_PLUGINS}" =~ networking-odl ]]; then
        cat >> ${local_conf_file_name} << EOF

enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
EOF
    fi

    if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
        cat >> ${local_conf_file_name} << EOF

PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
Q_L3_ENABLED=True
ODL_L3=${ODL_L3}
EOF
    fi

    cat >> ${local_conf_file_name} << EOF

[[post-config|/etc/nova/nova.conf]]
[api]
auth_strategy = keystone
[DEFAULT]
use_neutron = True
EOF

    echo "Compute local.conf created:"
    cat ${local_conf_file_name}
} # create_compute_node_local_conf()
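# Illustrative invocation only, with hypothetical IPs:
#   create_compute_node_local_conf 10.0.0.6 10.0.0.5 10.0.0.20 "10.0.0.11,10.0.0.12,10.0.0.13"
# writes ${WORKSPACE}/local.conf_compute_10.0.0.6, pointing SERVICE_HOST at the
# control node (10.0.0.5) and ODL_MGR_IP at 10.0.0.20.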

function configure_haproxy_for_neutron_requests() {
    MGRIP=$1
    ODL_IPS=(${2//,/ })

    cat > ${WORKSPACE}/install_ha_proxy.sh << EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

    cat > ${WORKSPACE}/haproxy.cfg << EOF
global
  daemon
  group  haproxy
  log  /dev/log local0
  maxconn  20480
  pidfile  /tmp/haproxy.pid
  user  haproxy

defaults
  log  global
  maxconn  4096
  mode  tcp
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s

listen opendaylight
  bind ${MGRIP}:8080
  balance source
EOF

    odlindex=1
    for odlip in ${ODL_IPS[*]}; do
        cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-${odlindex} ${odlip}:8080 check fall 5 inter 2000 rise 2
EOF
        odlindex=$((odlindex+1))
    done

    cat >> ${WORKSPACE}/haproxy.cfg << EOF
listen opendaylight_rest
  bind ${MGRIP}:8181
  balance source
EOF

    odlindex=1
    for odlip in ${ODL_IPS[*]}; do
        cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2
EOF
        odlindex=$((odlindex+1))
    done

    echo "Dump haproxy.cfg"
    cat ${WORKSPACE}/haproxy.cfg

    cat > ${WORKSPACE}/deploy_ha_proxy.sh << EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sleep 3
sudo netstat -tunpl
sudo systemctl status haproxy
true
EOF

    scp ${WORKSPACE}/install_ha_proxy.sh ${MGRIP}:/tmp
    ${SSH} ${MGRIP} "sudo bash /tmp/install_ha_proxy.sh"
    scp ${WORKSPACE}/haproxy.cfg ${MGRIP}:/tmp
    scp ${WORKSPACE}/deploy_ha_proxy.sh ${MGRIP}:/tmp
    ${SSH} ${MGRIP} "sudo bash /tmp/deploy_ha_proxy.sh"
} # configure_haproxy_for_neutron_requests()
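# Illustrative invocation only, with hypothetical IPs:
#   configure_haproxy_for_neutron_requests 10.0.0.20 "10.0.0.11,10.0.0.12,10.0.0.13"
# installs haproxy on 10.0.0.20 and balances ports 8080 and 8181 across the three ODL IPs.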

function collect_logs () {
    set +e  # We do not want to create a red dot just because something went wrong while fetching logs.
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        echo "Let's take the karaf thread dump again..."
        KARAF_PID=$(ssh ${!CONTROLLERIP} "ps aux | grep ${KARAF_ARTIFACT} | grep -v grep | tr -s ' ' | cut -f2 -d' '")
        ssh ${!CONTROLLERIP} "jstack $KARAF_PID" > ${WORKSPACE}/karaf_${i}_threads_after.log || true
        echo "killing karaf process..."
        ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
    done

    cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
grep ct_ /var/log/openvswitch/ovs-vswitchd.log
echo -e "\novsdb-tool -mm show-log\n"
ovsdb-tool -mm show-log
echo -e "\nsudo netstat -punta\n"
sudo netstat -punta
echo -e "\nsudo getenforce\n"
sudo getenforce
echo -e "\njournalctl > /tmp/journalctl.log\n"
sudo journalctl > /tmp/journalctl.log
EOF

    sleep 5
    # FIXME: Do not create .tar and gzip before copying.
    for i in `seq 1 ${NUM_ODL_SYSTEM}`; do
        CONTROLLERIP=ODL_SYSTEM_${i}_IP
        ${SSH} "${!CONTROLLERIP}" "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
        scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
        ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
        scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${WORKSPACE}/odl${i}_zrpcd.log.tar"
        tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
        grep "ROBOT MESSAGE\| ERROR " odl${i}_karaf.log > odl${i}_err.log
        grep "ROBOT MESSAGE\|Exception" odl${i}_karaf.log > odl${i}_exception.log
        grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" odl${i}_karaf.log > odl${i}_err_warn_exception.log
        rm ${WORKSPACE}/odl${i}_karaf.log.tar
    done

    # Since this log collection work is happening before the archive build macro which also
    # creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
    # archives build step will essentially be a noop.
    mkdir -p ${WORKSPACE}/archives

    # Control Node
    for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
        OSIP=OPENSTACK_CONTROL_NODE_${i}_IP
        NODE_FOLDER="control_${i}"
        mkdir -p ${NODE_FOLDER}
        scp ${!OSIP}:/etc/kuryr/kuryr.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/dhcp_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/metadata_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/neutron_lbaas.conf ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/plugins/ml2/ml2_conf.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        scp ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/usr/lib/systemd/system/haproxy.service ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages ${NODE_FOLDER}
        rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${NODE_FOLDER} # rsync to prevent copying of symbolic links
        scp extra_debug.sh ${!OSIP}:/tmp
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}
        mv local.conf_control_${!OSIP} ${NODE_FOLDER}/local.conf
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Compute Nodes
    for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
        OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
        NODE_FOLDER="compute_${i}"
        mkdir -p ${NODE_FOLDER}
        scp ${!OSIP}:/etc/nova/nova.conf ${NODE_FOLDER}
        scp ${!OSIP}:/opt/stack/devstack/nohup.out ${NODE_FOLDER}/stack.log
        scp ${!OSIP}:/opt/stack/requirements/upper-constraints.txt ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/get_devstack.sh.txt ${NODE_FOLDER}
        scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${NODE_FOLDER}
        scp ${!OSIP}:/var/log/openvswitch/ovsdb-server.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/etc/hosts ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/audit/audit.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/dmesg.log ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/libvirt ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/messages ${NODE_FOLDER}
        rsync --rsync-path="sudo rsync" -avhe ssh ${!OSIP}:/var/log/nova-agent.log ${NODE_FOLDER}
        rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${NODE_FOLDER} # rsync to prevent copying of symbolic links
        scp extra_debug.sh ${!OSIP}:/tmp
        ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
        scp ${!OSIP}:/tmp/extra_debug.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/journalctl.log ${NODE_FOLDER}
        scp ${!OSIP}:/tmp/*.xz ${NODE_FOLDER}/
        mv local.conf_compute_${!OSIP} ${NODE_FOLDER}/local.conf
        mv ${NODE_FOLDER} ${WORKSPACE}/archives/
    done

    # Tempest
    DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
    TESTREPO=".stestr"
    # Look for tempest test results in the $TESTREPO dir and copy if found
    if ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0 ]'"; then
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
        ${SSH} ${OPENSTACK_CONTROL_NODE_1_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
        TEMPEST_LOGS_DIR=${WORKSPACE}/archives/tempest
        mkdir -p ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${TEMPEST_LOGS_DIR}
        scp ${OPENSTACK_CONTROL_NODE_1_IP}:${DEVSTACK_TEMPEST_DIR}/tempest.log ${TEMPEST_LOGS_DIR}
        mv ${WORKSPACE}/tempest_output* ${TEMPEST_LOGS_DIR}
    else
        echo "tempest results not found in ${DEVSTACK_TEMPEST_DIR}/${TESTREPO}/0"
    fi
} # collect_logs()

# If we are using the new netvirt impl, as determined by the feature name
# odl-netvirt-openstack (note: the old impl is odl-ovsdb-openstack), then we
# want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
# external networks in legacy netvirt.
if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
    ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
else
    ODL_PROVIDER_MAPPINGS=
fi

# If we are using the old netvirt impl, as determined by the feature name
# odl-ovsdb-openstack (note: the new impl is odl-netvirt-openstack), then we
# want ODL_L3 to be True. The new impl wants it False.
if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
    ODL_L3=True
else
    ODL_L3=False
fi

RECLONE=False

# Always compare the lists below against the devstack upstream ENABLED_SERVICES in
# https://github.com/openstack-dev/devstack/blob/master/stackrc#L52
# ODL CSIT does not use vnc, cinder, q-agt, q-l3 or horizon so they are not included below.
# collect performance stats
CORE_OS_CONTROL_SERVICES="dstat"
# Glance
CORE_OS_CONTROL_SERVICES+=",g-api,g-reg"
# Keystone
CORE_OS_CONTROL_SERVICES+=",key"
# Nova - services to support libvirt
CORE_OS_CONTROL_SERVICES+=",n-api,n-api-meta,n-cauth,n-cond,n-crt,n-obj,n-sch"
# ODL - services to connect to ODL
CORE_OS_CONTROL_SERVICES+=",odl-compute,odl-neutron"
# Neutron
CORE_OS_CONTROL_SERVICES+=",q-dhcp,q-meta,q-svc"
# Additional services
CORE_OS_CONTROL_SERVICES+=",mysql,rabbit"

# computes only need nova and odl
CORE_OS_COMPUTE_SERVICES="n-cpu,odl-compute"
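# Illustrative only: with the lists above, add_os_services will emit an
# "enable_service dstat g-api g-reg key n-api ... mysql rabbit" line in the control
# node's local.conf and "enable_service n-cpu odl-compute" on the computes.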

cat > ${WORKSPACE}/disable_firewall.sh << EOF
sudo systemctl stop firewalld
sudo systemctl stop iptables
true
EOF

cat > ${WORKSPACE}/get_devstack.sh << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils python-pip -y
#sudo systemctl stop  NetworkManager
#Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
#Workaround for mysql failure
echo "127.0.0.1   localhost \${HOSTNAME}" >> /tmp/hosts
echo "::1         localhost \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo mkdir /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
echo "git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}"
git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}
cd devstack
if [ -n "${DEVSTACK_HASH}" ]; then
    echo "git checkout ${DEVSTACK_HASH}"
    git checkout ${DEVSTACK_HASH}
fi
git --no-pager log --pretty=format:'%h %<(13)%ar%<(13)%cr %<(20,trunc)%an%d %s\n%b' -n20
EOF

cat > "${WORKSPACE}/setup_host_cell_mapping.sh" << EOF
sudo nova-manage cell_v2 map_cell0
sudo nova-manage cell_v2 simple_cell_setup
sudo nova-manage db sync
sudo nova-manage cell_v2 discover_hosts
EOF

NUM_OPENSTACK_SITES=${NUM_OPENSTACK_SITES:-1}
compute_index=1
odl_index=1
os_node_list=()
os_interval=$(( ${NUM_OPENSTACK_SYSTEM} / ${NUM_OPENSTACK_SITES} ))
ha_proxy_index=${os_interval}

for i in `seq 1 ${NUM_OPENSTACK_SITES}`; do
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        echo "Configure HAProxy"
        ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_${i}_IP
        ha_proxy_index=$(( $ha_proxy_index + $os_interval ))
        odl_index=$(((i - 1) * 3 + 1))
        ODL_IP_PARAM1=ODL_SYSTEM_$((odl_index++))_IP
        ODL_IP_PARAM2=ODL_SYSTEM_$((odl_index++))_IP
        ODL_IP_PARAM3=ODL_SYSTEM_$((odl_index++))_IP
        ODLMGRIP[$i]=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses HAProxy VIP
        ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs
        configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS[$i]}"
    else
        ODL_IP_PARAM=ODL_SYSTEM_${i}_IP
        ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM}" # OVSDB connects to the single ODL IP
        ODLMGRIP[$i]=${!ODL_IP_PARAM} # ODL Northbound uses the ODL IP directly
    fi
done
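# For the loop above (illustrative): with HAProxy enabled and 3 ODL systems per site,
# site 1 uses ODL_SYSTEM_1..3_IP as its OVSDB managers and OPENSTACK_HAPROXY_1_IP as
# the northbound VIP; site 2 would use ODL_SYSTEM_4..6_IP and OPENSTACK_HAPROXY_2_IP.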

# Begin stacking the nodes, starting with the controller(s) and then the compute(s)

for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
    CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
    echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${CONTROLIP}"
    create_etc_hosts ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!CONTROLIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!CONTROLIP}:/tmp
    ${SSH} ${!CONTROLIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP[$i]} "${ODL_OVS_MGRS[$i]}"
    scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf
    ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
    ssh ${!CONTROLIP} "ls -lrt /opt/stack/devstack/nohup.out"
    os_node_list+=(${!CONTROLIP})
    # Workaround for stable/newton jobs
    # TODO: can this be removed now?
    if [ "${ODL_ML2_BRANCH}" == "stable/newton" ]; then
        ssh ${!CONTROLIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/newton; sed -i /appdirs/d upper-constraints.txt"
    fi
done

for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
    NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES))
    SITE_INDEX=$((((i - 1) / NUM_COMPUTES_PER_SITE) + 1)) # We need the site index to infer the control node IP for this compute
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    CONTROLIP=OPENSTACK_CONTROL_NODE_${SITE_INDEX}_IP
    echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${COMPUTEIP}"
    create_etc_hosts ${!COMPUTEIP} ${!CONTROLIP}
    scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts
    scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
    create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP[$SITE_INDEX]} "${ODL_OVS_MGRS[$SITE_INDEX]}"
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=(${!COMPUTEIP})
    # Workaround for https://review.openstack.org/#/c/491032/
    # Modify upper-constraints to use libvirt-python 3.2.0
    if [ "${ODL_ML2_BRANCH}" == "stable/ocata" ]; then
        ${SSH} ${!COMPUTEIP} "
            cd /opt/stack;
            git clone https://git.openstack.org/openstack/requirements;
            cd requirements;
            git checkout stable/ocata;
            sed -i s/libvirt-python===2.5.0/libvirt-python===3.2.0/ upper-constraints.txt
        "
    fi
done
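# Illustrative only: with 4 compute nodes and 2 sites, NUM_COMPUTES_PER_SITE is 2,
# so computes 1-2 get SITE_INDEX=1 (control node 1) and computes 3-4 get SITE_INDEX=2.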

echo "nodelist: ${os_node_list[*]}"

# This script runs on the openstack nodes. It greps for a string that devstack writes when stacking is complete.
# The script then writes a status, based on the grep result, that is later scraped by the robot vm to control
# the status polling.
cat > ${WORKSPACE}/check_stacking.sh << EOF
> /tmp/stack_progress
ps -ef | grep "stack.sh" | grep -v grep
ret=\$?
if [ \${ret} -eq 1 ]; then
    grep "This is your host IP address:" /opt/stack/devstack/nohup.out
    if [ \$? -eq 0 ]; then
        echo "Stacking Complete" > /tmp/stack_progress
    else
        echo "Stacking Failed" > /tmp/stack_progress
    fi
elif [ \${ret} -eq 0 ]; then
    echo "Still Stacking" > /tmp/stack_progress
fi
EOF
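# Illustrative only: after check_stacking.sh runs on a node, /tmp/stack_progress will
# normally contain one of "Still Stacking", "Stacking Failed", or "Stacking Complete";
# the polling loop below copies the file back and branches on that value.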

# Check if the stacking is finished. Poll all nodes every 60s for one hour.
iteration=0
in_progress=1
while [ ${in_progress} -eq 1 ]; do
    iteration=$(($iteration + 1))
    for index in "${!os_node_list[@]}"; do
        echo "node $index ${os_node_list[index]}: checking stacking status attempt ${iteration} of 60"
        scp ${WORKSPACE}/check_stacking.sh ${os_node_list[index]}:/tmp
        ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
        scp ${os_node_list[index]}:/tmp/stack_progress .
        cat stack_progress
        stacking_status=`cat stack_progress`
        if [ "$stacking_status" == "Still Stacking" ]; then
            continue
        elif [ "$stacking_status" == "Stacking Failed" ]; then
            echo "node $index ${os_node_list[index]}: stacking has failed"
            collect_logs
            exit 1
        elif [ "$stacking_status" == "Stacking Complete" ]; then
            echo "node $index ${os_node_list[index]}: stacking complete"
            unset 'os_node_list[index]'
            if [ ${#os_node_list[@]} -eq 0 ]; then
                in_progress=0
            fi
        fi
    done
    echo "sleep for a minute before the next check"
    sleep 60
    if [ ${iteration} -eq 60 ]; then
        echo "stacking has failed - took longer than 60m"
        collect_logs
        exit 1
    fi
done

# Further configuration now that stacking is complete.
NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES))
for i in `seq 1 ${NUM_OPENSTACK_SITES}`; do
    echo "Configure the Control Node"
    CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP
    # Gather Compute IPs for the site
    for j in `seq 1 ${NUM_COMPUTES_PER_SITE}`; do
        COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j))
        IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP
        COMPUTE_IPS[$((j-1))]=${!IP_VAR}
    done

    # Need to disable firewalld and iptables in compute nodes as well
    for ip in ${COMPUTE_IPS[*]}; do
        scp ${WORKSPACE}/disable_firewall.sh "${ip}:/tmp"
        ${SSH} "${ip}" "sudo bash /tmp/disable_firewall.sh"
    done

    # Need to disable firewalld and iptables in control node
    echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
    scp ${WORKSPACE}/disable_firewall.sh ${!CONTROLIP}:/tmp
    ${SSH} ${!CONTROLIP} "sudo bash /tmp/disable_firewall.sh"

    echo "sleep for 60s and print hypervisor-list"
    sleep 60
    # In Ocata if we do not enable the n-cpu in control node then
    # we need to discover hosts manually and ensure that they are mapped to cells.
    # reference: https://ask.openstack.org/en/question/102256/how-to-configure-placement-service-for-compute-node-on-ocata/
    if [ "${OPENSTACK_BRANCH}" == "stable/ocata" ]; then
        scp ${WORKSPACE}/setup_host_cell_mapping.sh ${!CONTROLIP}:/tmp
        ${SSH} ${!CONTROLIP} "sudo bash /tmp/setup_host_cell_mapping.sh"
    fi
    ${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
    # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors
    # will be the same as the number of openstack systems. However, if we are doing multinode openstack then the
    # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
    # is one less than the total number of openstack systems
    if [ $((NUM_OPENSTACK_SYSTEM / NUM_OPENSTACK_SITES)) -eq 1 ]; then
        expected_num_hypervisors=1
    else
        expected_num_hypervisors=${NUM_COMPUTES_PER_SITE}
    fi
    num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
    if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
        echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
        collect_logs
        exit 1
    fi

    # Upgrade pip, urllib3 and httplib2 so that tempest tests can be run on the openstack control node.
    # This needs to happen after devstack runs because it seems devstack is pulling in specific versions
    # of these libs that are not working for tempest.
    ${SSH} ${!CONTROLIP} "sudo pip install --upgrade pip"
    ${SSH} ${!CONTROLIP} "sudo pip install urllib3 --upgrade"
    ${SSH} ${!CONTROLIP} "sudo pip install httplib2 --upgrade"

    # Gather Compute IPs for the site
    for j in `seq 1 ${NUM_COMPUTES_PER_SITE}`; do
        COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j))
        IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP
        COMPUTE_IPS[$((j-1))]=${!IP_VAR}
    done

    # Need to disable firewalld and iptables in compute nodes as well
    for ip in ${COMPUTE_IPS[*]}; do
        scp ${WORKSPACE}/disable_firewall.sh "${ip}:/tmp"
        ${SSH} "${ip}" "sudo bash /tmp/disable_firewall.sh"
    done

    # External Network
    echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
    # FIXME Should there be a unique gateway IP and devstack index for each site?
    devstack_index=1
    for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
        # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
        ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
        ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
    done
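    # Illustrative only: the hwaddr assignment above yields f6:00:00:ff:01:01 on the
    # control node, :02 on the first compute, and so on, so each node's PUBLIC_BRIDGE
    # gets a distinct, predictable MAC.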

    # ipsec support
    if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then
        ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]})
        for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do
            for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do
                KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null | xxd -p -c 64)
                KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null | xxd -p -c 64)
                ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null | xxd -p -c 8)
                ip1=${ALL_NODES[$inx_ip1]}
                ip2=${ALL_NODES[$inx_ip2]}
                ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
                ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"

                ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2"
                ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport"
                ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport"
            done
        done

        for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do
            echo "ip xfrm configuration for node $ip:"
            ${SSH} $ip "sudo ip xfrm policy list"
            ${SSH} $ip "sudo ip xfrm state list"
        done
    fi

    # Control Node - PUBLIC_BRIDGE will act as the external router
    # Parameter values below are used in integration/test - changing them requires updates in integration/test as well
    EXTNET_GATEWAY_IP="10.10.10.250"
    EXTNET_INTERNET_IP="10.9.9.9"
    EXTNET_PNF_IP="10.10.10.253"
    ${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24"

    # Control Node - external net PNF simulation
    ${SSH} ${!CONTROLIP} "
        sudo ip netns add pnf_ns;
        sudo ip link add pnf_veth0 type veth peer name pnf_veth1;
        sudo ip link set pnf_veth1 netns pnf_ns;
        sudo ip link set pnf_veth0 up;
        sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24;
        sudo ovs-vsctl add-port ${PUBLIC_BRIDGE} pnf_veth0;
    "

    # Control Node - external net internet address simulation
    ${SSH} ${!CONTROLIP} "
        sudo ip tuntap add dev internet_tap mode tap;
        sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24;
    "

    # Computes
    compute_index=1
    for compute_ip in ${COMPUTE_IPS[*]}; do
        # Tunnel from controller to compute
        COMPUTEPORT=compute$(( compute_index++ ))_vxlan
        ${SSH} ${!CONTROLIP} "
            sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow
        "
        # Tunnel from compute to controller
        CONTROLPORT="control_vxlan"
        ${SSH} $compute_ip "
            sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow
        "
    done
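    # Illustrative only: on the control node this loop creates vxlan ports named
    # compute1_vxlan, compute2_vxlan, ... on the public bridge, while each compute gets a
    # single control_vxlan port back to the controller, forming a hub-and-spoke overlay
    # for the simulated external network.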
done

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    odlmgrip=OPENSTACK_HAPROXY_1_IP
    HA_PROXY_IP=${!odlmgrip}
    HA_PROXY_1_IP=${!odlmgrip}
    odlmgrip2=OPENSTACK_HAPROXY_2_IP
    HA_PROXY_2_IP=${!odlmgrip2}
    odlmgrip3=OPENSTACK_HAPROXY_1_IP
    HA_PROXY_3_IP=${!odlmgrip3}
else
    HA_PROXY_IP=${ODL_SYSTEM_IP}
    HA_PROXY_1_IP=${ODL_SYSTEM_1_IP}
    HA_PROXY_2_IP=${ODL_SYSTEM_2_IP}
    HA_PROXY_3_IP=${ODL_SYSTEM_3_IP}
fi

echo "Locating test plan to use..."
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
cat testplan.txt

# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
    SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
fi

if [ "${OPENSTACK_BRANCH}" == "stable/pike" ]; then
    AUTH="http://${!CONTROLIP}/identity"
else
    AUTH="http://${!CONTROLIP}:35357/v3"
fi

# Environment Variables Needed to execute Openstack Client for NetVirt Jobs
cat > /tmp/os_netvirt_client_rc << EOF
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL=${AUTH}
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export OS_TENANT_NAME=admin
unset OS_CLOUD
EOF
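# Illustrative only: any shell that sources /tmp/os_netvirt_client_rc can then run
# OpenStack client commands (e.g. "openstack network list") against the freshly
# stacked cloud as the admin user, using the AUTH endpoint chosen above.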

source /tmp/os_netvirt_client_rc

echo "Starting Robot test suites ${SUITES} ..."
# please keep the pybot -v arguments alphabetized, one per line
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -e skip_if_${DISTROSTREAM} \
    -v BUNDLEFOLDER:${BUNDLEFOLDER} \
    -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} \
    -v CONTROLLER_USER:${USER} \
    -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
    -v HA_PROXY_IP:${HA_PROXY_IP} \
    -v HA_PROXY_1_IP:${HA_PROXY_1_IP} \
    -v HA_PROXY_2_IP:${HA_PROXY_2_IP} \
    -v HA_PROXY_3_IP:${HA_PROXY_3_IP} \
    -v JDKVERSION:${JDKVERSION} \
    -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
    -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
    -v NUM_OPENSTACK_SITES:${NUM_OPENSTACK_SITES} \
    -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
    -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
    -v ODL_SNAT_MODE:${ODL_SNAT_MODE} \
    -v ODL_STREAM:${DISTROSTREAM} \
    -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
    -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
    -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
    -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
    -v ODL_SYSTEM_4_IP:${ODL_SYSTEM_4_IP} \
    -v ODL_SYSTEM_5_IP:${ODL_SYSTEM_5_IP} \
    -v ODL_SYSTEM_6_IP:${ODL_SYSTEM_6_IP} \
    -v ODL_SYSTEM_7_IP:${ODL_SYSTEM_7_IP} \
    -v ODL_SYSTEM_8_IP:${ODL_SYSTEM_8_IP} \
    -v ODL_SYSTEM_9_IP:${ODL_SYSTEM_9_IP} \
    -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
    -v OS_CONTROL_NODE_1_IP:${OPENSTACK_CONTROL_NODE_1_IP} \
    -v OS_CONTROL_NODE_2_IP:${OPENSTACK_CONTROL_NODE_2_IP} \
    -v OS_CONTROL_NODE_3_IP:${OPENSTACK_CONTROL_NODE_3_IP} \
    -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
    -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
    -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
    -v OS_COMPUTE_3_IP:${OPENSTACK_COMPUTE_NODE_3_IP} \
    -v OS_COMPUTE_4_IP:${OPENSTACK_COMPUTE_NODE_4_IP} \
    -v OS_COMPUTE_5_IP:${OPENSTACK_COMPUTE_NODE_5_IP} \
    -v OS_COMPUTE_6_IP:${OPENSTACK_COMPUTE_NODE_6_IP} \
    -v OS_USER:${USER} \
    -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
    -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
    -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
    -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
    -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
    -v USER_HOME:${HOME} \
    -v WORKSPACE:/tmp \
    ${TESTOPTIONS} ${SUITES} || true

echo "Examining the files in data/log and checking file size"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

echo "Tests Executed"
collect_logs

true  # perhaps Jenkins is testing last exit code
# vim: ts=4 sw=4 sts=4 et ft=sh :