Merge "Move builder jobs to run on minion nodes"
[releng/builder.git] / jjb / integration / include-raw-integration-deploy-openstack-run-test.sh
1 #@IgnoreInspection BashAddShebang
2 # Activate robotframework virtualenv
3 # ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
4 # script.
5 source ${ROBOT_VENV}/bin/activate
6
7 # TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
8 echo "showing recent changes that made it in to the distribution used by this job"
9 pip install --upgrade urllib3
10 python ${WORKSPACE}/test/tools/distchanges/changes.py -d /tmp/distribution_folder \
11                   -u ${ACTUALBUNDLEURL} -b ${BRANCH} \
12                   -r ssh://jenkins-${SILO}@git.opendaylight.org:29418 || true
13
14 echo "#################################################"
15 echo "##         Deploy Openstack 3-node             ##"
16 echo "#################################################"
17
18
19 SSH="ssh -t -t"
20
21 function create_control_node_local_conf {
22 local_conf_file_name=${WORKSPACE}/local.conf_control
23 #Needs to be removed
24 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
25    RECLONE=no
26 else
27    RECLONE=yes
28 fi
29 cat > ${local_conf_file_name} << EOF
30 [[local|localrc]]
31 LOGFILE=stack.sh.log
32 SCREEN_LOGDIR=/opt/stack/data/log
33 LOG_COLOR=False
34 RECLONE=${RECLONE}
35 EOF
36
37 IFS=,
38 for service_name in ${DISABLE_OS_SERVICES}
39 do
40 cat >> ${local_conf_file_name} << EOF
41 disable_service ${service_name}
42 EOF
43 done
44 for service_name in ${ENABLE_OS_SERVICES}
45 do
46 cat >> ${local_conf_file_name} << EOF
47 enable_service ${service_name}
48 EOF
49 done
50 unset IFS
51
52 cat >> ${local_conf_file_name} << EOF
53 HOST_IP=$OPENSTACK_CONTROL_NODE_IP
54 SERVICE_HOST=\$HOST_IP
55
56 NEUTRON_CREATE_INITIAL_NETWORKS=False
57 Q_PLUGIN=ml2
58 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
59 Q_OVS_USE_VETH=True
60
61 ENABLE_TENANT_TUNNELS=True
62
63 MYSQL_HOST=\$SERVICE_HOST
64 RABBIT_HOST=\$SERVICE_HOST
65 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
66 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
67 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
68
69 MYSQL_PASSWORD=mysql
70 RABBIT_PASSWORD=rabbit
71 SERVICE_TOKEN=service
72 SERVICE_PASSWORD=admin
73 ADMIN_PASSWORD=admin
74
75 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
76
77 ODL_PORT=8080
78 ODL_MODE=externalodl
79 LIBVIRT_TYPE=qemu
80
81 EOF
82
83
84 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
85     echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
86 fi
87
88 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
89 odl_list=${ODL_SYSTEM_1_IP}
90 for i in `seq 2 ${NUM_ODL_SYSTEM}`
91 do
92 odlip=ODL_SYSTEM_${i}_IP
93 odl_list=${odl_list},${!odlip}
94 done
95 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
96 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
97 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
98 odl_mgr_ip=${!odlmgrip}
99 else
100 odl_mgr_ip=${ODL_SYSTEM_1_IP}
101 fi
102 cat >> ${local_conf_file_name} << EOF
103 ODL_OVS_MANAGERS=${odl_list}
104 ODL_MGR_IP=${odl_mgr_ip}
105 EOF
106 else
107 cat >> ${local_conf_file_name} << EOF
108 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
109 EOF
110 fi
111
112 # if we are using the old netvirt impl, as determined by the feature name
113 # odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
114 # want ODL_L3 to be True.  New impl wants it False
115 if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
116   ODL_L3=True
117 else
118   ODL_L3=False
119 fi
120
121 # if we are using the new netvirt impl, as determined by the feature name
122 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
123 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
124 # external networks in legacy netvirt
125 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
126   ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
127 else
128   ODL_PROVIDER_MAPPINGS=
129 fi
130
131 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
132 cat >> ${local_conf_file_name} << EOF
133 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
134 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
135 ML2_VLAN_RANGES=physnet1
136 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
137
138 disable_service q-l3
139 Q_L3_ENABLED=True
140 ODL_L3=${ODL_L3}
141 PUBLIC_INTERFACE=br100
142 EOF
143
144 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
145 cat >> ${local_conf_file_name} << EOF
146 [[post-config|\$NEUTRON_CONF]]
147 [DEFAULT]
148 service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
149
150 EOF
151 fi #check for ODL_ML2_BRANCH
152
153 fi #ODL_ENABLE_L3_FWD check
154
155 cat >> ${local_conf_file_name} << EOF
156 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
157 [agent]
158 minimize_polling=True
159
160 [ml2]
161 # Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
162 # MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phynset1(1500)
163 physical_network_mtus = physnet1:1440
164
165 [[post-config|/etc/neutron/dhcp_agent.ini]]
166 [DEFAULT]
167 force_metadata = True
168 enable_isolated_metadata = True
169
170 [[post-config|/etc/nova/nova.conf]]
171 [DEFAULT]
172 force_config_drive = False
173
174 EOF
175
176 echo "local.conf Created...."
177 cat ${local_conf_file_name}
178 }
179
180 function create_compute_node_local_conf {
181 HOSTIP=$1
182 #Needs to be removed
183 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
184    RECLONE=no
185 else
186    RECLONE=yes
187 fi
188 local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
189 cat > ${local_conf_file_name} << EOF
190 [[local|localrc]]
191 LOGFILE=stack.sh.log
192 LOG_COLOR=False
193 SCREEN_LOGDIR=/opt/stack/data/log
194 RECLONE=${RECLONE}
195
196 NOVA_VNC_ENABLED=True
197 MULTI_HOST=1
198 ENABLED_SERVICES=n-cpu
199
200 HOST_IP=${HOSTIP}
201 SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}
202
203 Q_PLUGIN=ml2
204 ENABLE_TENANT_TUNNELS=True
205 Q_ML2_TENANT_NETWORK_TYPE=vxlan
206
207 Q_HOST=\$SERVICE_HOST
208 MYSQL_HOST=\$SERVICE_HOST
209 RABBIT_HOST=\$SERVICE_HOST
210 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
211 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
212 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
213
214 MYSQL_PASSWORD=mysql
215 RABBIT_PASSWORD=rabbit
216 SERVICE_TOKEN=service
217 SERVICE_PASSWORD=admin
218 ADMIN_PASSWORD=admin
219
220 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
221 ODL_MODE=compute
222 LIBVIRT_TYPE=qemu
223 EOF
224
225 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
226 odl_list=${ODL_SYSTEM_1_IP}
227 for i in `seq 2 ${NUM_ODL_SYSTEM}`
228 do
229 odlip=ODL_SYSTEM_${i}_IP
230 odl_list=${odl_list},${!odlip}
231 done
232 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
233 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
234 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
235 odl_mgr_ip=${!odlmgrip}
236 else
237 odl_mgr_ip=${ODL_SYSTEM_1_IP}
238 fi
239 cat >> ${local_conf_file_name} << EOF
240 ODL_OVS_MANAGERS=${odl_list}
241 ODL_MGR_IP=${odl_mgr_ip}
242 EOF
243 else
244 cat >> ${local_conf_file_name} << EOF
245 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
246 EOF
247 fi
248
249 # if we are using the new netvirt impl, as determined by the feature name
250 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
251 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
252 # external networks in legacy netvirt
253 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
254   ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
255 else
256   ODL_PROVIDER_MAPPINGS=
257 fi
258
259 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
260 cat >> ${local_conf_file_name} << EOF
261 # Uncomment lines below if odl-compute is to be used for l3 forwarding
262 Q_L3_ENABLED=True
263 ODL_L3=${ODL_L3}
264 PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
265 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
266 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
267 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
268 EOF
269 fi
270 echo "local.conf Created...."
271 cat ${local_conf_file_name}
272 }
273
# Provision HAProxy on the dedicated proxy node (by convention the last
# OPENSTACK_COMPUTE_NODE_<n>_IP slot) to load-balance neutron's traffic to
# the ODL controllers on ports 8080 and 8181 (source-IP sticky).
# Globals read: NUM_OPENSTACK_SYSTEM, NUM_ODL_SYSTEM, ODL_SYSTEM_<i>_IP,
# WORKSPACE, SSH.
# Side effects: writes install_ha_proxy.sh, haproxy.cfg and deploy_ha_proxy.sh
# under ${WORKSPACE}, then copies them to the proxy node and runs them there.
function configure_haproxy_for_neutron_requests () {
HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
# Indirect expansion: the value of OPENSTACK_COMPUTE_NODE_<n>_IP.
ha_proxy_ip=${!odlmgrip}

cat > ${WORKSPACE}/install_ha_proxy.sh << EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

# Unquoted EOF: ${ha_proxy_ip} is baked in now; the generated config is static.
cat > ${WORKSPACE}/haproxy.cfg << EOF
global
  daemon
  group  haproxy
  log  /dev/log local0
  maxconn  20480
  pidfile  /tmp/haproxy.pid
  user  haproxy

defaults
  log  global
  maxconn  4096
  mode  tcp
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s

listen opendaylight
  bind ${ha_proxy_ip}:8080
  balance source
EOF

# One backend line per ODL controller for port 8080.
for i in $(seq 1 "${NUM_ODL_SYSTEM}")
do
odlip=ODL_SYSTEM_${i}_IP
cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-$i ${!odlip}:8080 check fall 5 inter 2000 rise 2
EOF
done

cat >> ${WORKSPACE}/haproxy.cfg << EOF
listen opendaylight_rest
  bind ${ha_proxy_ip}:8181
  balance source
EOF

# Same backends again for the RESTCONF port 8181.
for i in $(seq 1 "${NUM_ODL_SYSTEM}")
do
odlip=ODL_SYSTEM_${i}_IP
cat >> ${WORKSPACE}/haproxy.cfg << EOF
  server controller-rest-$i ${!odlip}:8181 check fall 5 inter 2000 rise 2
EOF
done

# Point the systemd unit at /tmp/haproxy.cfg (where the config is scp'd),
# mark the haproxy_t SELinux domain permissive, restart and show status.
# Trailing 'true' keeps the script's exit status clean for the ${SSH} caller.
cat > ${WORKSPACE}/deploy_ha_proxy.sh << EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sleep 3
sudo netstat -tunpl
sudo systemctl status haproxy
true
EOF
# ${SSH} stays unquoted on purpose: it must word-split into "ssh -t -t".
scp "${WORKSPACE}/install_ha_proxy.sh" "${ha_proxy_ip}:/tmp"
${SSH} "${ha_proxy_ip}" "sudo bash /tmp/install_ha_proxy.sh"
scp "${WORKSPACE}/haproxy.cfg" "${ha_proxy_ip}:/tmp"
scp "${WORKSPACE}/deploy_ha_proxy.sh" "${ha_proxy_ip}:/tmp"
${SSH} "${ha_proxy_ip}" "sudo bash /tmp/deploy_ha_proxy.sh"
}
348
# Collect logs from all ODL controllers and all OpenStack nodes into
# ${WORKSPACE}/archives.  NOTE: despite the name this function does NOT exit;
# every caller follows it with an explicit 'exit 1' when needed.
function collect_logs_and_exit (){
set +e  # We do not want to create red dot just because something went wrong while fetching logs.
# Kill karaf on every controller so its logs are flushed and stable.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "killing karaf process..."
    # The '"'"' sequences embed single quotes inside the single-quoted awk
    # program; the awk output ("kill -9 <pid>") is piped into sh remotely.
    ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

# Small script run on each OpenStack node to capture OVS module/log details.
cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
grep ct_ /var/log/openvswitch/ovs-vswitchd.log
EOF

sleep 5
# FIXME: Do not create .tar and gzip before copying.
# Pull karaf logs from each controller: tar remotely, scp locally, then
# unpack with --transform so the files land as odl<i>_karaf/* in $PWD.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    ${SSH} "${!CONTROLLERIP}"  "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
    ${SSH} "${!CONTROLLERIP}"  "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
    scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
    tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
    rm ${WORKSPACE}/odl${i}_karaf.log.tar
done

# Since this log collection work is happening before the archive build macro which also
# creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
# archives build step will essentially be a noop.
mkdir -p ${WORKSPACE}/archives

# Control Node
OS_CTRL_FOLDER="control"
mkdir -p ${OS_CTRL_FOLDER}
scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
scp ${OPENSTACK_CONTROL_NODE_IP}:/tmp/extra_debug.log ${OS_CTRL_FOLDER}/extra_debug.log
mv local.conf_control ${OS_CTRL_FOLDER}/local.conf
mv ${OS_CTRL_FOLDER} ${WORKSPACE}/archives/

# Compute Nodes
# N-1 because the last OPENSTACK slot may be the HAProxy node, not a compute.
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    OS_COMPUTE_FOLDER="compute_${i}"
    mkdir -p ${OS_COMPUTE_FOLDER}
    scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
    scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
    rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
    scp extra_debug.sh ${!OSIP}:/tmp
    ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
    scp ${!OSIP}:/tmp/extra_debug.log ${OS_COMPUTE_FOLDER}/extra_debug.log
    mv local.conf_compute_${!OSIP} ${OS_COMPUTE_FOLDER}/local.conf
    mv ${OS_COMPUTE_FOLDER} ${WORKSPACE}/archives/
done

# Rename any remaining local.conf* so Jenkins archives them as .log files.
ls local.conf* | xargs -I % mv % %.log
}
412
# Helper script pushed to nodes to open all traffic: stop both firewalld and
# iptables.  Trailing 'true' keeps the script's exit status clean when either
# service is absent.
cat > ${WORKSPACE}/disable_firewall.sh << EOF
sudo systemctl stop firewalld
sudo systemctl stop iptables
true
EOF

# Script run on every OpenStack node to prepare it and clone devstack.
# Unquoted EOF: $OPENSTACK_BRANCH is baked in now; the \${HOSTNAME} escapes
# are expanded by the remote shell when the script runs.
# (A duplicated 'systemctl stop NetworkManager' line was removed here; the
# service only needs to be stopped once.)
cat > ${WORKSPACE}/get_devstack.sh << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils -y
#Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
#Workaround for mysql failure
echo "127.0.0.1    localhost \${HOSTNAME}" > /tmp/hosts
echo "::1   localhost  \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo /usr/sbin/brctl addbr br100
#sudo ifconfig eth0 mtu 2000
sudo mkdir /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
git clone https://git.openstack.org/openstack-dev/devstack
cd devstack
git checkout $OPENSTACK_BRANCH
EOF

echo "Create HAProxy if needed"
if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
 echo "Need to configure HAProxy"
 configure_haproxy_for_neutron_requests
fi
446
# Track every node whose stack.sh is in flight; the polling loop below
# removes entries as they finish.
os_node_list=()
echo "Stack the Control Node"
scp ${WORKSPACE}/get_devstack.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/get_devstack.sh"
create_control_node_local_conf
scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf


# Workaround for successful stacking with Mitaka
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then

  # Workaround for problems with latest versions/specified versions in requirements of openstack
  # Openstacksdk,libvirt-python -> the current version does not work with Mitaka due to some requirements
  # conflict and breaks when trying to stack
  # paramiko -> Problems with tempest tests due to paramiko incompatibility with pycrypto.
  # the problem has been solved with version 1.17. If the latest version of paramiko is used, it causes
  # other timeout problems
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt; sed -i /paramiko/d upper-constraints.txt"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install deprecation"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/openstack/python-openstacksdk; cd python-openstacksdk; sudo python setup.py install"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/paramiko/paramiko; cd paramiko; git checkout 1.17; sudo python setup.py install"
fi

# stack.sh runs detached under nohup; completion is detected later by
# polling with check_stacking.sh.
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ps -ef | grep stack.sh"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ls -lrt /opt/stack/devstack/nohup.out"
os_node_list+=(${OPENSTACK_CONTROL_NODE_IP})


# Same flow for each compute node (N-1: the last OPENSTACK slot may be the
# HAProxy node rather than a compute).
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    scp ${WORKSPACE}/get_devstack.sh  ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh"
    create_compute_node_local_conf ${!COMPUTEIP}
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
       ssh ${!COMPUTEIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /libvirt-python/d upper-constraints.txt"
    fi
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=(${!COMPUTEIP})
done
490
491 cat > ${WORKSPACE}/check_stacking.sh << EOF
492 > /tmp/stack_progress
493 ps -ef | grep "stack.sh" | grep -v grep
494 ret=\$?
495 if [ \${ret} -eq 1 ]; then
496   grep "This is your host IP address:" /opt/stack/devstack/nohup.out
497   if [ \$? -eq 0 ]; then
498      echo "Stacking Complete" > /tmp/stack_progress
499   else
500      echo "Stacking Failed" > /tmp/stack_progress
501   fi
502 elif [ \${ret} -eq 0 ]; then
503   echo "Still Stacking" > /tmp/stack_progress
504 fi
505 EOF
506
507 #the checking is repeated for an hour
508 iteration=0
509 in_progress=1
510 while [ ${in_progress} -eq 1 ]; do
511 iteration=$(($iteration + 1))
512 for index in ${!os_node_list[@]}
513 do
514 echo "Check the status of stacking in ${os_node_list[index]}"
515 scp ${WORKSPACE}/check_stacking.sh  ${os_node_list[index]}:/tmp
516 ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
517 scp ${os_node_list[index]}:/tmp/stack_progress .
518 #debug
519 cat stack_progress
520 stacking_status=`cat stack_progress`
521 if [ "$stacking_status" == "Still Stacking" ]; then
522   continue
523 elif [ "$stacking_status" == "Stacking Failed" ]; then
524   collect_logs_and_exit
525   exit 1
526 elif [ "$stacking_status" == "Stacking Complete" ]; then
527   unset os_node_list[index]
528   if  [ ${#os_node_list[@]} -eq 0 ]; then
529      in_progress=0
530   fi
531 fi
532 done
533  echo "sleep for a minute before the next check"
534  sleep 60
535  if [ ${iteration} -eq 60 ]; then
536   collect_logs_and_exit
537   exit 1
538  fi
539 done
540
541 #Need to disable firewalld and iptables in control node
542 echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
543 scp ${WORKSPACE}/disable_firewall.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
544 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo bash /tmp/disable_firewall.sh"
545 echo "sleep for a minute and print hypervisor-list"
546 sleep 60
547 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
548 # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors
549 # will be the same as the number of openstack systems. However, if we are doing multinode openstack then the
550 # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
551 # is one less than the total number of openstack systems
552 if [ "${NUM_OPENSTACK_SYSTEM}" -eq 1 ]; then
553   expected_num_hypervisors=1
554 else
555   expected_num_hypervisors=$((NUM_OPENSTACK_SYSTEM - 1))
556 fi
557 num_hypervisors=$(${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
558 if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
559   echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
560   collect_logs_and_exit
561   exit 1
562 fi
563
564 #Need to disable firewalld and iptables in compute nodes as well
565 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
566 do
567     OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
568     scp ${WORKSPACE}/disable_firewall.sh "${!OSIP}:/tmp"
569     ${SSH} "${!OSIP}" "sudo bash /tmp/disable_firewall.sh"
570 done
571
572 # upgrading pip, urllib3 and httplib2 so that tempest tests can be run on ${OPENSTACK_CONTROL_NODE_IP}
573 # this needs to happen after devstack runs because it seems devstack is pulling in specific versions
574 # of these libs that are not working for tempest.
575 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install --upgrade pip"
576 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install urllib3 --upgrade"
577 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install httplib2 --upgrade"
578
579 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
580 do
581     IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
582     COMPUTE_IPS[$((i-1))]=${!IP_VAR}
583 done
584
# External Network
echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
# Create PUBLIC_BRIDGE on every node with a unique, deterministic MAC
# (last digit comes from devstack_index, incremented locally per node).
devstack_index=1
for ip in ${OPENSTACK_CONTROL_NODE_IP} ${COMPUTE_IPS[*]}
do
    # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
    ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
    ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
done

# Control Node - PUBLIC_BRIDGE will act as the external router
GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
# Build a VXLAN tunnel pair between the control node and every compute node
# (dst_port 9876 so it doesn't clash with the tenant VXLAN mesh).
compute_index=1
for compute_ip in ${COMPUTE_IPS[*]}
do
    # Tunnel from controller to compute
    PORT_NAME=compute$((compute_index++))_vxlan
    ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="${OPENSTACK_CONTROL_NODE_IP}" options:remote_ip="$compute_ip" options:dst_port=9876 options:key=flow"

    # Tunnel from compute to controller
    PORT_NAME=control_vxlan
    ${SSH} ${compute_ip} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="$compute_ip" options:remote_ip="${OPENSTACK_CONTROL_NODE_IP}" options:dst_port=9876 options:key=flow"
done

# HA_PROXY_IP passed to Robot: the proxy node in clustered jobs (last
# OPENSTACK slot, via indirect expansion), else the single ODL system.
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
  HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
  odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
  HA_PROXY_IP=${!odlmgrip}
else
  HA_PROXY_IP=${ODL_SYSTEM_IP}
fi
617 echo "Locating test plan to use..."
618 testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
619 if [ ! -f "${testplan_filepath}" ]; then
620     testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
621 fi
622
623 echo "Changing the testplan path..."
624 cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
625 cat testplan.txt
626
627 SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
628
629 echo "Starting Robot test suites ${SUITES} ..."
630 # please add pybot -v arguments on a single line and alphabetized
631 pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude \
632     -v BUNDLEFOLDER:${BUNDLEFOLDER} \
633     -v BUNDLE_URL:${ACTUALBUNDLEURL} \
634     -v CONTROLLER_USER:${USER} \
635     -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
636     -v HA_PROXY_IP:${HA_PROXY_IP} \
637     -v JDKVERSION:${JDKVERSION} \
638     -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
639     -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
640     -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
641     -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
642     -v ODL_STREAM:${DISTROSTREAM} \
643     -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
644     -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
645     -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
646     -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
647     -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_IP} \
648     -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
649     -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
650     -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
651     -v OS_USER:${USER} \
652     -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
653     -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
654     -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
655     -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
656     -v USER_HOME:${HOME} \
657     -v WORKSPACE:/tmp \
658     ${TESTOPTIONS} ${SUITES} || true
659
660 echo "Examining the files in data/log and checking filesize"
661 ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
662 ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"
663
664 echo "Tests Executed"
665 DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
666 if $(ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/.testrepository/0 ]'"); then # if Tempest results exist
667     ssh ${OPENSTACK_CONTROL_NODE_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/.testrepository/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/.testrepository/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
668     ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
669     scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${WORKSPACE}/
670 fi
671 collect_logs_and_exit
672
673 true  # perhaps Jenkins is testing last exit code
674 # vim: ts=4 sw=4 sts=4 et ft=sh :