f80311d492ef6b6961454a92a120d1afc940bfdb
[releng/builder.git] / jjb / integration / include-raw-integration-deploy-openstack-run-test.sh
#@IgnoreInspection BashAddShebang
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
# script.
source "${ROBOT_VENV}/bin/activate"

# TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level
echo "showing recent changes that made it in to the distribution used by this job"
pip install --upgrade urllib3
python "${WORKSPACE}/test/tools/distchanges/changes.py" -d /tmp/distribution_folder \
                  -u "${ACTUALBUNDLEURL}" -b "${BRANCH}" \
                  -r "ssh://jenkins-${SILO}@git.opendaylight.org:29418" || true

echo "#################################################"
echo "##         Deploy Openstack 3-node             ##"
echo "#################################################"


# -t -t forces pseudo-tty allocation so remote sudo/interactive-ish commands work.
# NOTE: deliberately unquoted at call sites so the options word-split.
SSH="ssh -t -t"
21 function create_control_node_local_conf {
22 local_conf_file_name=${WORKSPACE}/local.conf_control
23 #Needs to be removed
24 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
25    RECLONE=no
26 else
27    RECLONE=yes
28 fi
29 cat > ${local_conf_file_name} << EOF
30 [[local|localrc]]
31 LOGFILE=stack.sh.log
32 SCREEN_LOGDIR=/opt/stack/data/log
33 LOG_COLOR=False
34 RECLONE=${RECLONE}
35 EOF
36
37 IFS=,
38 for service_name in ${DISABLE_OS_SERVICES}
39 do
40 cat >> ${local_conf_file_name} << EOF
41 disable_service ${service_name}
42 EOF
43 done
44 for service_name in ${ENABLE_OS_SERVICES}
45 do
46 cat >> ${local_conf_file_name} << EOF
47 enable_service ${service_name}
48 EOF
49 done
50 unset IFS
51
52 cat >> ${local_conf_file_name} << EOF
53 HOST_IP=$OPENSTACK_CONTROL_NODE_IP
54 SERVICE_HOST=\$HOST_IP
55
56 NEUTRON_CREATE_INITIAL_NETWORKS=False
57 Q_PLUGIN=ml2
58 Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
59 Q_OVS_USE_VETH=True
60
61 ENABLE_TENANT_TUNNELS=True
62
63 MYSQL_HOST=\$SERVICE_HOST
64 RABBIT_HOST=\$SERVICE_HOST
65 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
66 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
67 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
68
69 MYSQL_PASSWORD=mysql
70 RABBIT_PASSWORD=rabbit
71 SERVICE_TOKEN=service
72 SERVICE_PASSWORD=admin
73 ADMIN_PASSWORD=admin
74
75 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
76
77 ODL_PORT=8080
78 ODL_MODE=externalodl
79 LIBVIRT_TYPE=qemu
80
81 EOF
82
83 if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
84 cat >> ${local_conf_file_name} << EOF
85
86 enable_plugin networking-l2gw ${NETWORKING_L2GW_DRIVER} ${ODL_ML2_BRANCH}
87 NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
88 ENABLED_SERVICES+=,neutron,q-svc,nova,q-meta
89
90 EOF
91 fi
92
93 if [ "${ODL_ML2_DRIVER_VERSION}" == "v2" ]; then
94     echo "ODL_V2DRIVER=True" >> ${local_conf_file_name}
95 fi
96
97 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
98 odl_list=${ODL_SYSTEM_1_IP}
99 for i in `seq 2 ${NUM_ODL_SYSTEM}`
100 do
101 odlip=ODL_SYSTEM_${i}_IP
102 odl_list=${odl_list},${!odlip}
103 done
104 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
105 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
106 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
107 odl_mgr_ip=${!odlmgrip}
108 else
109 odl_mgr_ip=${ODL_SYSTEM_1_IP}
110 fi
111 cat >> ${local_conf_file_name} << EOF
112 ODL_OVS_MANAGERS=${odl_list}
113 ODL_MGR_IP=${odl_mgr_ip}
114 EOF
115 else
116 cat >> ${local_conf_file_name} << EOF
117 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
118 EOF
119 fi
120
121 # if we are using the old netvirt impl, as determined by the feature name
122 # odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
123 # want ODL_L3 to be True.  New impl wants it False
124 if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
125   ODL_L3=True
126 else
127   ODL_L3=False
128 fi
129
130 # if we are using the new netvirt impl, as determined by the feature name
131 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
132 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
133 # external networks in legacy netvirt
134 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
135   ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
136 else
137   ODL_PROVIDER_MAPPINGS=
138 fi
139
140 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
141 cat >> ${local_conf_file_name} << EOF
142 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
143 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
144 ML2_VLAN_RANGES=physnet1
145 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
146
147 disable_service q-l3
148 PUBLIC_INTERFACE=br100
149 EOF
150
151 if [ -z ${DISABLE_ODL_L3_PLUGIN} ] || [ "${DISABLE_ODL_L3_PLUGIN}" == "no" ]; then
152 if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
153 cat >> ${local_conf_file_name} << EOF
154 Q_L3_ENABLED=True
155 ODL_L3=${ODL_L3}
156 [[post-config|\$NEUTRON_CONF]]
157 [DEFAULT]
158 service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
159
160 EOF
161 fi #check for ODL_ML2_BRANCH
162 fi #check for DISABLE_ODL_L3_PLUGIN
163
164 fi #ODL_ENABLE_L3_FWD check
165
166 cat >> ${local_conf_file_name} << EOF
167 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
168 [agent]
169 minimize_polling=True
170
171 [ml2]
172 # Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
173 # MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phynset1(1500)
174 physical_network_mtus = physnet1:1440
175
176 [[post-config|/etc/neutron/dhcp_agent.ini]]
177 [DEFAULT]
178 force_metadata = True
179 enable_isolated_metadata = True
180
181 [[post-config|/etc/nova/nova.conf]]
182 [DEFAULT]
183 force_config_drive = False
184
185 EOF
186
187 echo "local.conf Created...."
188 cat ${local_conf_file_name}
189 }
190
191 function create_compute_node_local_conf {
192 HOSTIP=$1
193 #Needs to be removed
194 if [ "${ODL_ML2_BRANCH}" != "stable/ocata" ]; then
195    RECLONE=no
196 else
197    RECLONE=yes
198 fi
199 local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
200 cat > ${local_conf_file_name} << EOF
201 [[local|localrc]]
202 LOGFILE=stack.sh.log
203 LOG_COLOR=False
204 SCREEN_LOGDIR=/opt/stack/data/log
205 RECLONE=${RECLONE}
206
207 NOVA_VNC_ENABLED=True
208 MULTI_HOST=1
209 ENABLED_SERVICES=n-cpu
210
211 HOST_IP=${HOSTIP}
212 SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}
213
214 Q_PLUGIN=ml2
215 ENABLE_TENANT_TUNNELS=True
216 Q_ML2_TENANT_NETWORK_TYPE=vxlan
217
218 Q_HOST=\$SERVICE_HOST
219 MYSQL_HOST=\$SERVICE_HOST
220 RABBIT_HOST=\$SERVICE_HOST
221 GLANCE_HOSTPORT=\$SERVICE_HOST:9292
222 KEYSTONE_AUTH_HOST=\$SERVICE_HOST
223 KEYSTONE_SERVICE_HOST=\$SERVICE_HOST
224
225 MYSQL_PASSWORD=mysql
226 RABBIT_PASSWORD=rabbit
227 SERVICE_TOKEN=service
228 SERVICE_PASSWORD=admin
229 ADMIN_PASSWORD=admin
230
231 enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
232 ODL_MODE=compute
233 LIBVIRT_TYPE=qemu
234 EOF
235
236 if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
237 odl_list=${ODL_SYSTEM_1_IP}
238 for i in `seq 2 ${NUM_ODL_SYSTEM}`
239 do
240 odlip=ODL_SYSTEM_${i}_IP
241 odl_list=${odl_list},${!odlip}
242 done
243 if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
244 HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
245 odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
246 odl_mgr_ip=${!odlmgrip}
247 else
248 odl_mgr_ip=${ODL_SYSTEM_1_IP}
249 fi
250 cat >> ${local_conf_file_name} << EOF
251 ODL_OVS_MANAGERS=${odl_list}
252 ODL_MGR_IP=${odl_mgr_ip}
253 EOF
254 else
255 cat >> ${local_conf_file_name} << EOF
256 ODL_MGR_IP=${ODL_SYSTEM_1_IP}
257 EOF
258 fi
259
260 # if we are using the new netvirt impl, as determined by the feature name
261 # odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
262 # want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
263 # external networks in legacy netvirt
264 if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
265   ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
266 else
267   ODL_PROVIDER_MAPPINGS=
268 fi
269
270 if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
271 cat >> ${local_conf_file_name} << EOF
272 # Uncomment lines below if odl-compute is to be used for l3 forwarding
273 Q_L3_ENABLED=True
274 ODL_L3=${ODL_L3}
275 PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
276 PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
277 PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
278 ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
279 EOF
280 fi
281 echo "local.conf Created...."
282 cat ${local_conf_file_name}
283 }
284
#######################################
# Install and configure HAProxy (on the last "compute" system) to load-balance
# Neutron's northbound requests across all ODL controllers: port 8080 for
# networking-odl and port 8181 for RESTCONF.
# Globals (read): NUM_OPENSTACK_SYSTEM, NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP,
#   WORKSPACE, SSH
# Writes: ${WORKSPACE}/install_ha_proxy.sh, ${WORKSPACE}/haproxy.cfg,
#   ${WORKSPACE}/deploy_ha_proxy.sh (all copied to and run on the proxy node)
#######################################
function configure_haproxy_for_neutron_requests () {
# The HAProxy host is the last provisioned OpenStack system (indirect lookup).
HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
ha_proxy_ip=${!odlmgrip}

cat > "${WORKSPACE}/install_ha_proxy.sh" << EOF
sudo systemctl stop firewalld
sudo yum -y install policycoreutils-python haproxy
EOF

# Static header of the HAProxy config; backend servers are appended below.
cat > "${WORKSPACE}/haproxy.cfg" << EOF
global
  daemon
  group  haproxy
  log  /dev/log local0
  maxconn  20480
  pidfile  /tmp/haproxy.pid
  user  haproxy

defaults
  log  global
  maxconn  4096
  mode  tcp
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s

listen opendaylight
  bind ${ha_proxy_ip}:8080
  balance source
EOF

# One backend entry per ODL controller for the 8080 listener.
for i in $(seq 1 "${NUM_ODL_SYSTEM}")
do
odlip=ODL_SYSTEM_${i}_IP
cat >> "${WORKSPACE}/haproxy.cfg" << EOF
  server controller-$i ${!odlip}:8080 check fall 5 inter 2000 rise 2
EOF
done

cat >> "${WORKSPACE}/haproxy.cfg" << EOF
listen opendaylight_rest
  bind ${ha_proxy_ip}:8181
  balance source
EOF

# One backend entry per ODL controller for the 8181 (RESTCONF) listener.
for i in $(seq 1 "${NUM_ODL_SYSTEM}")
do
odlip=ODL_SYSTEM_${i}_IP
cat >> "${WORKSPACE}/haproxy.cfg" << EOF
  server controller-rest-$i ${!odlip}:8181 check fall 5 inter 2000 rise 2
EOF
done

# Deployment script: point the systemd unit at /tmp/haproxy.cfg, allow the
# haproxy SELinux domain, then restart and show status.  Ends with 'true' so
# the remote script's exit status is always 0.
cat > "${WORKSPACE}/deploy_ha_proxy.sh" << EOF
sudo chown haproxy:haproxy /tmp/haproxy.cfg
sudo sed -i 's/\\/etc\\/haproxy\\/haproxy.cfg/\\/tmp\\/haproxy.cfg/g' /usr/lib/systemd/system/haproxy.service
sudo /usr/sbin/semanage permissive -a haproxy_t
sudo systemctl restart haproxy
sleep 3
sudo netstat -tunpl
sudo systemctl status haproxy
true
EOF
scp "${WORKSPACE}/install_ha_proxy.sh" "${ha_proxy_ip}:/tmp"
${SSH} "${ha_proxy_ip}" "sudo bash /tmp/install_ha_proxy.sh"
scp "${WORKSPACE}/haproxy.cfg" "${ha_proxy_ip}:/tmp"
scp "${WORKSPACE}/deploy_ha_proxy.sh" "${ha_proxy_ip}:/tmp"
${SSH} "${ha_proxy_ip}" "sudo bash /tmp/deploy_ha_proxy.sh"
}
359
#######################################
# Kill karaf on every ODL controller, then gather logs from all ODL and
# OpenStack nodes into ${WORKSPACE}/archives.
# Despite the name, this function does NOT exit — callers are expected to
# `exit` themselves after it returns.
# Globals (read): NUM_ODL_SYSTEM, ODL_SYSTEM_*_IP, BUNDLEFOLDER, WORKSPACE,
#   OPENSTACK_CONTROL_NODE_IP, NUM_OPENSTACK_SYSTEM,
#   OPENSTACK_COMPUTE_NODE_*_IP, SSH
#######################################
function collect_logs_and_exit (){
set +e  # We do not want to create red dot just because something went wrong while fetching logs.
# Stop karaf first so its log files are quiescent before we copy them.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "killing karaf process..."
    ${SSH} "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

# Helper script run on each OpenStack node to capture OVS kernel-module and
# conntrack diagnostics.
cat > extra_debug.sh << EOF
echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
echo -e "\ngrep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
grep ct_ /var/log/openvswitch/ovs-vswitchd.log
EOF

sleep 5
# FIXME: Do not create .tar and gzip before copying.
# Pull each controller's karaf log dir as a tar, unpack locally with the
# karaf dir renamed to odl<i>_karaf so multiple controllers don't collide.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    ${SSH} "${!CONTROLLERIP}"  "cp -r /tmp/${BUNDLEFOLDER}/data/log /tmp/odl_log"
    ${SSH} "${!CONTROLLERIP}"  "tar -cf /tmp/odl${i}_karaf.log.tar /tmp/odl_log/*"
    scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
    tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
    rm ${WORKSPACE}/odl${i}_karaf.log.tar
done

# Since this log collection work is happening before the archive build macro which also
# creates the ${WORKSPACE}/archives dir, we have to do it here first.  The mkdir in the
# archives build step will essentially be a noop.
mkdir -p ${WORKSPACE}/archives

# Control Node
OS_CTRL_FOLDER="control"
mkdir -p ${OS_CTRL_FOLDER}
scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
scp ${OPENSTACK_CONTROL_NODE_IP}:/tmp/extra_debug.log ${OS_CTRL_FOLDER}/extra_debug.log
mv local.conf_control ${OS_CTRL_FOLDER}/local.conf
mv ${OS_CTRL_FOLDER} ${WORKSPACE}/archives/

# Compute Nodes
# NUM_OPENSTACK_SYSTEM - 1 because one system is the control node (and the
# last one may be the HAProxy host in clustered jobs).
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    OS_COMPUTE_FOLDER="compute_${i}"
    mkdir -p ${OS_COMPUTE_FOLDER}
    scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
    scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
    rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
    scp extra_debug.sh ${!OSIP}:/tmp
    ${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
    scp ${!OSIP}:/tmp/extra_debug.log ${OS_COMPUTE_FOLDER}/extra_debug.log
    mv local.conf_compute_${!OSIP} ${OS_COMPUTE_FOLDER}/local.conf
    mv ${OS_COMPUTE_FOLDER} ${WORKSPACE}/archives/
done

# Rename any remaining local.conf* files to *.log so Jenkins archives them.
ls local.conf* | xargs -I % mv % %.log
}
423
# Helper script run (via sudo) on each node to open up all traffic between
# the test systems.  Trailing 'true' keeps the remote exit status at 0 even
# if a service was not running.
cat > "${WORKSPACE}/disable_firewall.sh" << EOF
sudo systemctl stop firewalld
sudo systemctl stop iptables
true
EOF

# Node preparation script: quiesce network managers, work around the mysql
# hostname-resolution failure, create the br100 bridge, and clone DevStack at
# the requested branch.  $OPENSTACK_BRANCH expands here (generation time);
# escaped \${HOSTNAME} expands on the remote node.
cat > "${WORKSPACE}/get_devstack.sh" << EOF
sudo systemctl stop firewalld
sudo yum install bridge-utils -y
#Disable NetworkManager and kill dhclient and dnsmasq
sudo systemctl stop NetworkManager
sudo killall dhclient
sudo killall dnsmasq
#Workaround for mysql failure
echo "127.0.0.1    localhost \${HOSTNAME}" > /tmp/hosts
echo "::1   localhost  \${HOSTNAME}" >> /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
sudo /usr/sbin/brctl addbr br100
#sudo ifconfig eth0 mtu 2000
sudo mkdir /opt/stack
sudo chmod 777 /opt/stack
cd /opt/stack
git clone https://git.openstack.org/openstack-dev/devstack
cd devstack
git checkout $OPENSTACK_BRANCH
EOF
451
echo "Create HAProxy if needed"
if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
 echo "Need to configure HAProxy"
 configure_haproxy_for_neutron_requests
fi

# os_node_list collects every node whose stack.sh we must poll for completion.
os_node_list=()
echo "Stack the Control Node"
scp ${WORKSPACE}/get_devstack.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/get_devstack.sh"
create_control_node_local_conf
scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf


# Workaround for successful stacking with Mitaka
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then

  # Workaround for problems with latest versions/specified versions in requirements of openstack
  # Openstacksdk,libvirt-python -> the current version does not work with Mitaka due to some requirements
  # conflict and breaks when trying to stack
  # paramiko -> Problems with tempest tests due to paramiko incompatibility with pycrypto.
  # the problem has been solved with version 1.17. If the latest version of paramiko is used, it causes
  # other timeout problems
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt; sed -i /paramiko/d upper-constraints.txt"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install deprecation"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/openstack/python-openstacksdk; cd python-openstacksdk; sudo python setup.py install"
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/paramiko/paramiko; cd paramiko; git checkout 1.17; sudo python setup.py install"
fi

# Launch stack.sh in the background (nohup) and detach; progress is checked
# later via check_stacking.sh.
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ps -ef | grep stack.sh"
ssh ${OPENSTACK_CONTROL_NODE_IP} "ls -lrt /opt/stack/devstack/nohup.out"
os_node_list+=(${OPENSTACK_CONTROL_NODE_IP})

#Workaround for stable/newton jobs
# NOTE(review): this runs AFTER stack.sh was already launched above — verify
# the constraint edit is applied in time to matter for stacking.
if [ "${ODL_ML2_BRANCH}" == "stable/newton" ]; then
  ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/newton; sed -i /appdirs/d upper-constraints.txt"
fi


# Prepare and stack every compute node (NUM_OPENSTACK_SYSTEM - 1 of them).
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
    COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
    scp ${WORKSPACE}/get_devstack.sh  ${!COMPUTEIP}:/tmp
    ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh"
    create_compute_node_local_conf ${!COMPUTEIP}
    scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
    if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
       ssh ${!COMPUTEIP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /libvirt-python/d upper-constraints.txt"
    fi
    ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
    ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
    os_node_list+=(${!COMPUTEIP})
done
506
507 cat > ${WORKSPACE}/check_stacking.sh << EOF
508 > /tmp/stack_progress
509 ps -ef | grep "stack.sh" | grep -v grep
510 ret=\$?
511 if [ \${ret} -eq 1 ]; then
512   grep "This is your host IP address:" /opt/stack/devstack/nohup.out
513   if [ \$? -eq 0 ]; then
514      echo "Stacking Complete" > /tmp/stack_progress
515   else
516      echo "Stacking Failed" > /tmp/stack_progress
517   fi
518 elif [ \${ret} -eq 0 ]; then
519   echo "Still Stacking" > /tmp/stack_progress
520 fi
521 EOF
522
523 #the checking is repeated for an hour
524 iteration=0
525 in_progress=1
526 while [ ${in_progress} -eq 1 ]; do
527 iteration=$(($iteration + 1))
528 for index in ${!os_node_list[@]}
529 do
530 echo "Check the status of stacking in ${os_node_list[index]}"
531 scp ${WORKSPACE}/check_stacking.sh  ${os_node_list[index]}:/tmp
532 ${SSH} ${os_node_list[index]} "bash /tmp/check_stacking.sh"
533 scp ${os_node_list[index]}:/tmp/stack_progress .
534 #debug
535 cat stack_progress
536 stacking_status=`cat stack_progress`
537 if [ "$stacking_status" == "Still Stacking" ]; then
538   continue
539 elif [ "$stacking_status" == "Stacking Failed" ]; then
540   collect_logs_and_exit
541   exit 1
542 elif [ "$stacking_status" == "Stacking Complete" ]; then
543   unset os_node_list[index]
544   if  [ ${#os_node_list[@]} -eq 0 ]; then
545      in_progress=0
546   fi
547 fi
548 done
549  echo "sleep for a minute before the next check"
550  sleep 60
551  if [ ${iteration} -eq 60 ]; then
552   collect_logs_and_exit
553   exit 1
554  fi
555 done
556
557 #Need to disable firewalld and iptables in control node
558 echo "Stop Firewall in Control Node for compute nodes to be able to reach the ports and add to hypervisor-list"
559 scp ${WORKSPACE}/disable_firewall.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
560 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo bash /tmp/disable_firewall.sh"
561 echo "sleep for a minute and print hypervisor-list"
562 sleep 60
563 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list"
564 # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors
565 # will be the same as the number of openstack systems. However, if we are doing multinode openstack then the
566 # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors
567 # is one less than the total number of openstack systems
568 if [ "${NUM_OPENSTACK_SYSTEM}" -eq 1 ]; then
569   expected_num_hypervisors=1
570 else
571   expected_num_hypervisors=$((NUM_OPENSTACK_SYSTEM - 1))
572 fi
573 num_hypervisors=$(${SSH} ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
574 if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
575   echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
576   collect_logs_and_exit
577   exit 1
578 fi
579
580 #Need to disable firewalld and iptables in compute nodes as well
581 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
582 do
583     OSIP=OPENSTACK_COMPUTE_NODE_${i}_IP
584     scp ${WORKSPACE}/disable_firewall.sh "${!OSIP}:/tmp"
585     ${SSH} "${!OSIP}" "sudo bash /tmp/disable_firewall.sh"
586 done
587
588 # upgrading pip, urllib3 and httplib2 so that tempest tests can be run on ${OPENSTACK_CONTROL_NODE_IP}
589 # this needs to happen after devstack runs because it seems devstack is pulling in specific versions
590 # of these libs that are not working for tempest.
591 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install --upgrade pip"
592 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install urllib3 --upgrade"
593 ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install httplib2 --upgrade"
594
595 for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
596 do
597     IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP
598     COMPUTE_IPS[$((i-1))]=${!IP_VAR}
599 done
600
# External Network
echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..."
# Create PUBLIC_BRIDGE on every node.  Note: $((devstack_index++)) expands
# LOCALLY (before ssh runs), giving each node a unique hwaddr suffix.
devstack_index=1
for ip in ${OPENSTACK_CONTROL_NODE_IP} ${COMPUTE_IPS[*]}
do
    # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack
    ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE"
    ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))"
done

# Control Node - PUBLIC_BRIDGE will act as the external router
GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
# Build a full VXLAN mesh between control and each compute on port 9876.
compute_index=1
for compute_ip in ${COMPUTE_IPS[*]}
do
    # Tunnel from controller to compute
    PORT_NAME=compute$((compute_index++))_vxlan
    ${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="${OPENSTACK_CONTROL_NODE_IP}" options:remote_ip="$compute_ip" options:dst_port=9876 options:key=flow"

    # Tunnel from compute to controller
    PORT_NAME=control_vxlan
    ${SSH} ${compute_ip} "sudo ovs-vsctl add-port $PUBLIC_BRIDGE $PORT_NAME -- set interface $PORT_NAME type=vxlan options:local_ip="$compute_ip" options:remote_ip="${OPENSTACK_CONTROL_NODE_IP}" options:dst_port=9876 options:key=flow"
done

# HA_PROXY_IP handed to Robot: the HAProxy node in clustered jobs (hosted on
# the last "compute" system), otherwise the single ODL system itself.
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
  HA_PROXY_INDEX=${NUM_OPENSTACK_SYSTEM}
  odlmgrip=OPENSTACK_COMPUTE_NODE_${HA_PROXY_INDEX}_IP
  HA_PROXY_IP=${!odlmgrip}
else
  HA_PROXY_IP=${ODL_SYSTEM_IP}
fi
echo "Locating test plan to use..."
# Prefer the stream-specific testplan; fall back to the generic one.
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
# Rewrite suite paths from the repo-relative "integration" prefix to this
# workspace (sed reads the file directly — no useless cat).
sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
cat testplan.txt

# Strip comment/blank lines and join the suite paths into one space-separated
# list for pybot (grep -E replaces the deprecated egrep).
SUITES=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')

echo "Starting Robot test suites ${SUITES} ..."
# please add pybot -v arguments on a single line and alphabetized
# '|| true' keeps the job alive so logs are still collected on test failure.
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude \
    -v BUNDLEFOLDER:${BUNDLEFOLDER} \
    -v BUNDLE_URL:${ACTUALBUNDLEURL} \
    -v CONTROLLER_USER:${USER} \
    -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
    -v HA_PROXY_IP:${HA_PROXY_IP} \
    -v JDKVERSION:${JDKVERSION} \
    -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
    -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
    -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \
    -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \
    -v ODL_STREAM:${DISTROSTREAM} \
    -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} \
    -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_1_IP} \
    -v ODL_SYSTEM_2_IP:${ODL_SYSTEM_2_IP} \
    -v ODL_SYSTEM_3_IP:${ODL_SYSTEM_3_IP} \
    -v OS_CONTROL_NODE_IP:${OPENSTACK_CONTROL_NODE_IP} \
    -v OPENSTACK_BRANCH:${OPENSTACK_BRANCH} \
    -v OS_COMPUTE_1_IP:${OPENSTACK_COMPUTE_NODE_1_IP} \
    -v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
    -v OS_USER:${USER} \
    -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
    -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
    -v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
    -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
    -v USER_HOME:${HOME} \
    -v WORKSPACE:/tmp \
    ${TESTOPTIONS} ${SUITES} || true

echo "Examining the files in data/log and checking filesize"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

echo "Tests Executed"
# If tempest ran, convert its .testrepository subunit streams to an HTML
# report on the control node and fetch it into the workspace.
DEVSTACK_TEMPEST_DIR="/opt/stack/tempest"
if $(ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '[ -f ${DEVSTACK_TEMPEST_DIR}/.testrepository/0 ]'"); then # if Tempest results exist
    ssh ${OPENSTACK_CONTROL_NODE_IP} "for I in \$(sudo ls ${DEVSTACK_TEMPEST_DIR}/.testrepository/ | grep -E '^[0-9]+$'); do sudo sh -c \"${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/subunit-1to2 < ${DEVSTACK_TEMPEST_DIR}/.testrepository/\${I} >> ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt\"; done"
    ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo sh -c '${DEVSTACK_TEMPEST_DIR}/.tox/tempest/bin/python ${DEVSTACK_TEMPEST_DIR}/.tox/tempest/lib/python2.7/site-packages/os_testr/subunit2html.py ${DEVSTACK_TEMPEST_DIR}/subunit_log.txt ${DEVSTACK_TEMPEST_DIR}/tempest_results.html'"
    scp ${OPENSTACK_CONTROL_NODE_IP}:${DEVSTACK_TEMPEST_DIR}/tempest_results.html ${WORKSPACE}/
fi
collect_logs_and_exit

true  # perhaps Jenkins is testing last exit code
# vim: ts=4 sw=4 sts=4 et ft=sh :