Merge "Add common functions to openstack deploy script"
[releng/builder.git] / jjb / integration / integration-deploy-controller-run-test.sh
#@IgnoreInspection BashAddShebang
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh
# script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source ${ROBOT_VENV}/bin/activate
source /tmp/common-functions.sh ${BUNDLEFOLDER}
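# common-functions.sh is expected to define the helpers and config-file path
# variables used below (e.g. set_java_vars, MAVENCONF, FEATURESCONF, CUSTOMPROP,
# LOGCONF), derived from the ${BUNDLEFOLDER} argument.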

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    echo "Configure cluster"
    AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
    MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
    MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
fi

if [ "${CONTROLLERSCOPE}" == 'all' ]; then
    ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
    export CONTROLLERMEM="3072m"
    COOLDOWN_PERIOD="180"
else
    ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    COOLDOWN_PERIOD="60"
fi

# Some versions of Jenkins Job Builder leave the feature list containing spaces
# and ending in a newline. Remove all of that.
ACTUALFEATURES=`echo "${ACTUALFEATURES}" | tr -d '\n \r'`
echo "ACTUALFEATURES: ${ACTUALFEATURES}"

# When installing features via the Karaf shell, a space-separated list of
# features (ACTUALFEATURES) is needed.
SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

if [ -f "${WORKSPACE}/test/csit/scriptplans/${TESTPLAN}" ]; then
    echo "scriptplan exists!!!"
    echo "Changing the scriptplan path..."
    cat ${WORKSPACE}/test/csit/scriptplans/${TESTPLAN} | sed "s:integration:${WORKSPACE}:" > scriptplan.txt
    cat scriptplan.txt
    for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' scriptplan.txt ); do
        echo "Executing ${line}..."
        # shellcheck source=${line} disable=SC1091
        source ${line}
    done
fi

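# NOTE: The heredoc below is intentionally unquoted, so ${VAR} references expand
# now, on the Jenkins minion, while \$-escaped expressions are evaluated later,
# when the generated configuration-script.sh runs on the controller.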
cat > ${WORKSPACE}/configuration-script.sh <<EOF
set -x
source /tmp/common-functions.sh ${BUNDLEFOLDER}

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution..."
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
cat ${FEATURESCONF}

if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
    echo "Enable the l3.fwd in custom.properties..."
    echo "ovsdb.l3.fwd.enabled=yes" >> ${CUSTOMPROP}
fi
cat ${CUSTOMPROP}

echo "Configuring the log..."
sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
# FIXME: Make log size limit configurable from build parameter.
sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
echo "log4j.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
# Add custom logging levels
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated values like "module:level module2:level2"
# where module is abbreviated and does not include org.opendaylight
unset IFS
if [ -n "${CONTROLLERDEBUGMAP}" ]; then
    for kv in ${CONTROLLERDEBUGMAP}; do
        module=\${kv%%:*}
        level=\${kv#*:}
        if [ -n "\${module}" ] && [ -n "\${level}" ]; then
            echo "log4j.logger.org.opendaylight.\${module} = \${level}" >> ${LOGCONF}
        fi
    done
fi
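# Example: a CONTROLLERDEBUGMAP of "netvirt:TRACE ovsdb:DEBUG" appends
#   log4j.logger.org.opendaylight.netvirt = TRACE
#   log4j.logger.org.opendaylight.ovsdb = DEBUG
# to the log configuration.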
echo "cat ${LOGCONF}"
cat ${LOGCONF}

set_java_vars "${JAVA_HOME}"

echo "Listing all open ports on controller system..."
netstat -pnatu

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then

    # Copy shard file if exists
    if [ -f /tmp/custom_shard_config.txt ]; then
        echo "Custom shard config exists!!!"
        echo "Copying the shard config..."
        cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
    fi

    echo "Configuring cluster"
    /tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 \$2

    echo "Dump akka.conf"
    cat ${AKKACONF}

    echo "Dump modules.conf"
    cat ${MODULESCONF}

    echo "Dump module-shards.conf"
    cat ${MODULESHARDSCONF}
fi

EOF
# cat > ${WORKSPACE}/configuration-script.sh <<EOF

# Create the startup script to be run on controller.
cat > ${WORKSPACE}/startup-script.sh <<EOF

echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
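# KARAF_REDIRECT is presumably picked up by the Karaf bin/start script, which
# then appends console output to that file (hence the mkdir below).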
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start

EOF
# cat > ${WORKSPACE}/startup-script.sh <<EOF

cat > ${WORKSPACE}/post-startup-script.sh <<EOF

if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$((loop_count+1));
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

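# Poll RESTCONF until the controller answers with HTTP 200. For clustered jobs
# (ENABLE_HAPROXY_FOR_NEUTRON=yes) also require this member's inventory-config
# shard to report "status":200 via Jolokia. Give up after roughly 600 seconds.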
echo "Waiting for controller to come up..."
COUNT="0"
while true; do
    RESP="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/restconf/modules -o /dev/null )"
    echo \$RESP
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        SHARD="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=\member-\$1-shard-inventory-config,type=DistributedConfigDatastore)"
        echo \$SHARD
    fi
    if ([[ \$RESP == *"200"* ]] && ([[ "${ENABLE_HAPROXY_FOR_NEUTRON}" != "yes" ]] || [[ \$SHARD  == *'"status":200'* ]])); then
        echo Controller is UP
        break
    elif (( "\$COUNT" > "600" )); then
        echo Timeout Controller DOWN
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Listing all open ports on controller system"
        netstat -pnatu
        exit 1
    else
        COUNT=\$(( \${COUNT} + 1 ))
        sleep 1
        if [[ \$((\$COUNT % 5)) == 0 ]]; then
            echo already waited \${COUNT} seconds...
        fi
    fi
done

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'

EOF
# cat > ${WORKSPACE}/post-startup-script.sh <<EOF

[ "$NUM_OPENSTACK_SITES" ] || NUM_OPENSTACK_SITES=1
NUM_ODLS_PER_SITE=$((NUM_ODL_SYSTEM / NUM_OPENSTACK_SITES))
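# Controllers are split evenly across OpenStack sites. For example, with
# NUM_ODL_SYSTEM=6 and NUM_OPENSTACK_SITES=2, site 1 uses ODL_SYSTEM_[1-3]_IP
# and site 2 uses ODL_SYSTEM_[4-6]_IP. ${!odl_ip} below is bash indirect
# expansion: it resolves the variable whose name is stored in odl_ip.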
for i in `seq 1 ${NUM_OPENSTACK_SITES}`
do
    # Get full list of ODL nodes for this site
    odl_node_list=
    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        odl_node_list="${odl_node_list} ${!odl_ip}"
    done

    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        # Copy over the config script to controller and execute it (parameters are used only for cluster)
        echo "Execute the configuration script on controller ${!odl_ip} for index $j with node list ${odl_node_list}"
        scp ${WORKSPACE}/configuration-script.sh ${!odl_ip}:/tmp
        ssh ${!odl_ip} "bash /tmp/configuration-script.sh ${j} '${odl_node_list}'"
    done
done

echo "Locating config plan to use..."
configplan_filepath="${WORKSPACE}/test/csit/configplans/${STREAMTESTPLAN}"
if [ ! -f "${configplan_filepath}" ]; then
    configplan_filepath="${WORKSPACE}/test/csit/configplans/${TESTPLAN}"
fi

if [ -f "${configplan_filepath}" ]; then
    echo "configplan exists!!!"
    echo "Changing the configplan path..."
    cat ${configplan_filepath} | sed "s:integration:${WORKSPACE}:" > configplan.txt
    cat configplan.txt
    for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' configplan.txt ); do
        echo "Executing ${line}..."
        # shellcheck source=${line} disable=SC1091
        source ${line}
    done
fi

# Copy over the startup script to controller and execute it.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
done

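# seed_index is the controller's member index within its site; it resets to 1
# after each site's worth of controllers, so every site is numbered from 1 when
# post-startup-script.sh queries its own shard via Jolokia.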
seed_index=1
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the post startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
    if [ $(( $i % (${NUM_ODL_SYSTEM} / ${NUM_OPENSTACK_SITES}) )) == 0 ]; then
        seed_index=1
    fi
done

echo "Cool down for ${COOLDOWN_PERIOD} seconds :)..."
sleep ${COOLDOWN_PERIOD}

echo "Generating controller variables..."
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
    echo "Let's take the karaf thread dump"
    ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
    pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
    echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
    ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
done
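# The *_threads_before.log dumps pair with the *_threads_after.log dumps taken
# after the test run, so thread state can be compared when debugging hangs.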

if [ ${NUM_OPENSTACK_SYSTEM} -gt 0 ]; then
    echo "Exiting without running tests; OpenStack will be deployed for testing"
    exit
fi

echo "Generating mininet variables..."
for i in `seq 1 ${NUM_TOOLS_SYSTEM}`
do
    MININETIP=TOOLS_SYSTEM_${i}_IP
    tools_variables=${tools_variables}" -v ${MININETIP}:${!MININETIP}"
done

echo "Locating test plan to use..."
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
cat testplan.txt

# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
    SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
else
    newsuites=""
    workpath="${WORKSPACE}/test/csit/suites"
    for suite in ${SUITES}; do
        fullsuite="${workpath}/${suite}"
        if [ -z "${newsuites}" ]; then
            newsuites+=${fullsuite}
        else
            newsuites+=" "${fullsuite}
        fi
    done
    SUITES=${newsuites}
fi
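# At this point SUITES is a space-separated list of suite paths under
# ${WORKSPACE}/test/csit/suites (either the whole resolved testplan or the
# explicitly requested suites), passed straight to pybot below.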

echo "Starting Robot test suites ${SUITES} ..."
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -e skip_if_${DISTROSTREAM} -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
-v JAVA_HOME:${JAVA_HOME} -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
-v CONTROLLER:${ODL_SYSTEM_IP} -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_IP} \
-v CONTROLLER_USER:${USER} -v ODL_SYSTEM_USER:${USER} \
-v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_IP} -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} -v TOOLS_SYSTEM_3_IP:${TOOLS_SYSTEM_3_IP} \
-v TOOLS_SYSTEM_4_IP:${TOOLS_SYSTEM_4_IP} -v TOOLS_SYSTEM_5_IP:${TOOLS_SYSTEM_5_IP} -v TOOLS_SYSTEM_6_IP:${TOOLS_SYSTEM_6_IP} \
-v TOOLS_SYSTEM_USER:${USER} -v JDKVERSION:${JDKVERSION} -v ODL_STREAM:${DISTROSTREAM} -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
-v MININET:${TOOLS_SYSTEM_IP} -v MININET1:${TOOLS_SYSTEM_2_IP} -v MININET2:${TOOLS_SYSTEM_3_IP} \
-v MININET3:${TOOLS_SYSTEM_4_IP} -v MININET4:${TOOLS_SYSTEM_5_IP} -v MININET5:${TOOLS_SYSTEM_6_IP} \
-v MININET_USER:${USER} -v USER_HOME:${HOME} ${TESTOPTIONS} ${SUITES} || true
# FIXME: Sort (at least -v) options alphabetically.

echo "Examining the files in data/log and checking filesize"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Let's take the karaf thread dump again..."
    ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
    pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
    echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
    ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
    echo "Killing ODL"
    set +e  # We do not want to create red dot just because something went wrong while fetching logs.
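    # Build a "kill -9 <pid>" command for every karaf-related process found by
    # ps and run the generated commands on the controller.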
    ssh "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

sleep 5
# FIXME: Unify the copy process between various scripts.
# TODO: Use rsync.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Compressing karaf.log ${i}"
    ssh ${!CONTROLLERIP} gzip --best /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    echo "Fetching compressed karaf.log ${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz" "odl${i}_karaf.log.gz" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz"
    # TODO: Should we compress the output log file as well?
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log" "odl${i}_karaf_console.log" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
    echo "Fetch GC logs"
    # FIXME: Put member index in filename, instead of directory name.
    mkdir -p "gclogs-${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/*.log" "gclogs-${i}/" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/*.log"
done

echo "Examine copied files"
ls -lt

true  # perhaps Jenkins is testing last exit code

# vim: ts=4 sw=4 sts=4 et ft=sh :