Merge "Increase ocata packer timeout from 60m to 75m"
[releng/builder.git] / jjb / integration / integration-deploy-controller-run-test.sh
#@IgnoreInspection BashAddShebang
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh
# script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source "${ROBOT_VENV}/bin/activate"


if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    echo "Configure cluster"
    AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
    MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
    MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
fi
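# The three config paths above are expanded into the generated configuration
# script below, where the cluster configuration files are dumped for debugging.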

if [ "${CONTROLLERSCOPE}" == 'all' ]; then
    ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
    export CONTROLLERMEM="3072m"
    COOLDOWN_PERIOD="180"
else
    ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    COOLDOWN_PERIOD="60"
fi
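# The 'all' scope boots the full compatible feature set, which needs a bigger
# heap (3072m) and a longer cool down period than a scope that only installs
# the requested features on top of odl-infrautils-ready.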

# Some versions of jenkins job builder result in feature list containing spaces
# and ending in newline. Remove all that.
ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
echo "ACTUALFEATURES: ${ACTUALFEATURES}"

# In case we want to install features via the karaf shell, a space-separated
# list of ACTUALFEATURES is needed.
SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"
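# For illustration (hypothetical feature names):
#   ACTUALFEATURES="odl-infrautils-ready,odl-restconf"
#   SPACE_SEPARATED_FEATURES="odl-infrautils-ready odl-restconf"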

if [ -f "${WORKSPACE}/test/csit/scriptplans/${TESTPLAN}" ]; then
    echo "scriptplan exists!!!"
    echo "Changing the scriptplan path..."
    sed "s:integration:${WORKSPACE}:" "${WORKSPACE}/test/csit/scriptplans/${TESTPLAN}" > scriptplan.txt
    cat scriptplan.txt
    for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' scriptplan.txt ); do
        echo "Executing ${line}..."
        # shellcheck source=${line} disable=SC1091
        source ${line}
    done
fi

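# The block below is written to configuration-script.sh and executed on every
# controller node. Plain ${VAR} references are expanded now, on the Jenkins
# minion; \$-escaped references are evaluated later on the controller itself.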
cat > ${WORKSPACE}/configuration-script.sh <<EOF

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution..."
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_TEST_STRING="features-integration-test"
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
cat ${FEATURESCONF}

if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
    echo "Enable the l3.fwd in custom.properties..."
    echo "ovsdb.l3.fwd.enabled=yes" >> ${CUSTOMPROP}
fi
cat ${CUSTOMPROP}

echo "Configuring the log..."
sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
# FIXME: Make log size limit configurable from build parameter.
sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
echo "log4j.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
# Add custom logging levels.
# CONTROLLERDEBUGMAP is expected to be a space-separated list of module:level
# pairs, e.g. "module:level module2:level2", where module is abbreviated and
# does not include the org.opendaylight prefix.
unset IFS
if [ -n "${CONTROLLERDEBUGMAP}" ]; then
    for kv in ${CONTROLLERDEBUGMAP}; do
        module=\${kv%%:*}
        level=\${kv#*:}
        if [ -n "\${module}" ] && [ -n "\${level}" ]; then
            echo "log4j.logger.org.opendaylight.\${module} = \${level}" >> ${LOGCONF}
        fi
    done
fi
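# For illustration (hypothetical values): CONTROLLERDEBUGMAP="netvirt:TRACE ovsdb:DEBUG"
# appends "log4j.logger.org.opendaylight.netvirt = TRACE" and
# "log4j.logger.org.opendaylight.ovsdb = DEBUG" to the log configuration above.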
cat ${LOGCONF}

set_java_vars

echo "Listing all open ports on controller system..."
netstat -pnatu

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then

    # Copy the custom shard config file if it exists.
    if [ -f /tmp/custom_shard_config.txt ]; then
        echo "Custom shard config exists!!!"
        echo "Copying the shard config..."
        cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
    fi

    echo "Configuring cluster"
    /tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 \$2

    echo "Dump akka.conf"
    cat ${AKKACONF}

    echo "Dump modules.conf"
    cat ${MODULESCONF}

    echo "Dump module-shards.conf"
    cat ${MODULESHARDSCONF}
fi

EOF

# Create the startup script to be run on the controller.
cat > ${WORKSPACE}/startup-script.sh <<EOF

echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start

EOF

cat > ${WORKSPACE}/post-startup-script.sh <<EOF

if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # Wait up to 60s for karaf port 8101 to open, polling every 5s.
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$((loop_count+1));
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

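# Readiness check: poll restconf/modules every second for up to 600 seconds;
# in a cluster (HAProxy enabled) also require the member's inventory-config
# shard to report status 200 via Jolokia before declaring the controller UP.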
171 echo "Waiting for controller to come up..."
172 COUNT="0"
173 while true; do
174     RESP="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/restconf/modules -o /dev/null )"
175     echo \$RESP
176     if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
177         SHARD="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=\member-\$1-shard-inventory-config,type=DistributedConfigDatastore)"
178         echo \$SHARD
179     fi
180     if ([[ \$RESP == *"200"* ]] && ([[ "${ENABLE_HAPROXY_FOR_NEUTRON}" != "yes" ]] || [[ \$SHARD  == *'"status":200'* ]])); then
181         echo Controller is UP
182         break
183     elif (( "\$COUNT" > "600" )); then
184         echo Timeout Controller DOWN
185         echo "Dumping first 500K bytes of karaf log..."
186         head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
187         echo "Dumping last 500K bytes of karaf log..."
188         tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
189         echo "Listing all open ports on controller system"
190         netstat -pnatu
191         exit 1
192     else
193         COUNT=\$(( \${COUNT} + 1 ))
194         sleep 1
195         if [[ \$((\$COUNT % 5)) == 0 ]]; then
196             echo already waited \${COUNT} seconds...
197         fi
198     fi
199 done
200
201 echo "Listing all open ports on controller system..."
202 netstat -pnatu
203
204 function exit_on_log_file_message {
205     echo "looking for \"\$1\" in log file"
206     if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
207         echo ABORTING: found "\$1"
208         echo "Dumping first 500K bytes of karaf log..."
209         head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
210         echo "Dumping last 500K bytes of karaf log..."
211         tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
212         exit 1
213     fi
214 }
215
216 exit_on_log_file_message 'BindException: Address already in use'
217 exit_on_log_file_message 'server is unhealthy'
218
219 EOF

[ "$NUM_OPENSTACK_SITES" ] || NUM_OPENSTACK_SITES=1
NUM_ODLS_PER_SITE=$((NUM_ODL_SYSTEM / NUM_OPENSTACK_SITES))
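# For illustration (hypothetical sizing): with NUM_ODL_SYSTEM=6 and
# NUM_OPENSTACK_SITES=2, each site gets NUM_ODLS_PER_SITE=3 controllers,
# i.e. ODL_SYSTEM_1..3_IP for site 1 and ODL_SYSTEM_4..6_IP for site 2.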
for i in $(seq 1 ${NUM_OPENSTACK_SITES})
do
    # Get full list of ODL nodes for this site
    odl_node_list=
    for j in $(seq 1 ${NUM_ODLS_PER_SITE})
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        odl_node_list="${odl_node_list} ${!odl_ip}"
    done

    for j in $(seq 1 ${NUM_ODLS_PER_SITE})
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        # Copy the config script to the controller and execute it (the parameters are only used for clustering).
        echo "Execute the configuration script on controller ${!odl_ip} for index $j with node list ${odl_node_list}"
        scp ${WORKSPACE}/configuration-script.sh ${!odl_ip}:/tmp
        ssh ${!odl_ip} "bash /tmp/configuration-script.sh ${j} '${odl_node_list}'"
    done
done

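# Prefer the stream-specific configplan (STREAMTESTPLAN); fall back to the
# generic TESTPLAN configplan when no stream-specific file exists.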
243 echo "Locating config plan to use..."
244 configplan_filepath="${WORKSPACE}/test/csit/configplans/${STREAMTESTPLAN}"
245 if [ ! -f "${configplan_filepath}" ]; then
246     configplan_filepath="${WORKSPACE}/test/csit/configplans/${TESTPLAN}"
247 fi
248
249 if [ -f "${configplan_filepath}" ]; then
250     echo "configplan exists!!!"
251     echo "Changing the configplan path..."
252     cat ${configplan_filepath} | sed "s:integration:${WORKSPACE}:" > configplan.txt
253     cat configplan.txt
254     for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' configplan.txt ); do
255         echo "Executing ${line}..."
256         # shellcheck source=${line} disable=SC1091
257         source ${line}
258     done
259 fi

# Copy the startup script to each controller and execute it.
for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
done

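# seed_index is the member index passed to post-startup-script.sh as its first
# argument; it is reset after each site so numbering restarts at 1 per site.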
seed_index=1
for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the post startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
    if [ $(( $i % (${NUM_ODL_SYSTEM} / ${NUM_OPENSTACK_SITES}) )) == 0 ]; then
        seed_index=1
    fi
done

echo "Cool down for ${COOLDOWN_PERIOD} seconds :)..."
sleep ${COOLDOWN_PERIOD}

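# Collect per-controller variable definitions in Robot's -v format and take a
# pre-test karaf thread dump on each controller, for comparison with the
# post-test dump taken after the suites finish.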
285 echo "Generating controller variables..."
286 for i in `seq 1 ${NUM_ODL_SYSTEM}`
287 do
288     CONTROLLERIP=ODL_SYSTEM_${i}_IP
289     odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
290     echo "Lets's take the karaf thread dump"
291     ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
292     pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
293     echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
294     ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
295 done

if [ "${NUM_OPENSTACK_SYSTEM}" -gt 0 ]; then
    echo "Exiting without running tests to deploy openstack for testing"
    exit
fi

echo "Generating mininet variables..."
for i in $(seq 1 ${NUM_TOOLS_SYSTEM})
do
    MININETIP=TOOLS_SYSTEM_${i}_IP
    tools_variables=${tools_variables}" -v ${MININETIP}:${!MININETIP}"
done

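# Test plan resolution mirrors the configplan lookup above: the stream-specific
# STREAMTESTPLAN is preferred, with TESTPLAN as the fallback.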
309 echo "Locating test plan to use..."
310 testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
311 if [ ! -f "${testplan_filepath}" ]; then
312     testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
313 fi
314
315 echo "Changing the testplan path..."
316 cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
317 cat testplan.txt

# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
    SUITES=$(egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
else
    newsuites=""
    workpath="${WORKSPACE}/test/csit/suites"
    for suite in ${SUITES}; do
        fullsuite="${workpath}/${suite}"
        if [ -z "${newsuites}" ]; then
            newsuites+=${fullsuite}
        else
            newsuites+=" "${fullsuite}
        fi
    done
    SUITES=${newsuites}
fi
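# For illustration (hypothetical suite path): SUITES="openflowplugin/flows.robot"
# becomes "${WORKSPACE}/test/csit/suites/openflowplugin/flows.robot".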

echo "Starting Robot test suites ${SUITES} ..."
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -e skip_if_${DISTROSTREAM} -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
-v JAVA_HOME:${JAVA_HOME} -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
-v CONTROLLER:${ODL_SYSTEM_IP} -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_IP} \
-v CONTROLLER_USER:${USER} -v ODL_SYSTEM_USER:${USER} \
-v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_IP} -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} -v TOOLS_SYSTEM_3_IP:${TOOLS_SYSTEM_3_IP} \
-v TOOLS_SYSTEM_4_IP:${TOOLS_SYSTEM_4_IP} -v TOOLS_SYSTEM_5_IP:${TOOLS_SYSTEM_5_IP} -v TOOLS_SYSTEM_6_IP:${TOOLS_SYSTEM_6_IP} \
-v TOOLS_SYSTEM_USER:${USER} -v JDKVERSION:${JDKVERSION} -v ODL_STREAM:${DISTROSTREAM} -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
-v MININET:${TOOLS_SYSTEM_IP} -v MININET1:${TOOLS_SYSTEM_2_IP} -v MININET2:${TOOLS_SYSTEM_3_IP} \
-v MININET3:${TOOLS_SYSTEM_4_IP} -v MININET4:${TOOLS_SYSTEM_5_IP} -v MININET5:${TOOLS_SYSTEM_6_IP} \
-v MININET_USER:${USER} -v USER_HOME:${HOME} ${TESTOPTIONS} ${SUITES} || true
# FIXME: Sort (at least -v) options alphabetically.
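# The "|| true" on the pybot invocation keeps the job going even when suites
# fail, so the log collection and teardown steps below always run.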

echo "Examining the files in data/log and checking file sizes"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Let's take the karaf thread dump again..."
    ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
    pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
    echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
    ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
    echo "Killing ODL"
    set +e  # We do not want to create a red dot just because something went wrong while fetching logs.
    ssh "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

sleep 5
# FIXME: Unify the copy process between various scripts.
# TODO: Use rsync.
for i in $(seq 1 ${NUM_ODL_SYSTEM})
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Compressing karaf.log ${i}"
    ssh ${!CONTROLLERIP} gzip --best /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    echo "Fetching compressed karaf.log ${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz" "odl${i}_karaf.log.gz" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz"
    # TODO: Should we compress the output log file as well?
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log" "odl${i}_karaf_console.log" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
    echo "Fetch GC logs"
    # FIXME: Put member index in filename, instead of directory name.
    mkdir -p "gclogs-${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/*.log" "gclogs-${i}/" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/*.log"
done

echo "Examine copied files"
ls -lt

true  # perhaps Jenkins is testing last exit code

# vim: ts=4 sw=4 sts=4 et ft=sh :