#@IgnoreInspection BashAddShebang
# Activate robotframework virtualenv
# ${ROBOT_VENV} comes from the integration-install-robotframework.sh
# script.
# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
source ${ROBOT_VENV}/bin/activate

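# This script relies on environment variables injected by the Jenkins job
# templates, e.g. WORKSPACE, BUNDLE, BUNDLEFOLDER, BUNDLEVERSION, ACTUAL_BUNDLE_URL,
# CONTROLLERFEATURES, CONTROLLERSCOPE, TESTPLAN, STREAMTESTPLAN, NUM_ODL_SYSTEM,
# NUM_TOOLS_SYSTEM and the per-node ODL_SYSTEM_<n>_IP / TOOLS_SYSTEM_<n>_IP
# addresses; none of them are defined in this file.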
if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
    echo "Configure cluster"
    AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
    MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
    MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
fi

if [ "${CONTROLLERSCOPE}" == 'all' ]; then
    ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
    export CONTROLLERMEM="3072m"
    COOLDOWN_PERIOD="180"
else
    ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
    COOLDOWN_PERIOD="60"
fi

# Some versions of Jenkins Job Builder result in a feature list that contains
# spaces and ends in a newline. Remove all of that.
ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
echo "ACTUALFEATURES: ${ACTUALFEATURES}"

# In case we want to install features via the karaf shell, a space-separated list
# of ACTUALFEATURES is needed.
SPACE_SEPARATED_FEATURES=$(echo "${ACTUALFEATURES}" | tr ',' ' ')
echo "SPACE_SEPARATED_FEATURES: ${SPACE_SEPARATED_FEATURES}"

if [ -f "${WORKSPACE}/test/csit/scriptplans/${TESTPLAN}" ]; then
    echo "scriptplan exists!!!"
    echo "Changing the scriptplan path..."
    cat ${WORKSPACE}/test/csit/scriptplans/${TESTPLAN} | sed "s:integration:${WORKSPACE}:" > scriptplan.txt
    cat scriptplan.txt
    for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' scriptplan.txt ); do
        echo "Executing ${line}..."
        # shellcheck source=${line} disable=SC1091
        source ${line}
    done
fi

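# Generate the configuration script that each controller will run. The heredoc
# below is unquoted, so ${VAR} references are expanded right now on this Jenkins
# node, while \$-escaped expressions are written out literally and only evaluated
# later, when the script runs on the controller.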
cat > ${WORKSPACE}/configuration-script.sh <<EOF

echo "Changing to /tmp"
cd /tmp

echo "Downloading the distribution..."
wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'

echo "Extracting the new controller..."
unzip -q ${BUNDLE}

echo "Adding external repositories..."
sed -ie "s%org.ops4j.pax.url.mvn.repositories=%org.ops4j.pax.url.mvn.repositories=http://repo1.maven.org/maven2@id=central, http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external, http://zodiac.springsource.com/maven/bundles/release@id=gemini, http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases, https://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases, https://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases%g" ${MAVENCONF}
cat ${MAVENCONF}

if [[ "$USEFEATURESBOOT" == "True" ]]; then
    echo "Configuring the startup features..."
    sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi

FEATURE_INDEX_STRING="features-integration-index"
FEATURE_TEST_STRING="features-integration-test"
if [[ "$KARAF_VERSION" == "karaf4" ]]; then
    FEATURE_INDEX_STRING="features-index"
    FEATURE_TEST_STRING="features-test"
fi

sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
cat ${FEATURESCONF}

if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
    echo "Enable the l3.fwd in custom.properties..."
    echo "ovsdb.l3.fwd.enabled=yes" >> ${CUSTOMPROP}
fi
cat ${CUSTOMPROP}

echo "Configuring the log..."
sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
# FIXME: Make log size limit configurable from build parameter.
sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
echo "log4j.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
# Add custom logging levels
# CONTROLLERDEBUGMAP is expected to be a key:value map of space separated values like "module:level module2:level2"
# where module is abbreviated and does not include org.opendaylight
unset IFS
if [ -n "${CONTROLLERDEBUGMAP}" ]; then
    for kv in ${CONTROLLERDEBUGMAP}; do
        module=\${kv%%:*}
        level=\${kv#*:}
        if [ -n "\${module}" ] && [ -n "\${level}" ]; then
            echo "log4j.logger.org.opendaylight.\${module} = \${level}" >> ${LOGCONF}
        fi
    done
fi
cat ${LOGCONF}

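# NOTE: set_java_vars is not defined in this script; it is assumed to be made
# available to the generated configuration script by the surrounding CSIT job
# setup and, judging by its name, prepares the Java/JVM options before the
# controller is started.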
set_java_vars

echo "Listing all open ports on controller system..."
netstat -pnatu

if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then

    # Copy shard file if exists
    if [ -f /tmp/custom_shard_config.txt ]; then
        echo "Custom shard config exists!!!"
        echo "Copying the shard config..."
        cp /tmp/custom_shard_config.txt /tmp/${BUNDLEFOLDER}/bin/
    fi

    echo "Configuring cluster"
    /tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 \$2

    echo "Dump akka.conf"
    cat ${AKKACONF}

    echo "Dump modules.conf"
    cat ${MODULESCONF}

    echo "Dump module-shards.conf"
    cat ${MODULESHARDSCONF}
fi

EOF

# Create the startup script to be run on controller.
cat > ${WORKSPACE}/startup-script.sh <<EOF

echo "Redirecting karaf console output to karaf_console.log"
export KARAF_REDIRECT="/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
mkdir -p /tmp/${BUNDLEFOLDER}/data/log

echo "Starting controller..."
/tmp/${BUNDLEFOLDER}/bin/start

EOF

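# Create the post-startup script to be run on controller: when features are not
# installed via featuresBoot, it installs them one by one over the karaf ssh
# console, then polls RESTCONF (and, for clustered haproxy jobs, the config
# datastore shard via Jolokia) until the controller is up.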
cat > ${WORKSPACE}/post-startup-script.sh <<EOF

if [[ "$USEFEATURESBOOT" != "True" ]]; then

    # wait up to 60s for karaf port 8101 to be opened, polling every 5s
    loop_count=0;
    until [[ \$loop_count -ge 12 ]]; do
        netstat -na | grep 8101 && break;
        loop_count=\$((loop_count+1));
        sleep 5;
    done

    echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
    for feature in ${SPACE_SEPARATED_FEATURES}; do
        sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                             -o UserKnownHostsFile=/dev/null \
                             -o LogLevel=error \
                             -p 8101 karaf@localhost \
                             feature:install --no-auto-refresh \$feature;
    done

    echo "ssh to karaf console to list -i installed features"
    sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                         -o UserKnownHostsFile=/dev/null \
                         -o LogLevel=error \
                         -p 8101 karaf@localhost \
                         feature:list -i
fi

echo "Waiting for controller to come up..."
COUNT="0"
while true; do
    RESP="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/restconf/modules -o /dev/null )"
    echo \$RESP
    if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
        SHARD="\$( curl --user admin:admin -sL -w "%{http_code} %{url_effective}\\n" http://localhost:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=\member-\$1-shard-inventory-config,type=DistributedConfigDatastore)"
        echo \$SHARD
    fi
    if ([[ \$RESP == *"200"* ]] && ([[ "${ENABLE_HAPROXY_FOR_NEUTRON}" != "yes" ]] || [[ \$SHARD  == *'"status":200'* ]])); then
        echo Controller is UP
        break
    elif (( "\$COUNT" > "600" )); then
        echo Timeout Controller DOWN
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Listing all open ports on controller system"
        netstat -pnatu
        exit 1
    else
        COUNT=\$(( \${COUNT} + 1 ))
        sleep 1
        if [[ \$((\$COUNT % 5)) == 0 ]]; then
            echo already waited \${COUNT} seconds...
        fi
    fi
done

echo "Listing all open ports on controller system..."
netstat -pnatu

function exit_on_log_file_message {
    echo "looking for \"\$1\" in log file"
    if grep --quiet "\$1" "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"; then
        echo ABORTING: found "\$1"
        echo "Dumping first 500K bytes of karaf log..."
        head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        echo "Dumping last 500K bytes of karaf log..."
        tail --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
        exit 1
    fi
}

exit_on_log_file_message 'BindException: Address already in use'
exit_on_log_file_message 'server is unhealthy'

EOF

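# Distribute and run the configuration script. Node addresses are exposed as
# ODL_SYSTEM_<n>_IP variables, so the loops below build the variable name in
# odl_ip and dereference it with bash indirect expansion (${!odl_ip}).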
[ "$NUM_OPENSTACK_SITES" ] || NUM_OPENSTACK_SITES=1
NUM_ODLS_PER_SITE=$((NUM_ODL_SYSTEM / NUM_OPENSTACK_SITES))
for i in `seq 1 ${NUM_OPENSTACK_SITES}`
do
    # Get full list of ODL nodes for this site
    odl_node_list=
    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        odl_node_list="${odl_node_list} ${!odl_ip}"
    done

    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        # Copy over the config script to controller and execute it (parameters are used only for cluster)
        echo "Execute the configuration script on controller ${!odl_ip} for index $j with node list ${odl_node_list}"
        scp ${WORKSPACE}/configuration-script.sh ${!odl_ip}:/tmp
        ssh ${!odl_ip} "bash /tmp/configuration-script.sh ${j} '${odl_node_list}'"
    done
done

echo "Locating config plan to use..."
configplan_filepath="${WORKSPACE}/test/csit/configplans/${STREAMTESTPLAN}"
if [ ! -f "${configplan_filepath}" ]; then
    configplan_filepath="${WORKSPACE}/test/csit/configplans/${TESTPLAN}"
fi

if [ -f "${configplan_filepath}" ]; then
    echo "configplan exists!!!"
    echo "Changing the configplan path..."
    cat ${configplan_filepath} | sed "s:integration:${WORKSPACE}:" > configplan.txt
    cat configplan.txt
    for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' configplan.txt ); do
        echo "Executing ${line}..."
        # shellcheck source=${line} disable=SC1091
        source ${line}
    done
fi

# Copy over the startup script to controller and execute it.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
done

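# The post-startup script takes the cluster member index as its first argument;
# seed_index walks 1..NUM_ODLS_PER_SITE and is reset after each OpenStack site,
# so every site numbers its members starting from 1.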
seed_index=1
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Execute the post startup script on controller ${!CONTROLLERIP}"
    scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
    ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
    if [ $(( $i % (${NUM_ODL_SYSTEM} / ${NUM_OPENSTACK_SITES}) )) == 0 ]; then
        seed_index=1
    fi
done

echo "Cool down for ${COOLDOWN_PERIOD} seconds :)..."
sleep ${COOLDOWN_PERIOD}

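# Build the per-controller Robot variables and capture a "before" jstack thread
# dump from every controller, to be compared with the "after" dump taken once the
# Robot suites have finished.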
echo "Generating controller variables..."
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
    echo "Let's take the karaf thread dump"
    ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_before.log
    pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
    echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
    ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_before.log || true
done

if [ "${NUM_OPENSTACK_SYSTEM}" -gt 0 ]; then
    echo "Exiting without running tests to deploy openstack for testing"
    exit
fi

echo "Generating mininet variables..."
for i in `seq 1 ${NUM_TOOLS_SYSTEM}`
do
    MININETIP=TOOLS_SYSTEM_${i}_IP
    tools_variables=${tools_variables}" -v ${MININETIP}:${!MININETIP}"
done

echo "Locating test plan to use..."
testplan_filepath="${WORKSPACE}/test/csit/testplans/${STREAMTESTPLAN}"
if [ ! -f "${testplan_filepath}" ]; then
    testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi

echo "Changing the testplan path..."
cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
cat testplan.txt

# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
    SUITES=`egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' '`
else
    newsuites=""
    workpath="${WORKSPACE}/test/csit/suites"
    for suite in ${SUITES}; do
        fullsuite="${workpath}/${suite}"
        if [ -z "${newsuites}" ]; then
            newsuites+=${fullsuite}
        else
            newsuites+=" "${fullsuite}
        fi
    done
    SUITES=${newsuites}
fi

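# Run the Robot suites; the trailing '|| true' keeps the job going even if suites
# fail, so the karaf logs and thread dumps below are still collected.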
echo "Starting Robot test suites ${SUITES} ..."
pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -e skip_if_${DISTROSTREAM} -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
-v JAVA_HOME:${JAVA_HOME} -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
-v CONTROLLER:${ODL_SYSTEM_IP} -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_IP} \
-v CONTROLLER_USER:${USER} -v ODL_SYSTEM_USER:${USER} \
-v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_IP} -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} -v TOOLS_SYSTEM_3_IP:${TOOLS_SYSTEM_3_IP} \
-v TOOLS_SYSTEM_4_IP:${TOOLS_SYSTEM_4_IP} -v TOOLS_SYSTEM_5_IP:${TOOLS_SYSTEM_5_IP} -v TOOLS_SYSTEM_6_IP:${TOOLS_SYSTEM_6_IP} \
-v TOOLS_SYSTEM_USER:${USER} -v JDKVERSION:${JDKVERSION} -v ODL_STREAM:${DISTROSTREAM} -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \
-v MININET:${TOOLS_SYSTEM_IP} -v MININET1:${TOOLS_SYSTEM_2_IP} -v MININET2:${TOOLS_SYSTEM_3_IP} \
-v MININET3:${TOOLS_SYSTEM_4_IP} -v MININET4:${TOOLS_SYSTEM_5_IP} -v MININET5:${TOOLS_SYSTEM_6_IP} \
-v MININET_USER:${USER} -v USER_HOME:${HOME} ${TESTOPTIONS} ${SUITES} || true
# FIXME: Sort (at least -v) options alphabetically.

echo "Examining the files in data/log and checking filesize"
ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"

for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Let's take the karaf thread dump again..."
    ssh ${!CONTROLLERIP} "sudo ps aux" > ${WORKSPACE}/ps_after.log
    pid=$(grep org.apache.karaf.main.Main ${WORKSPACE}/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
    echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
    ssh ${!CONTROLLERIP} "jstack ${pid}" > ${WORKSPACE}/karaf_${i}_${pid}_threads_after.log || true
    echo "Killing ODL"
    set +e  # We do not want to create a red dot just because something went wrong while fetching logs.
    ssh "${!CONTROLLERIP}" bash -c 'ps axf | grep karaf | grep -v grep | awk '"'"'{print "kill -9 " $1}'"'"' | sh'
done

sleep 5
# FIXME: Unify the copy process between various scripts.
# TODO: Use rsync.
for i in `seq 1 ${NUM_ODL_SYSTEM}`
do
    CONTROLLERIP=ODL_SYSTEM_${i}_IP
    echo "Compressing karaf.log ${i}"
    ssh ${!CONTROLLERIP} gzip --best /tmp/${BUNDLEFOLDER}/data/log/karaf.log
    echo "Fetching compressed karaf.log ${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz" "odl${i}_karaf.log.gz" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz"
    # TODO: Should we compress the output log file as well?
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log" "odl${i}_karaf_console.log" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
    echo "Fetch GC logs"
    # FIXME: Put member index in filename, instead of directory name.
    mkdir -p "gclogs-${i}"
    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/*.log" "gclogs-${i}/" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/*.log"
done

echo "Examine copied files"
ls -lt

true  # perhaps Jenkins is testing last exit code

# vim: ts=4 sw=4 sts=4 et ft=sh :