X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=jjb%2Fintegration%2Finclude-raw-integration-deploy-controller-run-test.sh;h=3e7ed10308255ef59668574aa9fe03c273441316;hb=4aaa45a1cafdf3191f28a3885a7010fa966a6bda;hp=ced677afbb988d11888999093a8512a768e1d498;hpb=9457ef7a1dd3b0531bb9fd666380bab01f26ddab;p=releng%2Fbuilder.git

diff --git a/jjb/integration/include-raw-integration-deploy-controller-run-test.sh b/jjb/integration/include-raw-integration-deploy-controller-run-test.sh
index ced677afb..3e7ed1030 100644
--- a/jjb/integration/include-raw-integration-deploy-controller-run-test.sh
+++ b/jjb/integration/include-raw-integration-deploy-controller-run-test.sh
@@ -2,6 +2,7 @@
 # Activate robotframework virtualenv
 # ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
 # script.
+# shellcheck source=${ROBOT_VENV}/bin/activate disable=SC1091
 source ${ROBOT_VENV}/bin/activate
 
 CONTROLLERMEM="2048m"
@@ -11,13 +12,6 @@ if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
     AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
     MODULESCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/modules.conf
     MODULESHARDSCONF=/tmp/${BUNDLEFOLDER}/configuration/initial/module-shards.conf
-    # Create the string for odl nodes
-    odl_node_list="${ODL_SYSTEM_1_IP}"
-    for i in `seq 2 ${NUM_ODL_SYSTEM}` ; do
-        CONTROLLERIP=ODL_SYSTEM_${i}_IP
-        odl_node_list="${odl_node_list} ${!CONTROLLERIP}"
-    done
-    echo ${odl_node_list}
 fi
 
 if [ ${CONTROLLERSCOPE} == 'all' ]; then
@@ -40,6 +34,7 @@ if [ -f "${WORKSPACE}/test/csit/scriptplans/${TESTPLAN}" ]; then
     cat scriptplan.txt
     for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' scriptplan.txt ); do
         echo "Executing ${line}..."
+        # shellcheck source=${line} disable=SC1091
         source ${line}
     done
 fi
@@ -50,7 +45,7 @@ echo "Changing to /tmp"
 cd /tmp
 
 echo "Downloading the distribution..."
-wget --progress=dot:mega '${ACTUALBUNDLEURL}'
+wget --progress=dot:mega '${ACTUAL_BUNDLE_URL}'
 
 echo "Extracting the new controller..."
 unzip -q ${BUNDLE}
@@ -58,7 +53,7 @@ unzip -q ${BUNDLE}
 echo "Configuring the startup features..."
 FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
 CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
-sed -ie "s/featuresBoot=.*/featuresBoot=config,standard,region,package,kar,ssh,management,${ACTUALFEATURES}/g" \${FEATURESCONF}
+sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" \${FEATURESCONF}
 sed -ie "s%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features,mvn:org.opendaylight.integration/features-integration-test/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features%g" \${FEATURESCONF}
 cat \${FEATURESCONF}
@@ -73,6 +68,7 @@ LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
 sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' \${LOGCONF}
 # FIXME: Make log size limit configurable from build parameter.
 sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' \${LOGCONF}
+echo "log4j.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> \${LOGCONF}
 cat \${LOGCONF}
 
 echo "Configure java home and max memory..."
@@ -106,7 +102,7 @@ if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then
 fi
 
 echo "Configuring cluster"
-/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 ${odl_node_list}
+/tmp/${BUNDLEFOLDER}/bin/configure_cluster.sh \$1 \$2
 
 echo "Dump akka.conf"
 cat ${AKKACONF}
@@ -181,13 +177,26 @@ exit_on_log_file_message 'server is unhealthy'
 EOF
 
-# Copy over the config script to controller and execute it.
-for i in `seq 1 ${NUM_ODL_SYSTEM}`
+[ "$NUM_OPENSTACK_SITES" ] || NUM_OPENSTACK_SITES=1
+NUM_ODLS_PER_SITE=$((NUM_ODL_SYSTEM / NUM_OPENSTACK_SITES))
+for i in `seq 1 ${NUM_OPENSTACK_SITES}`
 do
-    CONTROLLERIP=ODL_SYSTEM_${i}_IP
-    echo "Execute the configuration script on controller ${!CONTROLLERIP}"
-    scp ${WORKSPACE}/configuration-script.sh ${!CONTROLLERIP}:/tmp
-    ssh ${!CONTROLLERIP} "bash /tmp/configuration-script.sh ${i}"
+    # Get full list of ODL nodes for this site
+    odl_node_list=
+    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
+    do
+        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
+        odl_node_list="${odl_node_list} ${!odl_ip}"
+    done
+
+    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
+    do
+        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
+        # Copy over the config script to controller and execute it (parameters are used only for cluster)
+        echo "Execute the configuration script on controller ${!odl_ip} for index $j with node list ${odl_node_list}"
+        scp ${WORKSPACE}/configuration-script.sh ${!odl_ip}:/tmp
+        ssh ${!odl_ip} "bash /tmp/configuration-script.sh ${j} '${odl_node_list}'"
+    done
 done
 
 echo "Locating config plan to use..."
@@ -203,6 +212,7 @@ if [ -f "${configplan_filepath}" ]; then
     cat configplan.txt
     for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' configplan.txt ); do
         echo "Executing ${line}..."
+        # shellcheck source=${line} disable=SC1091
         source ${line}
     done
 fi
@@ -216,32 +226,36 @@ do
     ssh ${!CONTROLLERIP} "bash /tmp/startup-script.sh"
 done
 
+seed_index=1
 for i in `seq 1 ${NUM_ODL_SYSTEM}`
 do
     CONTROLLERIP=ODL_SYSTEM_${i}_IP
     echo "Execute the post startup script on controller ${!CONTROLLERIP}"
     scp ${WORKSPACE}/post-startup-script.sh ${!CONTROLLERIP}:/tmp
-    ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh ${i}"
+    ssh ${!CONTROLLERIP} "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
+    if [ $(( $i % (${NUM_ODL_SYSTEM} / ${NUM_OPENSTACK_SITES}) )) == 0 ]; then
+        seed_index=1
+    fi
 done
 
 echo "Cool down for ${COOLDOWN_PERIOD} seconds :)..."
 sleep ${COOLDOWN_PERIOD}
 
-if [ ${NUM_OPENSTACK_SYSTEM} -gt 0 ]; then
-    echo "Exiting without running tests to deploy openstack for testing"
-    exit
-fi
-
 echo "Generating controller variables..."
 for i in `seq 1 ${NUM_ODL_SYSTEM}`
 do
     CONTROLLERIP=ODL_SYSTEM_${i}_IP
     odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
     echo "Lets's take the karaf thread dump"
-    KARAF_PID=$(ssh ${!CONTROLLERIP} "ps aux | grep 'distribution-karaf' | grep -v grep | tr -s ' ' | cut -f2 -d' '")
+    KARAF_PID=$(ssh ${!CONTROLLERIP} "ps aux | grep ${KARAF_ARTIFACT} | grep -v grep | tr -s ' ' | cut -f2 -d' '")
     ssh ${!CONTROLLERIP} "jstack $KARAF_PID"> ${WORKSPACE}/karaf_${i}_threads_before.log || true
 done
 
+if [ ${NUM_OPENSTACK_SYSTEM} -gt 0 ]; then
+    echo "Exiting without running tests to deploy openstack for testing"
+    exit
+fi
+
 echo "Generating mininet variables..."
 for i in `seq 1 ${NUM_TOOLS_SYSTEM}`
 do
@@ -261,8 +275,8 @@ cat testplan.txt
 SUITES=$( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ' )
 
 echo "Starting Robot test suites ${SUITES} ..."
-pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
--v JAVA_HOME:${JAVA_HOME} -v BUNDLE_URL:${ACTUALBUNDLEURL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
+pybot -N ${TESTPLAN} --removekeywords wuks -c critical -e exclude -e skip_if_${DISTROSTREAM} -v BUNDLEFOLDER:${BUNDLEFOLDER} -v WORKSPACE:/tmp \
+-v JAVA_HOME:${JAVA_HOME} -v BUNDLE_URL:${ACTUAL_BUNDLE_URL} -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \
 -v CONTROLLER:${ODL_SYSTEM_IP} -v ODL_SYSTEM_IP:${ODL_SYSTEM_IP} -v ODL_SYSTEM_1_IP:${ODL_SYSTEM_IP} \
 -v CONTROLLER_USER:${USER} -v ODL_SYSTEM_USER:${USER} \
 -v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_IP} -v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} -v TOOLS_SYSTEM_3_IP:${TOOLS_SYSTEM_3_IP} \
@@ -281,7 +295,7 @@ for i in `seq 1 ${NUM_ODL_SYSTEM}`
 do
     CONTROLLERIP=ODL_SYSTEM_${i}_IP
     echo "Lets's take the karaf thread dump again..."
-    KARAF_PID=$(ssh ${!CONTROLLERIP} "ps aux | grep 'distribution-karaf' | grep -v grep | tr -s ' ' | cut -f2 -d' '")
+    KARAF_PID=$(ssh ${!CONTROLLERIP} "ps aux | grep ${KARAF_ARTIFACT} | grep -v grep | tr -s ' ' | cut -f2 -d' '")
     ssh ${!CONTROLLERIP} "jstack $KARAF_PID"> ${WORKSPACE}/karaf_${i}_threads_after.log || true
     echo "Killing ODL"
     set +e  # We do not want to create red dot just because something went wrong while fetching logs.
@@ -289,17 +303,26 @@ do
 done
 sleep 5
 
+# FIXME: Unify the copy process between various scripts.
+# TODO: Use rsync.
 for i in `seq 1 ${NUM_ODL_SYSTEM}`
 do
     CONTROLLERIP=ODL_SYSTEM_${i}_IP
     echo "Compressing karaf.log ${i}"
     ssh ${!CONTROLLERIP} gzip --best /tmp/${BUNDLEFOLDER}/data/log/karaf.log
     echo "Fetching compressed karaf.log ${i}"
-    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz" "odl${i}_karaf.log.gz"
+    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz" "odl${i}_karaf.log.gz" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf.log.gz"
     # TODO: Should we compress the output log file as well?
-    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log" "odl${i}_karaf_console.log"
+    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log" "odl${i}_karaf_console.log" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/karaf_console.log"
+    echo "Fetch GC logs"
+    # FIXME: Put member index in filename, instead of directory name.
+    mkdir -p "gclogs-${i}"
+    scp "${!CONTROLLERIP}:/tmp/${BUNDLEFOLDER}/data/log/*.log" "gclogs-${i}/" && ssh ${!CONTROLLERIP} rm -f "/tmp/${BUNDLEFOLDER}/data/log/*.log"
 done
 
+echo "Examine copied files"
+ls -lt
+
 true  # perhaps Jenkins is testing last exit code
 
 # vim: ts=4 sw=4 sts=4 et ft=sh :
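
Reviewer note: below is a minimal standalone sketch (not part of the patch) showing how the new per-site loop maps (site i, member j) to a global ODL_SYSTEM_<n>_IP controller and builds the node list handed to configuration-script.sh. The inventory values (NUM_ODL_SYSTEM=6, NUM_OPENSTACK_SITES=2, the 10.0.0.x addresses) are hypothetical examples; only the index arithmetic mirrors the patch.

#!/bin/bash
# Hypothetical inventory: 6 ODL controllers split evenly across 2 OpenStack sites.
NUM_ODL_SYSTEM=6
NUM_OPENSTACK_SITES=2
ODL_SYSTEM_1_IP=10.0.0.1; ODL_SYSTEM_2_IP=10.0.0.2; ODL_SYSTEM_3_IP=10.0.0.3
ODL_SYSTEM_4_IP=10.0.0.4; ODL_SYSTEM_5_IP=10.0.0.5; ODL_SYSTEM_6_IP=10.0.0.6

NUM_ODLS_PER_SITE=$((NUM_ODL_SYSTEM / NUM_OPENSTACK_SITES))
for i in `seq 1 ${NUM_OPENSTACK_SITES}`
do
    # Same arithmetic as the patch: global index = (i - 1) * NUM_ODLS_PER_SITE + j.
    odl_node_list=
    for j in `seq 1 ${NUM_ODLS_PER_SITE}`
    do
        odl_ip=ODL_SYSTEM_$(((i - 1) * NUM_ODLS_PER_SITE + j))_IP
        odl_node_list="${odl_node_list} ${!odl_ip}"
    done
    echo "site ${i}: configuration-script.sh gets node list${odl_node_list}"
done
# Expected output:
# site 1: configuration-script.sh gets node list 10.0.0.1 10.0.0.2 10.0.0.3
# site 2: configuration-script.sh gets node list 10.0.0.4 10.0.0.5 10.0.0.6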