X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=jjb%2Fintegration%2Fcommon-functions.sh;h=6c967d0cb00c1d18940143b25126de8631bbcd8a;hb=995edd3864f792c93980549fdd6e402d36d88fd5;hp=034cf88d903573584b09e85acfdfc148addc07d7;hpb=b7a0c1bea854a8a876bfb3bb3b314b1dc9b6b3a5;p=releng%2Fbuilder.git diff --git a/jjb/integration/common-functions.sh b/jjb/integration/common-functions.sh index 034cf88d9..6c967d0cb 100644 --- a/jjb/integration/common-functions.sh +++ b/jjb/integration/common-functions.sh @@ -10,7 +10,7 @@ export FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv -export CONTROLLERMEM="2048m" +export CONTROLLERMEM=${CONTROLLERMAXMEM} # Cluster specific configuration settings export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf @@ -42,8 +42,13 @@ function set_java_vars() { local -r controllermem=$2 local -r memconf=$3 - echo "Configure\n java home: ${java_home}\n max memory: ${controllermem}\n memconf: ${memconf}" + echo "Configure" + echo " java home: ${java_home}" + echo " max memory: ${controllermem}" + echo " memconf: ${memconf}" + # We do not want expressions to expand here. + # shellcheck disable=SC2016 sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}" sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}" echo "cat ${memconf}" @@ -71,18 +76,15 @@ function configure_karaf_log() { local logapi=log4j # Check what the logging.cfg file is using for the logging api: log4j or log4j2 - grep "log4j2" "${LOGCONF}" - if [ $? -eq 0 ]; then + if grep "log4j2" "${LOGCONF}"; then logapi=log4j2 fi echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}" if [ "${logapi}" == "log4j2" ]; then # FIXME: Make log size limit configurable from build parameter. 
- # From Neon the default karaf file size is 64 MB + # Increase default log file size to 1GB sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}" - # Flourine still uses 16 MB - sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}" orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver" orgmodule_="${orgmodule//./_}" echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}" @@ -156,7 +158,9 @@ function configure_karaf_log_for_apex() { echo "$logging_config" # fine a sane line number to inject the custom logging json - lineno=$(ssh $OPENSTACK_CONTROL_NODE_1_IP "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}') + lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}') + # We purposely want these variables to expand client-side + # shellcheck disable=SC2029 ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json" ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json" fi @@ -173,7 +177,7 @@ function configure_odl_features_for_apex() { cat > /tmp/set_odl_features.sh << EOF sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file -for feature in $(echo "$ACTUALFEATURES" | sed "s/,/ /g"); do +for feature in "\${ACTUALFEATURES//,/ }"; do sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file; done echo "Modified puppet-opendaylight service_configs.json..." @@ -217,8 +221,13 @@ function get_test_suites() { testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}" fi + if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then + add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot" + echo "${add_test}" >> "$testplan_filepath" + fi + echo "Changing the testplan path..." - cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt + sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt cat testplan.txt # Use the testplan if specific SUITES are not defined. @@ -237,7 +246,7 @@ function get_test_suites() { done fi - eval $__suite_list="'$suite_list'" + eval "$__suite_list='$suite_list'" } function run_plan() { @@ -252,7 +261,7 @@ function run_plan() { ;; esac - printf "Locating %s plan to use...\n" "${type}" + printf "Locating %s plan to use...\\n" "${type}" plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan" if [ ! -f "${plan_filepath}" ]; then plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}" @@ -262,19 +271,139 @@ function run_plan() { fi if [ -f "${plan_filepath}" ]; then - printf "%s plan exists!!!\n" "${type}" - printf "Changing the %s plan path...\n" "${type}" - cat "${plan_filepath}" | sed "s:integration:${WORKSPACE}:" > "${type}plan.txt" + printf "%s plan exists!!!\\n" "${type}" + printf "Changing the %s plan path...\\n" "${type}" + sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt" cat "${type}plan.txt" + # shellcheck disable=SC2013 for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do - printf "Executing %s...\n" "${line}" + printf "Executing %s...\\n" "${line}" # shellcheck source=${line} disable=SC1091 - source "${line}" + . 
"${line}" done fi - printf "Finished running %s plans\n" "${type}" + printf "Finished running %s plans\\n" "${type}" } # function run_plan() +# Run scripts to support JVM monitoring. +function add_jvm_support() +{ + if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then + set_elasticsearch_attribute "${ELASTICSEARCHATTRIBUTE}" + set_jvm_common_attribute + fi +} # function add_jvm_support() + +#Expected input parameter: long/short/a number +function set_elasticsearch_attribute() +{ +short=5000 +long=120000 +default=$short + +case $1 in +short) + period=$short + ;; +long) + period=$long + ;; +*) + # shellcheck disable=SC2166 + if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge $short -a "$1" -le $long ]; then + period=$1 + else + period=$default + fi + ;; +esac + +cat > "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg < "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg < "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg < "${WORKSPACE}"/elasticsearch.yml < "${WORKSPACE}"/elasticsearch_startup.sh < /dev/null 2>&1 & + ls -al /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch + +EOF + echo "Setup ODL_SYSTEM_IP specific config files for ${!CONTROLLERIP} " + cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg + cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg + cat "${WORKSPACE}"/elasticsearch.yml + + + echo "Copying config files to ${!CONTROLLERIP}" + scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/ + scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/ + scp "${WORKSPACE}"/elasticsearch.yml "${!CONTROLLERIP}":/tmp/ + + ssh "${!CONTROLLERIP}" "sudo ls -al /tmp/elasticsearch/" + ssh "${!CONTROLLERIP}" "sudo mv /tmp/elasticsearch.yml /tmp/elasticsearch/elasticsearch-1.7.5/config/" + ssh "${!CONTROLLERIP}" "cat /tmp/elasticsearch/elasticsearch-1.7.5/config/elasticsearch.yml" + + echo "Copying the elasticsearch_startup script to ${!CONTROLLERIP}" + cat "${WORKSPACE}"/elasticsearch_startup.sh + scp "${WORKSPACE}"/elasticsearch_startup.sh "${!CONTROLLERIP}":/tmp + ssh "${!CONTROLLERIP}" 'bash /tmp/elasticsearch_startup.sh' + ssh "${!CONTROLLERIP}" 'ps aux | grep elasticsearch' +done +} #function set_jvm_common_attribute + # Return elapsed time. Usage: # - Call first time with no arguments and a new timer is returned. # - Next call with the first argument as the timer and the elapsed time is returned. 
@@ -333,6 +462,7 @@ DISTROSTREAM: ${DISTROSTREAM} BUNDLE_URL: ${BUNDLE_URL} CONTROLLERFEATURES: ${CONTROLLERFEATURES} CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP} +CONTROLLERMAXMEM: ${CONTROLLERMAXMEM} SCRIPTPLAN: ${SCRIPTPLAN} CONFIGPLAN: ${CONFIGPLAN} STREAMTESTPLAN: ${STREAMTESTPLAN} @@ -365,6 +495,7 @@ CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS} LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER} ODL_SFC_DRIVER: ${ODL_SFC_DRIVER} ODL_SNAT_MODE: ${ODL_SNAT_MODE} +GROUP_ADD_MOD_ENABLED: ${GROUP_ADD_MOD_ENABLED} EOF } @@ -375,7 +506,7 @@ function tcpdump_start() { local -r filter=$3 filter_=${filter// /_} - printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}" + printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}" # $fileter needs to be parsed client-side # shellcheck disable=SC2029 ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &" @@ -385,7 +516,7 @@ function tcpdump_start() { function tcpdump_stop() { local -r ip=$1 - printf "node %s: stopping tcpdump\n" "$ip" + printf "node %s: stopping tcpdump\\n" "$ip" ${SSH} "${ip}" "ps -ef | grep tcpdump.sh" ${SSH} "${ip}" "sudo pkill -f tcpdump" ${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap" @@ -441,7 +572,7 @@ function collect_openstack_logs() { local -r node_type=${3} local oslogs="${folder}/oslogs" - printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}" + printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}" rm -rf "${oslogs}" mkdir -p "${oslogs}" # There are always some logs in /opt/stack/logs and this also covers the @@ -458,7 +589,7 @@ function extract_from_journal() { local -r services=\${1} local -r folder=\${2} local -r node_type=\${3} - printf "extract_from_journal folder: \${folder}, services: \${services}\n" + printf "extract_from_journal folder: \${folder}, services: \${services}\\n" for service in \${services}; do # strip anything before @ and anything after . # devstack@g-api.service will end as g-api @@ -481,7 +612,7 @@ fi ls -al /tmp/oslogs EOF # cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF - printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}" + printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}" cat "${WORKSPACE}"/collect_openstack_logs.sh scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp ${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1" @@ -494,30 +625,30 @@ function collect_netvirt_logs() { set +e # We do not want to create red dot just because something went wrong while fetching logs. 
cat > extra_debug.sh << EOF -echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n" +echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n" /usr/sbin/lsmod | /usr/bin/grep openvswitch -echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n" +echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n" sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log -echo -e "\nsudo netstat -punta\n" +echo -e "\\nsudo netstat -punta\\n" sudo netstat -punta -echo -e "\nsudo getenforce\n" +echo -e "\\nsudo getenforce\\n" sudo getenforce -echo -e "\nsudo systemctl status httpd\n" +echo -e "\\nsudo systemctl status httpd\\n" sudo systemctl status httpd -echo -e "\nenv\n" +echo -e "\\nenv\\n" env source /opt/stack/devstack/openrc admin admin -echo -e "\nenv after openrc\n" +echo -e "\\nenv after openrc\\n" env -echo -e "\nsudo du -hs /opt/stack" +echo -e "\\nsudo du -hs /opt/stack" sudo du -hs /opt/stack -echo -e "\nsudo mount" +echo -e "\\nsudo mount" sudo mount -echo -e "\ndmesg -T > /tmp/dmesg.log" +echo -e "\\ndmesg -T > /tmp/dmesg.log" dmesg -T > /tmp/dmesg.log -echo -e "\njournalctl > /tmp/journalctl.log\n" +echo -e "\\njournalctl > /tmp/journalctl.log\\n" sudo journalctl > /tmp/journalctl.log -echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log" +echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log" ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log EOF @@ -540,7 +671,7 @@ EOF echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}" NODE_FOLDER="odl_${i}" mkdir -p "${NODE_FOLDER}" - echo "Lets's take the karaf thread dump again..." + echo "Let's take the karaf thread dump again..." ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ') echo "karaf main: org.apache.karaf.main.Main, pid:${pid}" @@ -562,8 +693,8 @@ EOF ${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log" scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}" tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g" - grep "ROBOT MESSAGE\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log" - grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \ + grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log" + grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \ "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log" # Print ROBOT lines and print Exception lines. 
For exception lines also print the previous line for context sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log" @@ -725,12 +856,26 @@ function get_nodes_list() { function get_features() { if [ "${CONTROLLERSCOPE}" == 'all' ]; then - ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}" - export CONTROLLERMEM="3072m" + if [ "$KARAF_PROJECT" == "integration" ]; then + ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}" + else + ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}" + fi + # if CONTROLLERMEM still is the default 2G and was not overridden by a + # custom job, then we need to make sure to increase it because "all" + # features can be heavy + if [ "${CONTROLLERMEM}" == "2048m" ]; then + export CONTROLLERMEM="3072m" + fi else ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}" fi + if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then + # Add decanter features to allow JVM monitoring + ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch-rest" + fi + # Some versions of jenkins job builder result in feature list containing spaces # and ending in newline. Remove all that. ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r') @@ -766,18 +911,22 @@ cat ${MAVENCONF} if [[ "$USEFEATURESBOOT" == "True" ]]; then echo "Configuring the startup features..." - sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF} + sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF} fi -FEATURE_TEST_STRING="features-integration-test" -KARAF_VERSION=${KARAF_VERSION:-karaf4} -if [[ "$KARAF_VERSION" == "karaf4" ]]; then - FEATURE_TEST_STRING="features-test" +FEATURE_TEST_STRING="features-test" +FEATURE_TEST_VERSION="$BUNDLE_VERSION" +if [[ "$KARAF_ARTIFACT" == "opendaylight" ]]; then + FEATURE_TEST_VERSION="$(sed -r "s%^([0-9]+)\.([0-9]+)\.0(.*)%0.\1.\2\3%" <<<"$BUNDLE_VERSION")" fi +KARAF_VERSION=${KARAF_VERSION:-karaf4} -sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF} -if [[ ! -z "${REPO_URL}" ]]; then - sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF} +# only manipulate feature repo in integration distro +if [[ "$KARAF_PROJECT" == "integration" ]]; then + sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/\${FEATURE_TEST_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.2.0/xml/features,%g" ${FEATURESCONF} + if [[ ! -z "${REPO_URL}" ]]; then + sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF} + fi fi cat ${FEATURESCONF} @@ -825,15 +974,26 @@ EOF function create_post_startup_script() { cat > "${WORKSPACE}"/post-startup-script.sh < "${WORKSPACE}"/ps_before.log pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ') echo "karaf main: org.apache.karaf.main.Main, pid:${pid}" @@ -1096,5 +1254,3 @@ function install_ovs_from_path() { ${SSH} "${ip}" "sudo yum -y install createrepo && createrepo --database /tmp/ovs_rpms" install_ovs_from_repo "${ip}" file:/tmp/ovs_rpms } - -
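Two of the additions above are worth illustrating outside the patch context.

First, the new set_elasticsearch_attribute() picks the Decanter scheduler period from its argument: the literal words "short" and "long" map to fixed bounds, a bare number is accepted only if it falls between those bounds, and anything else falls back to the short default. A minimal standalone sketch of that selection logic, assuming the same 5000/120000 ms bounds as the patch (the helper name select_period is illustrative and not part of the script):

    # Sketch only -- mirrors the short/long/number handling in set_elasticsearch_attribute().
    select_period() {
        local -r short=5000      # "short" scheduler period, in milliseconds
        local -r long=120000     # "long" scheduler period, in milliseconds
        case "$1" in
            short) echo "$short" ;;
            long)  echo "$long" ;;
            *)
                # A bare number is honoured only inside [short, long];
                # anything else falls back to the short default.
                if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge "$short" ] && [ "$1" -le "$long" ]; then
                    echo "$1"
                else
                    echo "$short"
                fi
                ;;
        esac
    }

    select_period short    # prints 5000
    select_period 60000    # prints 60000 (within bounds)
    select_period 999999   # prints 5000 (out of range, falls back to the default)

Splitting the compound test into two [ ... ] invocations joined by && also sidesteps the SC2166 warning that the in-tree version suppresses with a shellcheck directive.

Second, the featuresRepositories change computes FEATURE_TEST_VERSION by rewriting the bundle version whenever the distribution artifact is "opendaylight", apparently so that the features-test artifact is still resolved under its 0.x.y numbering. The sed expression can be sanity-checked in isolation; the sample versions below are illustrative only:

    # Sketch only -- exercises the version-mangling sed from the patch.
    for v in 14.0.0-SNAPSHOT 13.1.0 0.12.3-SNAPSHOT; do
        printf '%s -> %s\n' "$v" "$(sed -r 's%^([0-9]+)\.([0-9]+)\.0(.*)%0.\1.\2\3%' <<< "$v")"
    done
    # 14.0.0-SNAPSHOT -> 0.14.0-SNAPSHOT
    # 13.1.0          -> 0.13.1
    # 0.12.3-SNAPSHOT -> 0.12.3-SNAPSHOT  (third component is not 0, so the pattern does not match)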