export CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
export LOGCONF=/tmp/${BUNDLEFOLDER}/etc/org.ops4j.pax.logging.cfg
export MEMCONF=/tmp/${BUNDLEFOLDER}/bin/setenv
-export CONTROLLERMEM="2048m"
+export CONTROLLERMEM=${CONTROLLERMAXMEM}
# Cluster specific configuration settings
export AKKACONF=/tmp/${BUNDLEFOLDER}/configuration/initial/akka.conf
local -r controllermem=$2
local -r memconf=$3
- echo "Configure\n java home: ${java_home}\n max memory: ${controllermem}\n memconf: ${memconf}"
+ echo "Configure"
+ echo " java home: ${java_home}"
+ echo " max memory: ${controllermem}"
+ echo " memconf: ${memconf}"
- sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' ${memconf}
- sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' ${memconf}
+ # We do not want expressions to expand here.
+ # shellcheck disable=SC2016
+ sed -ie 's%^# export JAVA_HOME%export JAVA_HOME=${JAVA_HOME:-'"${java_home}"'}%g' "${memconf}"
+ sed -ie 's/JAVA_MAX_MEM="2048m"/JAVA_MAX_MEM='"${controllermem}"'/g' "${memconf}"
echo "cat ${memconf}"
- cat ${memconf}
+ cat "${memconf}"
echo "Set Java version"
- sudo /usr/sbin/alternatives --install /usr/bin/java java ${java_home}/bin/java 1
- sudo /usr/sbin/alternatives --set java ${java_home}/bin/java
+ sudo /usr/sbin/alternatives --install /usr/bin/java java "${java_home}/bin/java" 1
+ sudo /usr/sbin/alternatives --set java "${java_home}/bin/java"
echo "JDK default version ..."
java -version
local logapi=log4j
# Check what the logging.cfg file is using for the logging api: log4j or log4j2
- grep "log4j2" ${LOGCONF}
- if [ $? -eq 0 ]; then
+ if grep "log4j2" "${LOGCONF}"; then
logapi=log4j2
fi
echo "Configuring the karaf log... karaf_version: ${karaf_version}, logapi: ${logapi}"
if [ "${logapi}" == "log4j2" ]; then
# FIXME: Make log size limit configurable from build parameter.
- # From Neon the default karaf file size is 64 MB
- sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
- # Flourine still uses 16 MB
- sed -ie 's/log4j2.appender.rolling.policies.size.size = 16MB/log4j2.appender.rolling.policies.size.size = 1GB/g' ${LOGCONF}
+ # Increase default log file size to 1GB
+ sed -ie 's/log4j2.appender.rolling.policies.size.size = 64MB/log4j2.appender.rolling.policies.size.size = 1GB/g' "${LOGCONF}"
orgmodule="org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver"
orgmodule_="${orgmodule//./_}"
- echo "${logapi}.logger.${orgmodule_}.name = WARN" >> ${LOGCONF}
- echo "${logapi}.logger.${orgmodule_}.level = WARN" >> ${LOGCONF}
+ echo "${logapi}.logger.${orgmodule_}.name = WARN" >> "${LOGCONF}"
+ echo "${logapi}.logger.${orgmodule_}.level = WARN" >> "${LOGCONF}"
else
- sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' ${LOGCONF}
+ sed -ie 's/log4j.appender.out.maxBackupIndex=10/log4j.appender.out.maxBackupIndex=1/g' "${LOGCONF}"
# FIXME: Make log size limit configurable from build parameter.
- sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' ${LOGCONF}
- echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> ${LOGCONF}
+ sed -ie 's/log4j.appender.out.maxFileSize=1MB/log4j.appender.out.maxFileSize=30GB/g' "${LOGCONF}"
+ echo "${logapi}.logger.org.opendaylight.yangtools.yang.parser.repo.YangTextSchemaContextResolver = WARN" >> "${LOGCONF}"
fi
# Add custom logging levels
orgmodule="org.opendaylight.${module}"
if [ "${logapi}" == "log4j2" ]; then
orgmodule_="${orgmodule//./_}"
- echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> ${LOGCONF}
- echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> ${LOGCONF}
+ echo "${logapi}.logger.${orgmodule_}.name = ${orgmodule}" >> "${LOGCONF}"
+ echo "${logapi}.logger.${orgmodule_}.level = ${level}" >> "${LOGCONF}"
else
- echo "${logapi}.logger.${orgmodule} = ${level}" >> ${LOGCONF}
+ echo "${logapi}.logger.${orgmodule} = ${level}" >> "${LOGCONF}"
fi
fi
done
fi
echo "cat ${LOGCONF}"
- cat ${LOGCONF}
+ cat "${LOGCONF}"
} # function configure_karaf_log()
function configure_karaf_log_for_apex() {
done
# replace the trailing comma with a closing brace followed by trailing comma
logging_config=${logging_config%,}" },"
- echo $logging_config
+ echo "$logging_config"
    # find a sane line number to inject the custom logging json
- lineno=$(ssh $OPENSTACK_CONTROL_NODE_1_IP "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
- ssh $controller_ip "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
- ssh $controller_ip "sudo cat /etc/puppet/hieradata/service_configs.json"
+ lineno=$(ssh "$OPENSTACK_CONTROL_NODE_1_IP" "sudo grep -Fn 'opendaylight::log_mechanism' /etc/puppet/hieradata/service_configs.json" | awk -F: '{print $1}')
+ # We purposely want these variables to expand client-side
+ # shellcheck disable=SC2029
+ ssh "$controller_ip" "sudo sed -i \"${lineno}i ${logging_config}\" /etc/puppet/hieradata/service_configs.json"
+ ssh "$controller_ip" "sudo cat /etc/puppet/hieradata/service_configs.json"
fi
} # function configure_karaf_log_for_apex()
cat > /tmp/set_odl_features.sh << EOF
sudo jq '.["opendaylight::extra_features"] |= []' $config_file > tmp.json && mv tmp.json $config_file
-for feature in $(echo $ACTUALFEATURES | sed "s/,/ /g"); do
+for feature in "\${ACTUALFEATURES//,/ }"; do
sudo jq --arg jq_arg \$feature '.["opendaylight::extra_features"] |= . + [\$jq_arg]' $config_file > tmp && mv tmp $config_file;
done
echo "Modified puppet-opendaylight service_configs.json..."
cat /tmp/set_odl_features.sh
if [ -n "${ACTUALFEATURES}" ]; then
- scp /tmp/set_odl_features.sh $controller_ip:/tmp/set_odl_features.sh
- ssh $controller_ip "sudo bash /tmp/set_odl_features.sh"
+ scp /tmp/set_odl_features.sh "$controller_ip":/tmp/set_odl_features.sh
+ ssh "$controller_ip" "sudo bash /tmp/set_odl_features.sh"
fi
} # function configure_odl_features_for_apex()
testplan_filepath="${WORKSPACE}/test/csit/testplans/${TESTPLAN}"
fi
+ if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+ add_test="integration/test/csit/suites/integration/Create_JVM_Plots.robot"
+ echo "${add_test}" >> "$testplan_filepath"
+ fi
+
echo "Changing the testplan path..."
- cat "${testplan_filepath}" | sed "s:integration:${WORKSPACE}:" > testplan.txt
+ sed "s:integration:${WORKSPACE}:" "${testplan_filepath}" > testplan.txt
cat testplan.txt
# Use the testplan if specific SUITES are not defined.
if [ -z "${SUITES}" ]; then
- suite_list=$(egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
+ suite_list=$(grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' testplan.txt | tr '\012' ' ')
else
suite_list=""
workpath="${WORKSPACE}/test/csit/suites"
done
fi
- eval $__suite_list="'$suite_list'"
+ eval "$__suite_list='$suite_list'"
}
function run_plan() {
;;
esac
- printf "Locating %s plan to use...\n" "${type}"
+ printf "Locating %s plan to use...\\n" "${type}"
plan_filepath="${WORKSPACE}/test/csit/${type}plans/$plan"
if [ ! -f "${plan_filepath}" ]; then
plan_filepath="${WORKSPACE}/test/csit/${type}plans/${STREAMTESTPLAN}"
fi
if [ -f "${plan_filepath}" ]; then
- printf "%s plan exists!!!\n" "${type}"
- printf "Changing the %s plan path...\n" "${type}"
- cat ${plan_filepath} | sed "s:integration:${WORKSPACE}:" > ${type}plan.txt
- cat ${type}plan.txt
- for line in $( egrep -v '(^[[:space:]]*#|^[[:space:]]*$)' ${type}plan.txt ); do
- printf "Executing %s...\n" "${line}"
+ printf "%s plan exists!!!\\n" "${type}"
+ printf "Changing the %s plan path...\\n" "${type}"
+ sed "s:integration:${WORKSPACE}:" "${plan_filepath}" > "${type}plan.txt"
+ cat "${type}plan.txt"
+ # shellcheck disable=SC2013
+ for line in $( grep -E -v '(^[[:space:]]*#|^[[:space:]]*$)' "${type}plan.txt" ); do
+ printf "Executing %s...\\n" "${line}"
# shellcheck source=${line} disable=SC1091
- source ${line}
+ source "${line}"
done
fi
- printf "Finished running %s plans\n" "${type}"
+ printf "Finished running %s plans\\n" "${type}"
} # function run_plan()
+# Run scripts to support JVM monitoring.
+# Reads the ELASTICSEARCHATTRIBUTE job parameter: any value other than the
+# literal string "disabled" turns on Decanter/Elasticsearch based JVM
+# monitoring on every ODL controller node.
+function add_jvm_support()
+{
+    if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+        # First set the Decanter polling period from the attribute value,
+        # then push the shared collector configs and start Elasticsearch.
+        set_elasticsearch_attribute "${ELASTICSEARCHATTRIBUTE}"
+        set_jvm_common_attribute
+    fi
+} # function add_jvm_support()
+
+# Expected input parameter ($1): "long", "short", or a number (milliseconds).
+# Writes the Decanter simple-scheduler config with the chosen polling period
+# and copies it to every ODL controller (uses NUM_ODL_SYSTEM, BUNDLEFOLDER,
+# WORKSPACE and the ODL_SYSTEM_<i>_IP variables from the job environment).
+function set_elasticsearch_attribute()
+{
+# Polling period bounds in milliseconds; "short" is also the default.
+short=5000
+long=120000
+default=$short
+
+case $1 in
+short)
+    period=$short
+    ;;
+long)
+    period=$long
+    ;;
+*)
+    # A numeric argument is accepted only when it falls inside
+    # [short, long]; anything else falls back to the default.
+    # shellcheck disable=SC2166
+    if [[ "$1" =~ ^[0-9]+$ ]] && [ "$1" -ge $short -a "$1" -le $long ]; then
+        period=$1
+    else
+        period=$default
+    fi
+    ;;
+esac
+
+# Generate the Decanter scheduler config consumed by Karaf.
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg <<EOF
+period=$period
+
+EOF
+
+echo "Copying config files to ODL Controller folder"
+
+# Distribute the config to each controller's karaf etc/ directory.
+# shellcheck disable=SC2086
+for i in $(seq 1 ${NUM_ODL_SYSTEM})
+do
+    # Indirect expansion: resolve ODL_SYSTEM_<i>_IP to the node's address.
+    CONTROLLERIP=ODL_SYSTEM_${i}_IP
+    echo "Set Decanter Polling Period to ${!CONTROLLERIP}"
+    # shellcheck disable=SC2029
+    ssh "${!CONTROLLERIP}" "mkdir -p \"/tmp/${BUNDLEFOLDER}/etc/opendaylight/karaf/\""
+    scp "${WORKSPACE}"/org.apache.karaf.decanter.scheduler.simple.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+done
+} #function set_elasticsearch_attribute
+
+# Generate the Decanter JMX collector configs and an Elasticsearch config +
+# startup script, copy them to every ODL controller, and start a local
+# Elasticsearch 1.7.5 node on each one. Uses NUM_ODL_SYSTEM, BUNDLEFOLDER,
+# WORKSPACE and ODL_SYSTEM_<i>_IP from the job environment.
+function set_jvm_common_attribute()
+{
+# Collector for java.lang MBeans that carry a name attribute.
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg <<EOF
+type=jmx-local
+url=local
+object.name=java.lang:type=*,name=*
+
+EOF
+
+# Collector for the remaining java.lang MBeans (no name attribute).
+cat > "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg <<EOF
+type=jmx-local
+url=local
+object.name=java.lang:type=*
+
+EOF
+
+# shellcheck disable=SC2086
+for i in $(seq 1 ${NUM_ODL_SYSTEM})
+do
+    # Indirect expansion: resolve ODL_SYSTEM_<i>_IP to the node's address.
+    CONTROLLERIP=ODL_SYSTEM_${i}_IP
+
+    # Minimal Elasticsearch config: disable multicast node discovery.
+    cat > "${WORKSPACE}"/elasticsearch.yml <<EOF
+    discovery.zen.ping.multicast.enabled: false
+
+EOF
+
+    # Startup script executed remotely: wipe any stale data directory and
+    # launch Elasticsearch in the background. Unquoted EOF, but the body
+    # contains no expansions so nothing is substituted here.
+    cat > "${WORKSPACE}"/elasticsearch_startup.sh <<EOF
+    cd /tmp/elasticsearch/elasticsearch-1.7.5
+    ls -al
+
+    if [ -d "data" ]; then
+        echo "data directory exists, deleting...."
+        rm -r data
+    else
+        echo "data directory does not exist"
+    fi
+
+    cd /tmp/elasticsearch
+    ls -al
+
+    echo "Starting Elasticsearch node"
+    sudo /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch > /dev/null 2>&1 &
+    ls -al /tmp/elasticsearch/elasticsearch-1.7.5/bin/elasticsearch
+
+EOF
+    # Echo the generated files into the console log for debugging.
+    echo "Setup ODL_SYSTEM_IP specific config files for ${!CONTROLLERIP} "
+    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg
+    cat "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg
+    cat "${WORKSPACE}"/elasticsearch.yml
+
+
+    echo "Copying config files to ${!CONTROLLERIP}"
+    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-local.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+    scp "${WORKSPACE}"/org.apache.karaf.decanter.collector.jmx-others.cfg "${!CONTROLLERIP}":/tmp/"${BUNDLEFOLDER}"/etc/
+    scp "${WORKSPACE}"/elasticsearch.yml "${!CONTROLLERIP}":/tmp/
+
+    # Move the config into place; assumes Elasticsearch 1.7.5 was already
+    # unpacked under /tmp/elasticsearch on the node -- TODO confirm which
+    # earlier provisioning step installs it.
+    ssh "${!CONTROLLERIP}" "sudo ls -al /tmp/elasticsearch/"
+    ssh "${!CONTROLLERIP}" "sudo mv /tmp/elasticsearch.yml /tmp/elasticsearch/elasticsearch-1.7.5/config/"
+    ssh "${!CONTROLLERIP}" "cat /tmp/elasticsearch/elasticsearch-1.7.5/config/elasticsearch.yml"
+
+    echo "Copying the elasticsearch_startup script to ${!CONTROLLERIP}"
+    cat "${WORKSPACE}"/elasticsearch_startup.sh
+    scp "${WORKSPACE}"/elasticsearch_startup.sh "${!CONTROLLERIP}":/tmp
+    ssh "${!CONTROLLERIP}" 'bash /tmp/elasticsearch_startup.sh'
+    ssh "${!CONTROLLERIP}" 'ps aux | grep elasticsearch'
+done
+} #function set_jvm_common_attribute
+
# Return elapsed time. Usage:
# - Call first time with no arguments and a new timer is returned.
# - Next call with the first argument as the timer and the elapsed time is returned.
BUNDLE_URL: ${BUNDLE_URL}
CONTROLLERFEATURES: ${CONTROLLERFEATURES}
CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
+CONTROLLERMAXMEM: ${CONTROLLERMAXMEM}
SCRIPTPLAN: ${SCRIPTPLAN}
CONFIGPLAN: ${CONFIGPLAN}
STREAMTESTPLAN: ${STREAMTESTPLAN}
LBAAS_SERVICE_PROVIDER: ${LBAAS_SERVICE_PROVIDER}
ODL_SFC_DRIVER: ${ODL_SFC_DRIVER}
ODL_SNAT_MODE: ${ODL_SNAT_MODE}
+GROUP_ADD_MOD_ENABLED: ${GROUP_ADD_MOD_ENABLED}
EOF
}
local -r filter=$3
filter_=${filter// /_}
- printf "node %s, %s_%s__%s: starting tcpdump\n" "${ip}" "${prefix}" "${ip}" "${filter}"
+ printf "node %s, %s_%s__%s: starting tcpdump\\n" "${ip}" "${prefix}" "${ip}" "${filter}"
    # $filter needs to be parsed client-side
# shellcheck disable=SC2029
ssh "${ip}" "nohup sudo /usr/sbin/tcpdump -vvv -ni eth0 ${filter} -w /tmp/tcpdump_${prefix}_${ip}__${filter_}.pcap > /tmp/tcpdump_start.log 2>&1 &"
function tcpdump_stop() {
local -r ip=$1
- printf "node %s: stopping tcpdump\n" "$ip"
+ printf "node %s: stopping tcpdump\\n" "$ip"
${SSH} "${ip}" "ps -ef | grep tcpdump.sh"
${SSH} "${ip}" "sudo pkill -f tcpdump"
${SSH} "${ip}" "sudo xz -9ekvvf /tmp/*.pcap"
local -r node_type=${3}
local oslogs="${folder}/oslogs"
- printf "collect_openstack_logs for %s node: %s into %s\n" "${node_type}" "${ip}" "${oslogs}"
+ printf "collect_openstack_logs for %s node: %s into %s\\n" "${node_type}" "${ip}" "${oslogs}"
rm -rf "${oslogs}"
mkdir -p "${oslogs}"
# There are always some logs in /opt/stack/logs and this also covers the
local -r services=\${1}
local -r folder=\${2}
local -r node_type=\${3}
- printf "extract_from_journal folder: \${folder}, services: \${services}\n"
+ printf "extract_from_journal folder: \${folder}, services: \${services}\\n"
for service in \${services}; do
# strip anything before @ and anything after .
# devstack@g-api.service will end as g-api
ls -al /tmp/oslogs
EOF
# cat > ${WORKSPACE}/collect_openstack_logs.sh << EOF
- printf "collect_openstack_logs for %s node: %s into %s, executing script\n" "${node_type}" "${ip}" "${oslogs}"
+ printf "collect_openstack_logs for %s node: %s into %s, executing script\\n" "${node_type}" "${ip}" "${oslogs}"
cat "${WORKSPACE}"/collect_openstack_logs.sh
scp "${WORKSPACE}"/collect_openstack_logs.sh "${ip}":/tmp
${SSH} "${ip}" "bash /tmp/collect_openstack_logs.sh > /tmp/collect_openstack_logs.log 2>&1"
set +e # We do not want to create red dot just because something went wrong while fetching logs.
cat > extra_debug.sh << EOF
-echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\n"
+echo -e "/usr/sbin/lsmod | /usr/bin/grep openvswitch\\n"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
-echo -e "\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\n"
+echo -e "\\nsudo grep ct_ /var/log/openvswitch/ovs-vswitchd.log\\n"
sudo grep "Datapath supports" /var/log/openvswitch/ovs-vswitchd.log
-echo -e "\nsudo netstat -punta\n"
+echo -e "\\nsudo netstat -punta\\n"
sudo netstat -punta
-echo -e "\nsudo getenforce\n"
+echo -e "\\nsudo getenforce\\n"
sudo getenforce
-echo -e "\nsudo systemctl status httpd\n"
+echo -e "\\nsudo systemctl status httpd\\n"
sudo systemctl status httpd
-echo -e "\nenv\n"
+echo -e "\\nenv\\n"
env
source /opt/stack/devstack/openrc admin admin
-echo -e "\nenv after openrc\n"
+echo -e "\\nenv after openrc\\n"
env
-echo -e "\nsudo du -hs /opt/stack"
+echo -e "\\nsudo du -hs /opt/stack"
sudo du -hs /opt/stack
-echo -e "\nsudo mount"
+echo -e "\\nsudo mount"
sudo mount
-echo -e "\ndmesg -T > /tmp/dmesg.log"
+echo -e "\\ndmesg -T > /tmp/dmesg.log"
dmesg -T > /tmp/dmesg.log
-echo -e "\njournalctl > /tmp/journalctl.log\n"
+echo -e "\\njournalctl > /tmp/journalctl.log\\n"
sudo journalctl > /tmp/journalctl.log
-echo -e "\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
+echo -e "\\novsdb-tool -mm show-log > /tmp/ovsdb-tool.log"
ovsdb-tool -mm show-log > /tmp/ovsdb-tool.log
EOF
echo "collect_logs: for opendaylight controller ip: ${!CONTROLLERIP}"
NODE_FOLDER="odl_${i}"
mkdir -p "${NODE_FOLDER}"
- echo "Lets's take the karaf thread dump again..."
+ echo "Let's take the karaf thread dump again..."
ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_after.log
pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_after.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"
${SSH} "${!CONTROLLERIP}" "tar -cf /tmp/odl${i}_zrpcd.log.tar /tmp/zrpcd.init.log"
scp "${!CONTROLLERIP}:/tmp/odl${i}_zrpcd.log.tar" "${NODE_FOLDER}"
tar -xvf "${NODE_FOLDER}/odl${i}_karaf.log.tar" -C "${NODE_FOLDER}" --strip-components 2 --transform "s/karaf/odl${i}_karaf/g"
- grep "ROBOT MESSAGE\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
- grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" \
+ grep "ROBOT MESSAGE\\| ERROR " "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err.log"
+ grep "ROBOT MESSAGE\\| ERROR \\| WARN \\|Exception" \
"${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_err_warn_exception.log"
# Print ROBOT lines and print Exception lines. For exception lines also print the previous line for context
sed -n -e '/ROBOT MESSAGE/P' -e '$!N;/Exception/P;D' "${NODE_FOLDER}/odl${i}_karaf.log" > "${NODE_FOLDER}/odl${i}_exception.log"
function get_features() {
if [ "${CONTROLLERSCOPE}" == 'all' ]; then
ACTUALFEATURES="odl-integration-compatible-with-all,${CONTROLLERFEATURES}"
- export CONTROLLERMEM="3072m"
+ # if CONTROLLERMEM still is the default 2G and was not overridden by a
+ # custom job, then we need to make sure to increase it because "all"
+ # features can be heavy
+ if [ "${CONTROLLERMEM}" == "2048m" ]; then
+ export CONTROLLERMEM="3072m"
+ fi
else
ACTUALFEATURES="odl-infrautils-ready,${CONTROLLERFEATURES}"
fi
+ if [ "${ELASTICSEARCHATTRIBUTE}" != "disabled" ]; then
+ # Add decanter features to allow JVM monitoring
+ ACTUALFEATURES="${ACTUALFEATURES},decanter-collector-jmx,decanter-appender-elasticsearch-rest"
+ fi
+
# Some versions of jenkins job builder result in feature list containing spaces
# and ending in newline. Remove all that.
ACTUALFEATURES=$(echo "${ACTUALFEATURES}" | tr -d '\n \r')
if [[ "$USEFEATURESBOOT" == "True" ]]; then
echo "Configuring the startup features..."
- sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
+ sed -ie "s/\\(featuresBoot=\\|featuresBoot =\\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
fi
FEATURE_TEST_STRING="features-integration-test"
FEATURE_TEST_STRING="features-test"
fi
-sed -ie "s%\(featuresRepositories=\|featuresRepositories =\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features,%g" ${FEATURESCONF}
+sed -ie "s%\\(featuresRepositories=\\|featuresRepositories =\\)%featuresRepositories = mvn:org.opendaylight.integration/\${FEATURE_TEST_STRING}/${BUNDLE_VERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.2.0/xml/features,%g" ${FEATURESCONF}
if [[ ! -z "${REPO_URL}" ]]; then
sed -ie "s%featuresRepositories =%featuresRepositories = ${REPO_URL},%g" ${FEATURESCONF}
fi
function create_post_startup_script() {
cat > "${WORKSPACE}"/post-startup-script.sh <<EOF
-if [[ "$USEFEATURESBOOT" != "True" ]]; then
+# wait up to 60s for karaf port 8101 to be opened, polling every 5s
+loop_count=0;
+until [[ \$loop_count -ge 12 ]]; do
+ netstat -na | grep ":::8101" && break;
+ loop_count=\$[\$loop_count+1];
+ sleep 5;
+done
- # wait up to 60s for karaf port 8101 to be opened, polling every 5s
- loop_count=0;
- until [[ \$loop_count -ge 12 ]]; do
- netstat -na | grep 8101 && break;
- loop_count=\$[\$loop_count+1];
- sleep 5;
- done
+# This workaround is required for Karaf decanter to work properly
+# The bundle:refresh command does not fail if the decanter bundles are not present
+echo "ssh to karaf console to do bundle refresh of decanter jmx collector"
+sshpass -p karaf ssh -o StrictHostKeyChecking=no \
+ -o UserKnownHostsFile=/dev/null \
+ -o LogLevel=error \
+ -p 8101 karaf@localhost \
+ "bundle:refresh org.apache.karaf.decanter.collector.jmx && bundle:refresh org.apache.karaf.decanter.api"
+
+if [[ "$USEFEATURESBOOT" != "True" ]]; then
echo "going to feature:install --no-auto-refresh ${SPACE_SEPARATED_FEATURES} one at a time"
for feature in ${SPACE_SEPARATED_FEATURES}; do
# if we ended up not finding ready status in the above loop, we can output some debugs
grep 'org.opendaylight.infrautils.*System ready' /tmp/${BUNDLEFOLDER}/data/log/karaf.log
-if [ $? -ne 0 ]; then
+if [ \$? -ne 0 ]; then
echo "Timeout Controller DOWN"
echo "Dumping first 500K bytes of karaf log..."
head --bytes=500K "/tmp/${BUNDLEFOLDER}/data/log/karaf.log"
for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
CONTROLLERIP="ODL_SYSTEM_${i}_IP"
echo "Execute the post startup script on controller ${!CONTROLLERIP}"
- scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/
+ scp "${WORKSPACE}"/post-startup-script.sh "${!CONTROLLERIP}":/tmp/
# $seed_index needs to be parsed client-side
# shellcheck disable=SC2029
ssh "${!CONTROLLERIP}" "bash /tmp/post-startup-script.sh $(( seed_index++ ))"
done
}
-function create_controller_variables() {
- echo "Generating controller variables..."
+function dump_controller_threads() {
for i in $(seq 1 "${NUM_ODL_SYSTEM}"); do
CONTROLLERIP="ODL_SYSTEM_${i}_IP"
- odl_variables=${odl_variables}" -v ${CONTROLLERIP}:${!CONTROLLERIP}"
- echo "Lets's take the karaf thread dump"
+ echo "Let's take the karaf thread dump"
ssh "${!CONTROLLERIP}" "sudo ps aux" > "${WORKSPACE}"/ps_before.log
pid=$(grep org.apache.karaf.main.Main "${WORKSPACE}"/ps_before.log | grep -v grep | tr -s ' ' | cut -f2 -d' ')
echo "karaf main: org.apache.karaf.main.Main, pid:${pid}"