-[default]
-bears = SpaceConsistencyBear,GitCommitBear
-files = scripts/*
-allow_trailing_whitespace = False
-enforce_newline_at_EOF = True
-indent_size = 4
-use_spaces = yeah
+[GitCommit]
+bears = GitCommitBear
+ignore_length_regex = Signed-off-by,
+ Also-by,
+ Co-authored-by,
+ http://,
+ https://
+
+[JSON]
+bears = JSONFormatBear
+files = **/*.json
+ignore = .*/**
+indent_size = 2
[YAML]
bears = YAMLLintBear
files = jjb/**/*.yaml,openstack-hot/**/*.yaml
document_start = True
yamllint_config = yamllint.conf
+
+[ShellCheck]
+bears = ShellCheckBear,SpaceConsistencyBear
+files = jenkins-scripts/**/*.sh,
+ jjb/**/*.sh,
+ scripts/**/*.sh
+ignore = jenkins-scripts/*-local-env.sh,
+ jjb/integration/*.sh
+shell = bash
+indent_size = 4
+use_spaces = True
* CentOS 7 - docker - 20170120-1434
* CentOS 7 - java-builder - 20170117-0004
* CentOS 7 - java-builder - 20170126-0058
+* CentOS 7 - java-builder - 20170309-2355
+* CentOS 7 - java-builder - 20170311-0517
* CentOS 7 - robot - 20170117-0004
* CentOS 7 - robot - 20170210-1803
* Fedora 23 (20151030 cloud)
* Ubuntu 14.04 - mininet-ovs-25 - 20170130-0425
* Ubuntu 14.04 - mininet-ovs-25 - 20170210-0300
* Ubuntu 14.04 LTS Trusty Tahr (cloudimg)
+* Ubuntu 16.04 - gbp - 20170308-0321
+* Ubuntu 16.04 - mininet-ovs-25 - 20170308-0230
* Ubuntu 16.04 LTS (2016-05-03 cloudimg)
useradd -m -s /bin/bash jenkins
# Check if docker group exists
-grep -q docker /etc/group
-if [ "$?" == '0' ]
+if grep -q docker /etc/group
then
- # Add jenkins user to docker group
- usermod -a -G docker jenkins
+ # Add jenkins user to docker group
+ usermod -a -G docker jenkins
fi
# Check if mock group exists
-grep -q mock /etc/group
-if [ "$?" == '0' ]
+if grep -q mock /etc/group
then
- # Add jenkins user to mock group so they can build Int/Pack's RPMs
- usermod -a -G mock jenkins
+ # Add jenkins user to mock group so they can build Int/Pack's RPMs
+ usermod -a -G mock jenkins
fi
mkdir /home/jenkins/.ssh
mkdir /w
-cp -r /home/${OS}/.ssh/authorized_keys /home/jenkins/.ssh/authorized_keys
+cp -r "/home/${OS}/.ssh/authorized_keys" /home/jenkins/.ssh/authorized_keys
# Generate ssh key for use by Robot jobs
echo -e 'y\n' | ssh-keygen -N "" -f /home/jenkins/.ssh/id_rsa -t rsa
chown -R jenkins:jenkins /home/jenkins/.ssh /w
#!/bin/bash
-OS=`facter operatingsystem`
+OS=$(facter operatingsystem)
case "$OS" in
Fedora)
systemctl stop firewalld
;;
CentOS|RedHat)
- if [ `facter operatingsystemrelease | cut -d '.' -f1` -lt "7" ]; then
+ if [ "$(facter operatingsystemrelease | cut -d '.' -f1)" -lt "7" ]; then
service iptables stop
else
systemctl stop firewalld
# make sure jenkins is part of the docker only if jenkins has already been
# created
-grep -q jenkins /etc/passwd
-if [ "$?" == '0' ]
+
+if grep -q jenkins /etc/passwd
then
/usr/sbin/usermod -a -G docker jenkins
fi
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
-cd /builder/jenkins-scripts
-chmod +x *.sh
+cd /builder/jenkins-scripts || exit 1
+chmod +x -- *.sh
./system_type.sh
+# shellcheck disable=SC1091
source /tmp/system_type.sh
./basic_settings.sh
-./${SYSTEM_TYPE}.sh
+"./${SYSTEM_TYPE}.sh"
# Create the jenkins user last so that hopefully we don't have to deal with
# guard files
- inject:
properties-file: variables.prop
-- builder:
- name: autorelease-generate-project-report
- builders:
- - shell: !include-raw: include-raw-generate-project-report.sh
-
- builder:
name: autorelease-fix-relative-paths
builders:
name: autorelease-projects
jobs:
- 'autorelease-release-{stream}'
- - 'autorelease-project-report-{stream}'
stream:
- carbon:
jdk: 'openjdk8'
integration-test: boron
- beryllium:
+ # Only run once a week since Beryllium is in maintenance mode
+ cron: 'H H * * 0'
next-release-tag: Beryllium-SR5
branch: 'stable/beryllium'
jdk: 'openjdk7'
integration-test: beryllium
project: 'releng/autorelease'
- archive-artifacts: '**/*.prop **/*.log **/patches/*.bundle **/patches/*.patch all-bundles.tar.gz'
+ archive-artifacts: >
+ **/*.prop
+ **/*.log
+ **/patches/*.bundle
+ **/patches/*.patch
+ patches.tar.gz
###
# TODO: Remove this job once guava21 testing is complete
project-type: freestyle
node: centos7-autorelease-4c-16g
jdk: '{jdk}'
+ cron: 'H 0 * * *'
properties:
- opendaylight-infra-properties:
build-timeout: '1440'
triggers:
- - timed: 'H 0 * * *'
+ - timed: '{cron}'
builders:
# force jenkins install of maven version before any shell scripts use it
- opendaylight-infra-shiplogs:
maven-version: 'mvn33'
-
-- job-template:
- name: 'autorelease-project-report-{stream}'
-
- project-type: freestyle
- node: centos7-java-builder-2c-8g
-
- properties:
- - opendaylight-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
-
- parameters:
- - opendaylight-infra-parameters:
- project: '{project}'
- branch: '{branch}'
- refspec: 'refs/heads/{branch}'
- artifacts: '{archive-artifacts}'
- - string:
- name: REPORT_DIR
- default: '$WORKSPACE/project-reports'
- description: "The directory containing project reports"
-
- scm:
- - git:
- credentials-id: 'opendaylight-jenkins-ssh'
- url: '$GIT_BASE'
- refspec: '$GERRIT_REFSPEC'
- branches:
- - '$GERRIT_BRANCH'
- choosing-strategy: 'gerrit'
- skip-tag: true
- submodule:
- recursive: true
-
- wrappers:
- - opendaylight-infra-wrappers:
- build-timeout: '30'
-
- triggers:
- - timed: '0 0 * * 0'
-
- builders:
- - shell: 'echo "DATE=`date +%Y-%m-%d`" > $WORKSPACE/variables.prop'
- - inject:
- properties-file: variables.prop
- - autorelease-generate-project-report
- - shell: "./scripts/list-project-dependencies.sh"
- - autorelease-determine-merge-order
- - autorelease-sys-stats
-
- publishers:
- - email-ext:
- attachments: 'project-reports/*.log'
- recipients: 'skitt@redhat.com thanh.ha@linuxfoundation.org'
- reply-to: dev@lists.opendaylight.org
- content-type: default
- subject: '[releng] ODL {stream} project report for ${{ENV, var="DATE"}}'
- body: |
- This is a project report generated on $DATE listing the commit
- history of ODL projects for the past week. See attached
- git-report.log
- Archive also available on Jenkins at $BUILD_URL
- always: true
- - opendaylight-infra-shiplogs:
- maven-version: 'mvn33'
NEXUS_STAGING_URL=${ODLNEXUS_STAGING_URL:-$ODLNEXUSPROXY}
NEXUSURL=${NEXUS_STAGING_URL}/content/repositories/
-VERSION=`grep -m2 '<version>' ${WORKSPACE}/integration/distribution/distribution-karaf/pom.xml | tail -n1 | awk -F'[<|>]' '/version/ { printf $3 }'`
+VERSION=$(grep -m2 '<version>' "${WORKSPACE}/integration/distribution/distribution-karaf/pom.xml" | tail -n1 | awk -F'[<|>]' '/version/ { printf $3 }')
echo "VERSION: ${VERSION}"
-STAGING_REPO_ID=`grep "Created staging repository with ID" $WORKSPACE/deploy-staged-repository.log | cut -d '"' -f2`
-BUNDLEURL=${NEXUSURL}/${STAGING_REPO_ID}/org/opendaylight/integration/distribution-karaf/${VERSION}/distribution-karaf-${VERSION}.zip
-echo STAGING_REPO_ID=$STAGING_REPO_ID >> $WORKSPACE/variables.prop
-echo BUNDLEURL=$BUNDLEURL >> $WORKSPACE/variables.prop
+STAGING_REPO_ID=$(grep "Created staging repository with ID" "$WORKSPACE/deploy-staged-repository.log" | cut -d '"' -f2)
+BUNDLEURL="${NEXUSURL}/${STAGING_REPO_ID}/org/opendaylight/integration/distribution-karaf/${VERSION}/distribution-karaf-${VERSION}.zip"
+echo STAGING_REPO_ID="$STAGING_REPO_ID" >> "$WORKSPACE/variables.prop"
+echo BUNDLEURL="$BUNDLEURL" >> "$WORKSPACE/variables.prop"
echo "BUNDLEURL: ${BUNDLEURL}"
# Copy variables.prop to variables.jenkins-trigger so that the end of build
##############################################################################
# Assuming that mvn deploy created the hide/from/pom/files/stage directory.
-cd hide/from/pom/files
+cd hide/from/pom/files || exit 1
mkdir -p m2repo/org/opendaylight/
# ODLNEXUSPROXY is used to define the location of the Nexus server used by the CI system.
"stage/org/opendaylight" m2repo/org/
"$MVN" -V -B org.sonatype.plugins:nexus-staging-maven-plugin:1.6.2:deploy-staged-repository \
- -DrepositoryDirectory="`pwd`/m2repo" \
- -DnexusUrl=$NEXUS_STAGING_URL \
+ -DrepositoryDirectory="$(pwd)/m2repo" \
+ -DnexusUrl="$NEXUS_STAGING_URL" \
-DstagingProfileId="$NEXUS_STAGING_PROFILE" \
-DserverId="$NEXUS_STAGING_SERVER_ID" \
- -s $SETTINGS_FILE \
- -gs $GLOBAL_SETTINGS_FILE | tee $WORKSPACE/deploy-staged-repository.log
+ -s "$SETTINGS_FILE" \
+ -gs "$GLOBAL_SETTINGS_FILE" | tee "$WORKSPACE/deploy-staged-repository.log"
#!/bin/bash
# @License EPL-1.0 <http://spdx.org/licenses/EPL-1.0>
##############################################################################
-# Copyright (c) 2015 The Linux Foundation and others.
+# Copyright (c) 2015, 2017 The Linux Foundation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
LFTOOLS_DIR="$WORKSPACE/.venv-lftools"
if [ ! -d "$LFTOOLS_DIR" ]
then
- virtualenv $LFTOOLS_DIR
- source $LFTOOLS_DIR/bin/activate
+ virtualenv "$LFTOOLS_DIR"
+ # shellcheck disable=SC1090
+ source "$LFTOOLS_DIR/bin/activate"
pip install --upgrade pip
pip freeze
pip install lftools
fi
+# shellcheck disable=SC1090
source "$LFTOOLS_DIR/bin/activate"
# Directory to put git format-patches
-PATCH_DIR=`pwd`/patches
+PATCH_DIR="$WORKSPACE/patches"
-echo $RELEASE_TAG
-lftools version release $RELEASE_TAG
+echo "$RELEASE_TAG"
+lftools version release "$RELEASE_TAG"
git submodule foreach "git commit -am \"Release $RELEASE_TAG\" || true"
git commit -am "Release $RELEASE_TAG"
mkdir patches
-mv taglist.log $PATCH_DIR
-modules=`xmlstarlet sel -N x=http://maven.apache.org/POM/4.0.0 -t -m '//x:modules' -v '//x:module' pom.xml`
+mv taglist.log "$PATCH_DIR"
+modules=$(xmlstarlet sel -N x=http://maven.apache.org/POM/4.0.0 -t -m '//x:modules' -v '//x:module' pom.xml)
for module in $modules; do
- pushd $module
- git format-patch --stdout origin/$RELEASE_BRANCH > $PATCH_DIR/${module//\//-}.patch
- git bundle create $PATCH_DIR/${module//\//-}.bundle "origin/master..HEAD"
+ pushd "$module"
+ git format-patch --stdout "origin/$RELEASE_BRANCH" > "$PATCH_DIR/${module//\//-}.patch"
+ git bundle create "$PATCH_DIR/${module//\//-}.bundle" "origin/master..HEAD"
popd
done
-tar cvzf all-bundles.tar.gz `find $PATCH_DIR -type f -print0 \
- | xargs -0r file \
- | egrep -e ':.*Git bundle.*' \
- | cut -d: -f1`
-rm $PATCH_DIR/*.bundle
+tar cvzf patches.tar.gz -C "$WORKSPACE" patches
+rm "$PATCH_DIR"/*.bundle
+++ /dev/null
-#!/bin/bash
-# @License EPL-1.0 <http://spdx.org/licenses/EPL-1.0>
-##############################################################################
-# Copyright (c) 2015 The Linux Foundation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-##############################################################################
-
-GIT_REPORT_FILE=$REPORT_DIR/git-report.log
-
-mkdir $REPORT_DIR
-touch $GIT_REPORT_FILE
-
-projects=`grep path .gitmodules | sed 's/.*= //' | sort`
-for p in $projects; do
- echo "" >> $GIT_REPORT_FILE
- echo "========" >> $GIT_REPORT_FILE
- echo "$p" >> $GIT_REPORT_FILE
- echo "========" >> $GIT_REPORT_FILE
- echo "" >> $GIT_REPORT_FILE
-
- cd $WORKSPACE/$p
- git log --after="1 week ago" | sed 'sX^ Change-Id: X -> https://git.opendaylight.org/gerrit/r/X' >> $GIT_REPORT_FILE
- cd $WORKSPACE
-done
#!/bin/bash
-if [ $GERRIT_BRANCH == "master" ]; then
+if [ "$GERRIT_BRANCH" == "master" ]; then
RTD_BUILD_VERSION=latest
else
- RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}}
+ RTD_BUILD_VERSION="${{GERRIT_BRANCH/\//-}}"
fi
+
+# shellcheck disable=SC1083
curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/{rtdproject}
##############################################################################
# Clear workspace
-rm -rf *
+rm -rf -- "${WORKSPACE:?}"/*
# Create python script to parse json
-cat > ${WORKSPACE}/parse_json.py << EOF
+cat > "${WORKSPACE}/parse_json.py" << EOF
import json
import sys
# Clone all ODL projects
curl -s --header "Accept: application/json" \
https://git.opendaylight.org/gerrit/projects/ | \
- tail -n +2 > ${WORKSPACE}/projects.json
-for p in `cat ${WORKSPACE}/projects.json | python ${WORKSPACE}/parse_json.py`
+ tail -n +2 > "${WORKSPACE}/projects.json"
+for p in $(python "${WORKSPACE}/parse_json.py" < "${WORKSPACE}/projects.json")
do
# Ignore non-projects and archived projects
if [ "$p" == "All-Users" ] || \
then
continue
fi
- mkdir -p `dirname "$p"`
+ mkdir -p "$(dirname "$p")"
git clone "https://git.opendaylight.org/gerrit/$p.git" "$p"
done
# Check pom.xml for <repositories> and <pluginRepositories>
FILE=repos.txt
-find . -name pom.xml | xargs grep -i '<repositories>\|<pluginRepositories>' > $FILE
+find . -name pom.xml -print0 | xargs -0 grep -i '<repositories>\|<pluginRepositories>' > "$FILE"
[[ $(tr -d "\r\n" < $FILE|wc -c) -eq 0 ]] && rm $FILE
if [ -a $FILE ]
echo "Build logs: <a href=\"$LOGS_SERVER/$SILO/$ARCHIVES_DIR\">$LOGS_SERVER/$SILO/$ARCHIVES_DIR</a>"
mkdir .archives
-cd .archives/ || exit 404
+cd .archives/ || exit 1
cat > deploy-archives.xml <<EOF
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
-if [[ $P2ZIP_URL == "" ]]; then
+if [[ "$P2ZIP_URL" == "" ]]; then
P2ZIP_URL=opendaylight.snapshot/$(find . -name "*.zip" -type f -exec ls "{}" + | head -1)
- FILE_NAME=`echo $P2ZIP_URL | awk -F'/' '{ print $NF }'`
+ FILE_NAME=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $NF }')
RELEASE_PATH="snapshot"
else
- FILE_NAME=`echo $P2ZIP_URL | awk -F'/' '{ print $NF }'`
- VERSION=`echo $P2ZIP_URL | awk -F'/' '{ print $(NF-1) }'`
+ FILE_NAME=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $NF }')
+ VERSION=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $(NF-1) }')
RELEASE_PATH="release/$VERSION"
- wget --quiet $P2ZIP_URL -O $FILE_NAME
+ wget --quiet "$P2ZIP_URL" -O "$FILE_NAME"
fi
# If we detect a snapshot build then release to a snapshot repo
# YangIDE has indicated that the only want the latest snapshot released to
# the snapshot directory.
-if echo $P2ZIP_URL | grep opendaylight.snapshot; then
+if echo "$P2ZIP_URL" | grep opendaylight.snapshot; then
RELEASE_PATH="snapshot"
fi
-cat > ${WORKSPACE}/pom.xml <<EOF
+cat > "${WORKSPACE}/pom.xml" <<EOF
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.$PROJECT</groupId>
#!/bin/bash
if [ -d "$WORKSPACE/.venv-openstack" ]; then
- source $WORKSPACE/.venv-openstack/bin/activate
- OS_STATUS=`openstack --os-cloud rackspace stack show -f json -c stack_status $STACK_NAME | jq -r '.stack_status'`
+ # shellcheck disable=SC1090
+ source "$WORKSPACE/.venv-openstack/bin/activate"
+ OS_STATUS=$(openstack --os-cloud rackspace stack show -f json -c stack_status "$STACK_NAME" | jq -r '.stack_status')
if [ "$OS_STATUS" == "CREATE_COMPLETE" ] || [ "$OS_STATUS" == "CREATE_FAILED" ]; then
echo "Deleting $STACK_NAME"
- openstack --os-cloud rackspace stack delete --yes $STACK_NAME
+ openstack --os-cloud rackspace stack delete --yes "$STACK_NAME"
fi
fi
##############################################################################
# Assuming that mvn deploy created the hide/from/pom/files/stage directory.
-cd hide/from/pom/files
+cd hide/from/pom/files || exit 1
mkdir -p m2repo/org/opendaylight/
rsync -avz --exclude 'maven-metadata*' \
--exclude '_remote.repositories' \
--exclude 'resolver-status.properties' \
- "stage/org/opendaylight/$m" m2repo/org/opendaylight/
+ "stage/org/opendaylight/$PROJECT" m2repo/org/opendaylight/
mvn org.sonatype.plugins:nexus-staging-maven-plugin:1.6.2:deploy-staged-repository \
- -DrepositoryDirectory="`pwd`/m2repo" \
+ -DrepositoryDirectory="$(pwd)/m2repo" \
-DnexusUrl=https://nexus.opendaylight.org/ \
-DstagingProfileId="$STAGING_PROFILE_ID" \
-DserverId="opendaylight-staging" \
- -s $SETTINGS_FILE \
- -gs $GLOBAL_SETTINGS_FILE | tee $WORKSPACE/deploy-staged-repository.log
+ -s "$SETTINGS_FILE" \
+ -gs "$GLOBAL_SETTINGS_FILE" | tee "$WORKSPACE/deploy-staged-repository.log"
# If we detect a snapshot build then there is no need to run this script.
# YangIDE has indicated that the only want the latest snapshot released to
# the snapshot directory.
-if echo $P2ZIP_URL | grep opendaylight.snapshot; then
+if echo "$P2ZIP_URL" | grep opendaylight.snapshot; then
exit 0
fi
if [[ "$P2ZIP_URL" == "" ]]; then
exit 0
fi
-EPOCH_DATE=`date +%s%3N`
-MVN_METADATA=`echo $P2ZIP_URL | sed 's,/*[^/]\+/*$,,' | sed 's,/*[^/]\+/*$,,'`/maven-metadata.xml
+EPOCH_DATE=$(date +%s%3N)
+MVN_METADATA=$(echo "$P2ZIP_URL" | sed 's,/*[^/]\+/*$,,' | sed 's,/*[^/]\+/*$,,')/maven-metadata.xml
P2_COMPOSITE_ARTIFACTS=compositeArtifacts.xml
P2_COMPOSITE_CONTENT=compositeContent.xml
-wget $MVN_METADATA -O maven-metadata.xml
+wget "$MVN_METADATA" -O maven-metadata.xml
-VERSIONS=`xmlstarlet sel -t -m "/metadata/versioning/versions" -v "version" maven-metadata.xml`
-NUM_VERSIONS=`echo $VERSIONS | wc -w`
+VERSIONS=$(xmlstarlet sel -t -m "/metadata/versioning/versions" -v "version" maven-metadata.xml)
+NUM_VERSIONS=$(echo "$VERSIONS" | wc -w)
##
#!/bin/bash
-git log --show-signature -1 | egrep -q 'gpg: Signature made.*key ID'
-if [ $? -eq 0 ]; then
+if git log --show-signature -1 | grep -E -q 'gpg: Signature made.*key ID'; then
echo "git commit is gpg signed"
else
echo "WARNING: gpg signature missing for the commit"
#!/bin/bash -x
set +e # To avoid failures in projects which generate zero snapshot artifacts.
-find /tmp/r/org/opendaylight/$GERRIT_PROJECT/ -path *-SNAPSHOT* -delete
+find "/tmp/r/org/opendaylight/$GERRIT_PROJECT/" -path "*-SNAPSHOT*" -delete
find /tmp/r/ -regex '.*/_remote.repositories\|.*/maven-metadata-local\.xml\|.*/maven-metadata-fake-nexus\.xml\|.*/resolver-status\.properties' -delete
find /tmp/r/ -type d -empty -delete
echo "INFO: A listing of project related files left in local repository follows."
-find /tmp/r/org/opendaylight/$GERRIT_PROJECT/
+find "/tmp/r/org/opendaylight/$GERRIT_PROJECT/"
true # To prevent the possibly non-zero return code from failing the job.
+#!/bin/bash
echo "Cleaning up Robot installation..."
# ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
# script.
# TODO: Is this still needed when we have integration-cleanup-workspace?
-rm -rf ${ROBOT_VENV}
+rm -rf "${ROBOT_VENV}"
+#!/bin/bash
echo "Cleaning up the workspace..."
# Leftover files from previous runs could be wrongly copied as results.
# Keep the cloned integration/test repository!
-for file_or_dir in `ls -A -1 -I "test"`
+# dotglob so hidden leftovers are also removed, matching the old `ls -A` behaviour
+shopt -s dotglob
+for file_or_dir in *
# FIXME: Make this compatible with multipatch and other possible build&run jobs.
do
- rm -vrf "$file_or_dir"
+ if [ "$file_or_dir" != "test" ]; then
+ rm -vrf "$file_or_dir"
+ fi
done
echo "Listing all open ports on controller system"
netstat -pnatu
-echo "redirected karaf console output to karaf_console.log"
-export KARAF_REDIRECT=${WORKSPACE}/${BUNDLEFOLDER}/data/log/karaf_console.log
-
if [ ${JDKVERSION} == 'openjdk8' ]; then
echo "Setting the JRE Version to 8"
# dynamic_verify does not allow sudo, JAVA_HOME should be enough for karaf start.
pip install --upgrade docker-py importlib requests scapy netifaces netaddr ipaddr pyhocon
pip install --upgrade robotframework{,-{httplibrary,requests,sshlibrary,selenium2library}}
+pip install --upgrade robotframework-pycurllibrary
# Module jsonpath is needed by current AAA idmlite suite.
pip install --upgrade jsonpath-rw
set -e
echo "---> Cleaning up OVS $OVS_VERSION"
-docker logs $CID > $WORKSPACE/docker-ovs-${OVS_VERSION}.log
-docker stop $CID
-docker rm $CID
+docker logs "$CID" > "$WORKSPACE/docker-ovs-${OVS_VERSION}.log"
+docker stop "$CID"
+docker rm "$CID"
rm env.properties
docker images
#
# https://github.com/openstack-infra/project-config/blob/master/jenkins/jobs/networking-odl.yaml
-export PATH=$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin
+export PATH="$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin"
# *SIGH*. This is required to get lsb_release
sudo yum -y install redhat-lsb-core indent python-testrepository
sudo bash -c 'echo "stack ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers'
# We need to install some scripts from openstack/os-testr project
-cd ~
+cd ~ || exit 1
echo "Setting up infra scripts"
sudo mkdir -p /usr/local/jenkins/slave_scripts
git clone https://github.com/openstack/os-testr.git
-cd os-testr/os_testr
+cd os-testr/os_testr || exit 1
sudo cp subunit2html.py /usr/local/jenkins/slave_scripts
# Save existing WORKSPACE
-SAVED_WORKSPACE=$WORKSPACE
+SAVED_WORKSPACE="$WORKSPACE"
export WORKSPACE=~/workspace
-mkdir -p $WORKSPACE
-cd $WORKSPACE
+mkdir -p "$WORKSPACE"
+cd "$WORKSPACE" || exit 1
# This is the job which checks out devstack-gate
if [[ ! -e devstack-gate ]]; then
git clone https://git.openstack.org/openstack-infra/devstack-gate
else
echo "Fixing devstack-gate git remotes"
- cd devstack-gate
+ cd devstack-gate || exit 1
git remote set-url origin https://git.openstack.org/openstack-infra/devstack-gate
git remote update
git reset --hard
# Set the pieces we want to test
if [ "$GERRIT_PROJECT" == "openstack/neutron" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
elif [ "$GERRIT_PROJECT" == "openstack-dev/devstack" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
fi
echo "Setting environment variables"
DEVSTACK_LOCAL_CONFIG+="ODL_JAVA_MAX_PERM_MEM=784m;"
# Set ODL_URL_PREFIX if "nexus proxy" is provided
-URL_PREFIX=${ODLNEXUSPROXY:-https://nexus.opendaylight.org}
+export URL_PREFIX="${ODLNEXUSPROXY:-https://nexus.opendaylight.org}"
if [ -n "$ODLNEXUSPROXY" ] ; then
DEVSTACK_LOCAL_CONFIG+="ODL_URL_PREFIX=$ODLNEXUSPROXY;"
fi
DGRET=$?
# Restore WORKSPACE
-OS_WORKSPACE=$WORKSPACE
-export WORKSPACE=$SAVED_WORKSPACE
+OS_WORKSPACE="$WORKSPACE"
+export WORKSPACE="$SAVED_WORKSPACE"
# Copy and display all the logs
cat /opt/stack/new/devstacklog*
ls /opt/stack/; ls /opt/stack/new; ls /opt/stack/new/opendaylight;
-cp -r $OS_WORKSPACE/logs $WORKSPACE
-cp -a /opt/stack/new/logs/screen-odl-karaf* $WORKSPACE/logs
-mkdir -p $WORKSPACE/logs/opendaylight
-cp -a /opt/stack/new/opendaylight/distribution*/etc $WORKSPACE/logs/opendaylight
+cp -r "$OS_WORKSPACE/logs" "$WORKSPACE"
+cp -a /opt/stack/new/logs/screen-odl-karaf* "$WORKSPACE/logs"
+mkdir -p "$WORKSPACE/logs/opendaylight"
+cp -a /opt/stack/new/opendaylight/distribution*/etc "$WORKSPACE/logs/opendaylight"
# Unzip the logs to make them easier to view
-gunzip $WORKSPACE/logs/*.gz
+gunzip "$WORKSPACE"/logs/*.gz
-exit $DGRET
+exit "$DGRET"
set -e
-OVS_VERSION=${OVS_VERSION:-2.5.0}
+OVS_VERSION="${OVS_VERSION:-2.5.0}"
echo "---> Cleaning up existing Docker processes and images"
for x in $(docker ps -a -q)
echo "---> Starting OVS $OVS_VERSION"
-/usr/bin/docker pull vpickard/openvswitch:$OVS_VERSION
-CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t vpickard/openvswitch:$OVS_VERSION /usr/bin/supervisord)
-REALCID=`echo $CID | rev | cut -d ' ' -f 1 | rev`
+/usr/bin/docker pull "vpickard/openvswitch:$OVS_VERSION"
+CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t "vpickard/openvswitch:$OVS_VERSION" /usr/bin/supervisord)
+REALCID=$(echo "$CID" | rev | cut -d ' ' -f 1 | rev)
echo "CID=$REALCID" > env.properties
echo "OVS_VERSION=${OVS_VERSION}" >> env.properties
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" >> env.properties
echo "---> Waiting..."
set -e
echo "---> Configuring OVS for HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl stop ovsdb-server
-/usr/bin/docker exec $CID supervisorctl start ovsdb-server-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-br br-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl add-ps br-vtep
-/usr/bin/docker exec $CID vtep-ctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
-/usr/bin/docker exec $CID vtep-ctl set-manager ptcp:6640
+/usr/bin/docker exec "$CID" supervisorctl stop ovsdb-server
+/usr/bin/docker exec "$CID" supervisorctl start ovsdb-server-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-br br-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl add-ps br-vtep
+/usr/bin/docker exec "$CID" vtep-ctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
+/usr/bin/docker exec "$CID" vtep-ctl set-manager ptcp:6640
sleep 5
echo "---> Starting OVS HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl start ovs-vtep
+/usr/bin/docker exec "$CID" supervisorctl start ovs-vtep
sleep 5
set -e
echo "---> Setting up controller IP"
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" > env.properties
echo "---> Loading OVS kernel module"
echo "---> Verifying OVS kernel module loaded"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
-
sudo bash -c 'echo "stack ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers'
# We need to install some scripts from openstack-infra/project-config
-cd ~
+cd ~ || exit 1
echo "Setting up infra scripts"
sudo mkdir -p /usr/local/jenkins/slave_scripts
git clone https://git.openstack.org/openstack-infra/project-config
-cd project-config
+cd project-config || exit 1
sudo cp jenkins/scripts/subunit2html.py /usr/local/jenkins/slave_scripts
# Save existing WORKSPACE
SAVED_WORKSPACE=$WORKSPACE
export WORKSPACE=~/workspace
mkdir -p $WORKSPACE
-cd $WORKSPACE
+cd $WORKSPACE || exit 1
# This is the job which checks out devstack-gate
if [[ ! -e devstack-gate ]]; then
git clone https://git.openstack.org/openstack-infra/devstack-gate
else
echo "Fixing devstack-gate git remotes"
- cd devstack-gate
+ cd devstack-gate || exit 1
git remote set-url origin https://git.openstack.org/openstack-infra/devstack-gate
git remote update
git reset --hard
# Set the pieces we want to test
if [ "$GERRIT_PROJECT" == "openstack/neutron" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
elif [ "$GERRIT_PROJECT" == "openstack-dev/devstack" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
fi
echo "Setting environment variables"
DEVSTACK_LOCAL_CONFIG+="ODL_JAVA_MAX_PERM_MEM=784m;"
# Set ODL_URL_PREFIX if "nexus proxy" is provided
-URL_PREFIX=${ODLNEXUSPROXY:-https://nexus.opendaylight.org}
+export URL_PREFIX="${ODLNEXUSPROXY:-https://nexus.opendaylight.org}"
if [ -n "$ODLNEXUSPROXY" ] ; then
DEVSTACK_LOCAL_CONFIG+="ODL_URL_PREFIX=$ODLNEXUSPROXY;"
fi
export WORKSPACE=$SAVED_WORKSPACE
# Copy all the logs
-cp -r $OS_WORKSPACE/logs $WORKSPACE
-cp -a /opt/stack/new/logs/q-odl-karaf* $WORKSPACE/logs
-mkdir -p $WORKSPACE/logs/opendaylight
-cp -a /opt/stack/new/opendaylight/distribution*/etc $WORKSPACE/logs/opendaylight
+cp -r "$OS_WORKSPACE/logs" "$WORKSPACE"
+cp -a /opt/stack/new/logs/q-odl-karaf* "$WORKSPACE/logs"
+mkdir -p "$WORKSPACE/logs/opendaylight"
+cp -a /opt/stack/new/opendaylight/distribution*/etc "$WORKSPACE/logs/opendaylight"
# Unzip the logs to make them easier to view
-gunzip $WORKSPACE/logs/*.gz
+gunzip "$WORKSPACE"/logs/*.gz
exit $DGRET
#!/bin/bash
-virtualenv $WORKSPACE/.venv
-source $WORKSPACE/.venv/bin/activate
+virtualenv "$WORKSPACE/.venv"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv/bin/activate"
pip install --upgrade pip
pip install --upgrade python-openstackclient python-heatclient
pip freeze
#########################
# Fetch stack list before fetching active builds to minimize race condition
# where we might be try to delete stacks while jobs are trying to start
-OS_STACKS=(`openstack --os-cloud rackspace stack list \
+OS_STACKS=($(openstack --os-cloud rackspace stack list \
-f json -c "Stack Name" -c "Stack Status" \
--property "stack_status=CREATE_COMPLETE" \
--property "stack_status=DELETE_FAILED" \
--property "stack_status=CREATE_FAILED" \
- | jq -r '.[] | ."Stack Name"'`)
+ | jq -r '.[] | ."Stack Name"'))
# Make sure we fetch active builds on both the releng and sandbox silos
ACTIVE_BUILDS=()
for silo in releng sandbox; do
JENKINS_URL="https://jenkins.opendaylight.org/$silo//computer/api/json?tree=computer[executors[currentExecutable[url]],oneOffExecutors[currentExecutable[url]]]&xpath=//url&wrapper=builds"
- wget --no-verbose -O $silo_builds.json $JENKINS_URL
+ wget --no-verbose -O "${silo}_builds.json" "$JENKINS_URL"
sleep 1 # Need to sleep for 1 second otherwise next line causes script to stall
- ACTIVE_BUILDS=(${ACTIVE_BUILDS[@]} ` \
- jq -r '.computer[].executors[].currentExecutable.url' $silo_builds.json \
- | grep -v null | awk -F'/' '{print $6 "-" $7}'`)
+    ACTIVE_BUILDS=("${ACTIVE_BUILDS[@]}" $( \
+ jq -r '.computer[].executors[].currentExecutable.url' "${silo}_builds.json" \
+ | grep -v null | awk -F'/' '{print $6 "-" $7}'))
done
##########################
##########################
# Search for stacks taht are not in use by either releng or sandbox silos and
# delete them.
-for stack in ${OS_STACKS[@]}; do
+for stack in "${OS_STACKS[@]}"; do
if [[ "${ACTIVE_BUILDS[@]}" =~ $stack ]]; then
# No need to delete stacks if there exists an active build for them
continue
else
echo "Deleting orphaned stack: $stack"
- openstack --os-cloud rackspace stack delete --yes $stack
+ openstack --os-cloud rackspace stack delete --yes "$stack"
fi
done
#!/bin/bash
echo "----------> Copy ssh public keys to csit lab"
-source $WORKSPACE/.venv-openstack/bin/activate
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv-openstack/bin/activate"
function copy-ssh-keys-to-slave() {
RETRIES=60
for j in $(seq 1 $RETRIES); do
+ # shellcheck disable=SC2092
if `ssh-copy-id -i /home/jenkins/.ssh/id_rsa.pub "jenkins@${i}" > /dev/null 2>&1`; then
- ssh jenkins@${i} 'echo "$(facter ipaddress_eth0) $(/bin/hostname)" | sudo tee -a /etc/hosts'
+ ssh "jenkins@${i}" 'echo "$(facter ipaddress_eth0) $(/bin/hostname)" | sudo tee -a /etc/hosts'
echo "Successfully copied public keys to slave ${i}"
break
- elif [ $j -eq $RETRIES ]; then
+ elif [ "$j" -eq $RETRIES ]; then
echo "SSH not responding on ${i} after $RETRIES tries. Giving up."
exit 1
else
fi
# ping test to see if connectivity is available
- if ping -c1 ${i} &> /dev/null; then
+ if ping -c1 "${i}" &> /dev/null; then
echo "Ping to ${i} successful."
else
echo "Ping to ${i} failed."
# Print the Stack outputs parameters so that we can identify which IPs belong
# to which VM types.
-openstack --os-cloud rackspace stack show -c outputs $STACK_NAME
+openstack --os-cloud rackspace stack show -c outputs "$STACK_NAME"
-ADDR=(`openstack --os-cloud rackspace stack show -f json -c outputs $STACK_NAME | \
+# shellcheck disable=SC2006
+ADDR=(`openstack --os-cloud rackspace stack show -f json -c outputs "$STACK_NAME" | \
jq -r '.outputs[] | \
select(.output_key | match("^vm_[0-9]+_ips\$")) | \
.output_value | .[]'`)
# Detect when a process failed to copy ssh keys and fail build
for p in $pids; do
- if wait $p; then
+ if wait "$p"; then
echo "Process $p successfully copied ssh keys."
else
echo "Process $p failed to copy ssh keys."
#!/bin/bash
-CHANGE_ID=`ssh -p 29418 jenkins-$SILO@git.opendaylight.org gerrit query \
+# shellcheck disable=SC1083
+CHANGE_ID=$(ssh -p 29418 "jenkins-$SILO@git.opendaylight.org" gerrit query \
limit:1 owner:self is:open project:{project} \
message:'{gerrit-commit-message}' \
topic:{gerrit-topic} | \
grep 'Change-Id:' | \
- awk '{{ print $2 }}'`
+ awk '{{ print $2 }}')
if [ -z "$CHANGE_ID" ]; then
git commit -sm "{gerrit-commit-message}"
fi
git status
-git remote add gerrit ssh://jenkins-$SILO@git.opendaylight.org:29418/releng/builder.git
+git remote add gerrit "ssh://jenkins-$SILO@git.opendaylight.org:29418/releng/builder.git"
# Don't fail the build if this command fails because it's possible that there
# is no changes since last update.
+# shellcheck disable=SC1083
git review --yes -t {gerrit-topic} || true
#!/bin/bash
-virtualenv $WORKSPACE/.venv-openstack
-source $WORKSPACE/.venv-openstack/bin/activate
+virtualenv "$WORKSPACE/.venv-openstack"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv-openstack/bin/activate"
pip install --upgrade pip
pip install --upgrade python-openstackclient python-heatclient
pip freeze
-cd /builder/openstack-hot
+cd /builder/openstack-hot || exit 1
-JOB_SUM=`echo $JOB_NAME | sum | awk '{{ print $1 }}'`
+JOB_SUM=$(echo "$JOB_NAME" | sum | awk '{{ print $1 }}')
VM_NAME="$JOB_SUM-$BUILD_NUMBER"
OS_TIMEOUT=10 # Minutes to wait for OpenStack VM to come online
openstack --os-cloud rackspace limits show --absolute
openstack --os-cloud rackspace limits show --rate
echo "Trying up to $STACK_RETRIES times to create $STACK_NAME."
-for try in `seq $STACK_RETRIES`; do
- openstack --os-cloud rackspace stack create --timeout $OS_TIMEOUT -t {stack-template} -e $WORKSPACE/opendaylight-infra-environment.yaml --parameter "job_name=$VM_NAME" --parameter "silo=$SILO" $STACK_NAME
+for try in $(seq $STACK_RETRIES); do
+ # shellcheck disable=SC1083
+ openstack --os-cloud rackspace stack create --timeout "$OS_TIMEOUT" -t {stack-template} -e "$WORKSPACE/opendaylight-infra-environment.yaml" --parameter "job_name=$VM_NAME" --parameter "silo=$SILO" "$STACK_NAME"
openstack --os-cloud rackspace stack list
- echo "Waiting for $OS_TIMEOUT minutes to create $STACK_NAME."
- for i in `seq $OS_TIMEOUT`; do
+ echo "$try: Waiting for $OS_TIMEOUT minutes to create $STACK_NAME."
+ for i in $(seq $OS_TIMEOUT); do
sleep 60
- OS_STATUS=`openstack --os-cloud rackspace stack show -f json -c stack_status $STACK_NAME | jq -r '.stack_status'`
+ OS_STATUS=$(openstack --os-cloud rackspace stack show -f json -c stack_status "$STACK_NAME" | jq -r '.stack_status')
+ echo "$i: $OS_STATUS"
case "$OS_STATUS" in
CREATE_COMPLETE)
CREATE_FAILED)
echo "ERROR: Failed to initialize infrastructure. Deleting stack and possibly retrying to create..."
openstack --os-cloud rackspace stack list
- openstack --os-cloud rackspace stack delete --yes $STACK_NAME
- openstack --os-cloud rackspace stack show $STACK_NAME
+ openstack --os-cloud rackspace stack delete --yes "$STACK_NAME"
+ openstack --os-cloud rackspace stack show "$STACK_NAME"
# after stack delete, poll for 10m to know when stack is fully removed
# the logic here is that when "stack show $STACK_NAME" does not contain $STACK_NAME
# we assume it's successfully deleted and we can break to retry
- for i in `seq 20`; do
+ for j in $(seq 20); do
sleep 30;
- STACK_SHOW=$(openstack --os-cloud rackspace stack show $STACK_NAME)
- echo $STACK_SHOW
+ STACK_SHOW=$(openstack --os-cloud rackspace stack show "$STACK_NAME")
+ echo "$j: $STACK_SHOW"
if [[ $STACK_SHOW == *"DELETE_FAILED"* ]]; then
echo "stack delete failed. trying to stack abandon now"
- openstack --os-cloud rackspace stack abandon $STACK_NAME
- STACK_SHOW=$(openstack --os-cloud rackspace stack show $STACK_NAME)
- echo $STACK_SHOW
+ openstack --os-cloud rackspace stack abandon "$STACK_NAME"
+ STACK_SHOW=$(openstack --os-cloud rackspace stack show "$STACK_NAME")
+ echo "$STACK_SHOW"
fi
if [[ $STACK_SHOW != *"$STACK_NAME"* ]]; then
echo "stack show on $STACK_NAME came back empty. Assuming successful delete"
done
# capture stack info in console logs
-openstack --os-cloud rackspace stack show $STACK_NAME
+openstack --os-cloud rackspace stack show "$STACK_NAME"
if ! $STACK_SUCCESSFUL; then
exit 1
#!/bin/bash
-virtualenv $WORKSPACE/.venv
-source $WORKSPACE/.venv/bin/activate
+virtualenv "$WORKSPACE/.venv"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv/bin/activate"
pip install --upgrade --quiet pip
pip install --upgrade --quiet python-openstackclient python-heatclient
pip freeze
-cat > $WORKSPACE/docs/cloud-images.rst << EOF
+cat > "$WORKSPACE/docs/cloud-images.rst" << EOF
Following are the list of published images available to be used with Jenkins jobs.
EOF
# Blank line before EOF is on purpose to ensure there is spacing.
IFS=$'\n'
-IMAGES=(`openstack --os-cloud odlpriv image list --public -f value -c Name`)
-for i in ${IMAGES[@]}; do
- echo "* $i" >> $WORKSPACE/docs/cloud-images.rst
+IMAGES=($(openstack --os-cloud odlpriv image list --public -f value -c Name))
+for i in "${IMAGES[@]}"; do
+ echo "* $i" >> "$WORKSPACE/docs/cloud-images.rst"
done
git add docs/cloud-images.rst
./autogen.sh
./configure --prefix="$ROOT" \
--with-buildversion=$BUILD_NUMBER \
- CPPFLAGS="-isystem $ROOT/include"
+ CPPFLAGS="-isystem $ROOT/include" \
CXXFLAGS="-Wall"
make -j8
if ! make check; then find . -name test-suite.log -exec cat {} \; && false; fi
set -e
echo "---> Cleaning up OVS $OVS_VERSION"
-docker logs $CID > $WORKSPACE/docker-ovs-${OVS_VERSION}.log
-docker stop $CID
-docker rm $CID
+docker logs "$CID" > "$WORKSPACE/docker-ovs-${OVS_VERSION}.log"
+docker stop "$CID"
+docker rm "$CID"
rm env.properties
docker images
echo "---> Starting OVS $OVS_VERSION"
-/usr/bin/docker pull vpickard/openvswitch:$OVS_VERSION
-CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t vpickard/openvswitch:$OVS_VERSION /usr/bin/supervisord)
-REALCID=`echo $CID | rev | cut -d ' ' -f 1 | rev`
+/usr/bin/docker pull "vpickard/openvswitch:$OVS_VERSION"
+CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t "vpickard/openvswitch:$OVS_VERSION" /usr/bin/supervisord)
+REALCID=$(echo "$CID" | rev | cut -d ' ' -f 1 | rev)
echo "CID=$REALCID" > env.properties
echo "OVS_VERSION=${OVS_VERSION}" >> env.properties
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" >> env.properties
echo "---> Waiting..."
set -e
echo "---> Configuring OVS for HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl stop ovsdb-server
-/usr/bin/docker exec $CID supervisorctl start ovsdb-server-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-br br-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl add-ps br-vtep
-/usr/bin/docker exec $CID vtep-ctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
-/usr/bin/docker exec $CID vtep-ctl set-manager ptcp:6640
+/usr/bin/docker exec "$CID" supervisorctl stop ovsdb-server
+/usr/bin/docker exec "$CID" supervisorctl start ovsdb-server-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-br br-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl add-ps br-vtep
+/usr/bin/docker exec "$CID" vtep-ctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
+/usr/bin/docker exec "$CID" vtep-ctl set-manager ptcp:6640
sleep 5
echo "---> Starting OVS HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl start ovs-vtep
+/usr/bin/docker exec "$CID" supervisorctl start ovs-vtep
sleep 5
project: 'ovsdb'
# The functionality under test
- functionality: 'clustering'
+ functionality:
+ - 'upstream-clustering'
+ - 'gate-clustering'
# Project branches
stream:
project: 'ovsdb'
# The functionality under test
- functionality: 'southbound'
+ functionality:
+ - 'upstream-southbound'
+ - 'gate-southbound'
# Project branches
stream:
- core:
csit-list: >
- ovsdb-csit-1node-southbound-only-{stream},
- ovsdb-csit-1node-southbound-all-{stream},
- ovsdb-csit-3node-clustering-only-{stream}
+ ovsdb-csit-1node-gate-southbound-only-{stream},
+ ovsdb-csit-1node-gate-southbound-all-{stream},
+ ovsdb-csit-3node-gate-clustering-only-{stream}
gdebi
# Build release specified by build params
-$WORKSPACE/packaging/deb/build.py --major "$VERSION_MAJOR" \
+"$WORKSPACE/packaging/deb/build.py" --major "$VERSION_MAJOR" \
--minor "$VERSION_MINOR" \
--patch "$VERSION_PATCH" \
--deb "$PKG_VERSION" \
# Install required packages
virtualenv rpm_build
+# shellcheck disable=SC1091
source rpm_build/bin/activate
pip install --upgrade pip
-pip install -r $WORKSPACE/packaging/rpm/requirements.txt
+pip install -r "$WORKSPACE/packaging/rpm/requirements.txt"
# Build the latest snapshot matching the given major minor version
-$WORKSPACE/packaging/rpm/build.py --build-latest-snap \
+"$WORKSPACE/packaging/rpm/build.py" --build-latest-snap \
--major "$VERSION_MAJOR" \
--minor "$VERSION_MINOR" \
--sysd_commit "$SYSD_COMMIT" \
# Install required packages
virtualenv rpm_build
+# shellcheck disable=SC1091
source rpm_build/bin/activate
pip install --upgrade pip
-pip install -r $WORKSPACE/packaging/rpm/requirements.txt
+pip install -r "$WORKSPACE/packaging/rpm/requirements.txt"
# Make a URL for the tarball artifact from DOWNLOAD_URL (a zip)
+# shellcheck disable=SC2154
download_url="${{DOWNLOAD_URL//zip/tar.gz}}"
# Build release specified by build params
-$WORKSPACE/packaging/rpm/build.py --download_url "$download_url" \
+"$WORKSPACE/packaging/rpm/build.py" --download_url "$download_url" \
--sysd_commit "$SYSD_COMMIT" \
--changelog_date "$CHANGELOG_DATE" \
--changelog_name "$CHANGELOG_NAME" \
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail on errors in scripts this calls, give stacktrace
+set -ex -o pipefail
+
+# Install required packages
+virtualenv deb_build
+source deb_build/bin/activate
+pip install --upgrade pip
+
+# Install latest ansible
+sudo apt-add-repository ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install -y ansible
+
+git clone https://github.com/dfarrell07/ansible-opendaylight.git
+cd ansible-opendaylight
+sudo ansible-galaxy install -r requirements.yml
+sudo ansible-playbook -i "localhost," -c local examples/deb_repo_install_playbook.yml
+
+# Add more tests
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail on errors in scripts this calls, give stacktrace
+set -ex -o pipefail
+
+# Install required packages
+virtualenv rpm_build
+source rpm_build/bin/activate
+pip install --upgrade pip
+sudo yum install -y ansible
+
+git clone https://github.com/dfarrell07/ansible-opendaylight.git
+cd ansible-opendaylight
+sudo ansible-galaxy install -r requirements.yml
+sudo ansible-playbook -i "localhost," -c local examples/odl_6_testing_playbook.yml
+
+# Add more tests
# https://github.com/dfarrell07/puppet-opendaylight/blob/master/Vagrantfile
# Update Int/Pack's puppet-opendaylight submodule to latest master
-pushd $WORKSPACE/packaging
+pushd "$WORKSPACE/packaging"
git submodule init
git submodule update --remote
gpg2 --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
curl -L get.rvm.io | bash -s stable
# Expected by RVM, seems required to make RVM functions (`rvm use`) available
-source $HOME/.rvm/scripts/rvm
+# shellcheck disable=SC1090
+source "$HOME/.rvm/scripts/rvm"
rvm install 2.4.0
ruby --version
# This has to be done as a login shell to get rvm fns
# Install gems dependencies of puppet-opendaylight via Bundler
gem install bundler
-echo export PATH=\\$PATH:/usr/local/bin >> $HOME/.bashrc
-pushd $WORKSPACE/packaging/puppet/puppet-opendaylight
+echo export PATH="\\$PATH:/usr/local/bin" >> "$HOME/.bashrc"
+pushd "$WORKSPACE/packaging/puppet/puppet-opendaylight"
bundle install
bundle update
- 'packaging-build-deb-{stream}'
- 'packaging-test-rpm-{stream}'
- 'packaging-test-puppet-{stream}'
+ - 'packaging-test-ansible-rpm-{stream}'
+ - 'packaging-test-ansible-deb-{stream}'
project: 'integration/packaging'
- string:
name: DOWNLOAD_URL
# yamllint disable-line rule:line-length
- default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz'
+ default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.2-Boron/distribution-karaf-0.5.2-Boron.tar.gz'
description: 'URL to ODL tarball artifact to repackage into RPM'
- string:
name: SYSD_COMMIT
artifacts: '{archive-artifacts}'
- string:
name: VERSION_MAJOR
- default: '5'
+ default: '6'
description: 'OpenDaylight major (element) version number to build'
- string:
name: VERSION_MINOR
- default: '2'
+ default: '0'
description: 'OpenDaylight minor (SR) version number to build'
- string:
name: SYSD_COMMIT
- string:
name: URL
# yamllint disable-line rule:line-length
- default: 'https://raw.githubusercontent.com/opendaylight/integration-packaging/master/rpm/example_repo_configs/opendaylight-51-release.repo'
+ default: 'https://raw.githubusercontent.com/opendaylight/integration-packaging/master/rpm/example_repo_configs/opendaylight-52-release.repo'
description: 'Link to .repo or .rpm file'
- string:
name: REPO_FILE
- default: '/etc/yum.repos.d/opendaylight-51-release.repo'
+ default: '/etc/yum.repos.d/opendaylight-52-release.repo'
description: 'Name of the .repo file'
scm:
builders:
- shell: !include-raw: include-raw-test-rpm.sh
- - shell: !include-raw: include-raw-test-karaf.sh
+ - shell: !include-raw: include-raw-test-karaf.expect
publishers:
- archive-artifacts:
artifacts: '**'
+- job-template:
+ name: 'packaging-test-ansible-rpm-{stream}'
+
+ node: centos7-java-builder-2c-4g
+
+ project-type: freestyle
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: 7
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'packaging'
+ refspec: '$GERRIT_REFSPEC'
+ branch: 'master'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+
+ builders:
+ - shell: !include-raw: include-raw-test-ansible-rpm.sh
+
+ triggers:
+ - timed: '@daily'
+
+
- job-template:
name: 'packaging-build-deb-{stream}'
description: 'OpenDaylight major (element) version number to build'
- string:
name: VERSION_MINOR
- default: '0'
+ default: '2'
description: 'OpenDaylight minor (SR) version number to build'
- string:
name: VERSION_PATCH
description: 'Version of ODL systemd unitfile to download and package in ODL .deb'
- string:
name: CODENAME
- default: 'Boron'
+ default: 'Boron-SR2'
description: 'Elemental codename for the ODL release, including SR if applicable'
- string:
name: DOWNLOAD_URL
# yamllint disable-line rule:line-length
- default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz'
+ default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.2-Boron/distribution-karaf-0.5.2-Boron.tar.gz'
description: 'URL to ODL tarball artifact to repackage into .deb'
- string:
name: JAVA_VERSION
publishers:
- archive-artifacts:
artifacts: 'packaging/deb/opendaylight/*.deb'
+
+
+- job-template:
+ name: 'packaging-test-ansible-deb-{stream}'
+
+ node: ubuntu-xenial-mininet-ovs-25-2c-4g
+
+ project-type: freestyle
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: 7
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'packaging'
+ refspec: '$GERRIT_REFSPEC'
+ branch: 'master'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+
+ builders:
+ - shell: !include-raw: include-raw-test-ansible-deb.sh
+
+ triggers:
+ - timed: '@daily'
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-carbon,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-carbon,
ovsdb-csit-1node-periodic-scalability-daily-only-carbon,
- ovsdb-csit-1node-southbound-all-carbon,
- ovsdb-csit-1node-southbound-only-carbon,
- ovsdb-csit-3node-clustering-only-carbon,
+ ovsdb-csit-1node-upstream-southbound-all-carbon,
+ ovsdb-csit-1node-upstream-southbound-only-carbon,
+ ovsdb-csit-3node-upstream-clustering-only-carbon,
packetcable-csit-1node-pcmm-all-carbon,
packetcable-csit-1node-pcmm-only-carbon,
sdninterfaceapp-csit-1node-basic-only-carbon,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-boron,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-boron,
ovsdb-csit-1node-periodic-scalability-daily-only-boron,
- ovsdb-csit-1node-southbound-all-boron,
- ovsdb-csit-1node-southbound-only-boron,
- ovsdb-csit-3node-clustering-only-boron,
+ ovsdb-csit-1node-upstream-southbound-all-boron,
+ ovsdb-csit-1node-upstream-southbound-only-boron,
+ ovsdb-csit-3node-upstream-clustering-only-boron,
packetcable-csit-1node-pcmm-all-boron,
packetcable-csit-1node-pcmm-only-boron,
sdninterfaceapp-csit-1node-basic-only-boron,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-beryllium,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-beryllium,
ovsdb-csit-1node-periodic-scalability-daily-only-beryllium,
- ovsdb-csit-1node-southbound-all-beryllium,
- ovsdb-csit-1node-southbound-only-beryllium,
- ovsdb-csit-3node-clustering-only-beryllium,
+ ovsdb-csit-1node-upstream-southbound-all-beryllium,
+ ovsdb-csit-1node-upstream-southbound-only-beryllium,
+ ovsdb-csit-3node-upstream-clustering-only-beryllium,
packetcable-csit-1node-pcmm-all-beryllium,
packetcable-csit-1node-pcmm-only-beryllium,
sdninterfaceapp-csit-1node-basic-only-beryllium,
properties:
- build-discarder:
days-to-keep: '{build-days-to-keep}'
+ num-to-keep: 40
+ artifact-days-to-keep: -1
+ artifact-num-to-keep: 5
- parameter:
name: opendaylight-infra-parameters
description: 'Parameter to identify an ODL Gerrit project'
- string:
name: ARCHIVE_ARTIFACTS
- default: '{artifacts} **/target/surefire-reports/*-output.txt **/hs_err_*.log **/target/feature/feature.xml'
+ default: >
+ {artifacts}
+ **/target/surefire-reports/*-output.txt
+ **/hs_err_*.log
+ **/target/feature/feature.xml
+ **/*.hprof
description: 'Space separated glob patterns for artifacts to archive into logs.opendaylight.org'
- string:
name: GERRIT_PROJECT
cd packer
export PACKER_LOG="yes" && \
export PACKER_LOG_PATH="packer-build.log" && \
- packer.io build -var-file=$CLOUDENV \
+ packer.io build -color=false -var-file=$CLOUDENV \
-var-file=../packer/vars/{platform}.json \
../packer/templates/{template}.json
-jenkins-job-builder==1.6.1
+jenkins-job-builder==1.6.2
# force any errors to cause the script and job to end in failure
set -xeu -o pipefail
+ensure_kernel_install() {
+ # Workaround for mkinitrd failing on occasion.
+ # On CentOS 7 it seems like the kernel install can fail its mkinitrd
+ # run quietly, so we may not notice the failure. This script retries for a
+ # few times before giving up.
+ initramfs_ver=$(rpm -q kernel | tail -1 | sed "s/kernel-/initramfs-/")
+ grub_conf="/boot/grub/grub.conf"
+ # Public cloud does not use /boot/grub/grub.conf and uses grub2 instead.
+ if [ ! -e "$grub_conf" ]; then
+ echo "$grub_conf not found. Using Grub 2 conf instead."
+ grub_conf="/boot/grub2/grub.cfg"
+ fi
+
+ for i in $(seq 3); do
+ if grep "$initramfs_ver" "$grub_conf"; then
+ break
+ fi
+ echo "Kernel initrd missing. Retrying to install kernel..."
+ yum reinstall -y kernel
+ done
+ if ! grep "$initramfs_ver" "$grub_conf"; then
+ cat /boot/grub/grub.conf
+ echo "ERROR: Failed to install kernel."
+ exit 1
+ fi
+}
+
rh_systems() {
# Handle the occurrence where SELINUX is actually disabled
SELINUX=$(grep -E '^SELINUX=(disabled|permissive|enforcing)$' /etc/selinux/config)
echo "---> Updating operating system"
yum clean all
yum install -y deltarpm
-
- # Workaround for kernel panic issue that appears sometimes after kernel update
- # https://www.centos.org/forums/viewtopic.php?t=22425
- yum remove -y kernel
yum update -y
- yum install -y kernel
+
+ ensure_kernel_install
# add in components we need or want on systems
echo "---> Installing base packages"
fi
;;
RedHat|CentOS)
- if [ "$(echo $FACTER_OSVER | cut -d'.' -f1)" -ge "7" ]
+ if [ "$(echo "$FACTER_OSVER" | cut -d'.' -f1)" -ge "7" ]
then
echo "---> not modifying java alternatives as OpenJDK 1.7.0 does not exist"
else
EOF
# Add hostname to /etc/hosts to fix 'unable to resolve host' issue with sudo
- sed -i "/127.0.0.1/s/$/ `hostname`/" /etc/hosts
+ sed -i "/127.0.0.1/s/$/ $(hostname)/" /etc/hosts
echo "---> Updating operating system"
# Use retry loop to install packages for failing mirrors
for i in {0..5}
do
+ echo "Attempt $i of installing base packages..."
apt-get clean
apt-get update -m
apt-get upgrade -m
for pkg in unzip xz-utils puppet git git-review libxml-xpath-perl
do
+ # shellcheck disable=SC2046
if [ $(dpkg-query -W -f='${Status}' $pkg 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
- apt-get install $pkg;
+ apt-get install $pkg
fi
done
done
pip install -c requirements/upper-constraints.txt -r ${proj}/test-requirements.txt
done
-echo '---> Installing openvswitch from openstack repo'
-# the newton release has ovs 2.5.0
-yum install -y http://rdoproject.org/repos/openstack-newton/rdo-release-newton.rpm
+if [ "$branch" == "stable/mitaka" ] || [ "$branch" == "stable/liberty" ]; then
+ # the newton release has ovs 2.5.0
+ echo '---> Installing openvswitch from openstack Newton repo (2.5.0)'
+ yum install -y http://rdoproject.org/repos/openstack-newton/rdo-release-newton.rpm
+else
+ # the ocata release has ovs 2.6.1
+ echo '---> Installing openvswitch from openstack Ocata repo (2.6.1)'
+ yum install -y http://rdoproject.org/repos/openstack-ocata/rdo-release-ocata.rpm
+fi
+
yum install -y --nogpgcheck openvswitch
cd $OLDPWD
# Needed by autorelease scripts
yum install -y xmlstarlet
+# Needed by docs project
+yum install -y graphviz
+
# sshpass for the current deploy test to be runnable immediately after
# build
yum install -y sshpass
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"provisioners": [
{
"type": "shell",
- "inline": ["mkdir -p /tmp/packer"]
+ "inline": [
+ "mkdir -p /tmp/packer"
+ ]
},
{
"type": "file",
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
{
"public_base_image": "CentOS 7 (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "CentOS 7 (cloudimg 1510)",
"private_cloud_user": "centos",
-
"distro": "CentOS 7",
"cloud_user_data": "provision/rh-user_data.sh"
}
{
"public_auth_url": "https://identity.api.rackspacecloud.com/v2.0/",
-
"public_tenant": "TENNANTID",
"public_user": "USERID",
"public_pass": "USERPASS",
"public_network": "cac67a72-aefc-48f8-ae55-9affa3540dd0",
-
"private_auth_url": "https://privapi.opendaylight.org:5000/v2.0",
-
"private_tenant": "TENNANTID",
"private_user": "USERID",
"private_pass": "USERPASS",
{
"public_base_image": "Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "Ubuntu 14.04 LTS Trusty Tahr (cloudimg)",
"private_cloud_user": "ubuntu",
-
"distro": "Ubuntu 14.04",
"cloud_user_data": "provision/null_data.sh"
}
{
"public_base_image": "Ubuntu 16.04 LTS (Xenial Xerus) (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "Ubuntu 16.04 LTS (2016-05-03 cloudimg)",
"private_cloud_user": "ubuntu",
-
"distro": "Ubuntu 16.04",
"cloud_user_data": "provision/null_data.sh"
}
extends: default
rules:
+ empty-lines:
+ max-end: 1
line-length:
max: 120